From 92b28a88d8bcd5aa50352ecaff844229df1cee59 Mon Sep 17 00:00:00 2001
From: Daniel J Walsh
Date: Tue, 18 Sep 2018 15:31:54 -0400
Subject: Vendor in latest containers/buildah

Switch from projectatomic/buildah to containers/buildah

Signed-off-by: Daniel J Walsh
---
 vendor/github.com/containers/buildah/LICENSE | 201 ++
 vendor/github.com/containers/buildah/README.md | 120 ++
 vendor/github.com/containers/buildah/add.go | 242 +++
 vendor/github.com/containers/buildah/bind/mount.go | 295 +++
 .../containers/buildah/bind/mount_unsupported.go | 13 +
 vendor/github.com/containers/buildah/bind/util.go | 39 +
 vendor/github.com/containers/buildah/buildah.go | 513 +++++
 vendor/github.com/containers/buildah/chroot/run.go | 1308 +++++++++++++
 .../containers/buildah/chroot/seccomp.go | 142 ++
 .../buildah/chroot/seccomp_unsupported.go | 15 +
 .../containers/buildah/chroot/selinux.go | 22 +
 .../buildah/chroot/selinux_unsupported.go | 18 +
 .../containers/buildah/chroot/unsupported.go | 15 +
 .../github.com/containers/buildah/chroot/util.go | 15 +
 vendor/github.com/containers/buildah/commit.go | 189 ++
 vendor/github.com/containers/buildah/common.go | 35 +
 vendor/github.com/containers/buildah/config.go | 545 ++++++
 vendor/github.com/containers/buildah/delete.go | 18 +
 .../github.com/containers/buildah/docker/types.go | 262 +++
 vendor/github.com/containers/buildah/image.go | 634 +++++++
 .../containers/buildah/imagebuildah/build.go | 1337 +++++++++++++
 .../buildah/imagebuildah/chroot_symlink.go | 145 ++
 .../containers/buildah/imagebuildah/util.go | 113 ++
 vendor/github.com/containers/buildah/import.go | 131 ++
 vendor/github.com/containers/buildah/mount.go | 17 +
 vendor/github.com/containers/buildah/new.go | 370 ++++
 .../containers/buildah/pkg/cli/common.go | 295 +++
 .../containers/buildah/pkg/parse/parse.go | 572 ++++++
 vendor/github.com/containers/buildah/pull.go | 228 +++
 vendor/github.com/containers/buildah/run.go | 1995 ++++++++++++++++++++
 vendor/github.com/containers/buildah/run_linux.go | 17 +
 .../github.com/containers/buildah/run_unsupport.go | 11 +
 vendor/github.com/containers/buildah/seccomp.go | 35 +
 .../containers/buildah/seccomp_unsupported.go | 15 +
 vendor/github.com/containers/buildah/selinux.go | 12 +
 .../containers/buildah/selinux_unsupported.go | 10 +
 vendor/github.com/containers/buildah/unmount.go | 11 +
 .../containers/buildah/unshare/unshare.c | 110 ++
 .../containers/buildah/unshare/unshare.go | 273 +++
 .../containers/buildah/unshare/unshare_cgo.go | 10 +
 .../containers/buildah/unshare/unshare_gccgo.go | 25 +
 .../buildah/unshare/unshare_unsupported.go | 1 +
 vendor/github.com/containers/buildah/util.go | 196 ++
 vendor/github.com/containers/buildah/util/types.go | 35 +
 vendor/github.com/containers/buildah/util/util.go | 494 +++++
 vendor/github.com/containers/buildah/vendor.conf | 63 +
 vendor/github.com/projectatomic/buildah/LICENSE | 201 --
 vendor/github.com/projectatomic/buildah/README.md | 120 --
 vendor/github.com/projectatomic/buildah/add.go | 242 ---
 .../github.com/projectatomic/buildah/bind/mount.go | 295 ---
 .../buildah/bind/mount_unsupported.go | 13 -
 .../github.com/projectatomic/buildah/bind/util.go | 39 -
 vendor/github.com/projectatomic/buildah/buildah.go | 513 -----
 .../github.com/projectatomic/buildah/chroot/run.go | 1308 -------------
 .../projectatomic/buildah/chroot/seccomp.go | 142 --
 .../buildah/chroot/seccomp_unsupported.go | 15 -
 .../projectatomic/buildah/chroot/selinux.go | 22 -
 .../buildah/chroot/selinux_unsupported.go | 18 -
 .../projectatomic/buildah/chroot/unsupported.go | 15 -
 .../projectatomic/buildah/chroot/util.go | 15 -
 vendor/github.com/projectatomic/buildah/commit.go | 189 --
 vendor/github.com/projectatomic/buildah/common.go | 35 -
 vendor/github.com/projectatomic/buildah/config.go | 545 ------
 vendor/github.com/projectatomic/buildah/delete.go | 18 -
 .../projectatomic/buildah/docker/types.go | 262 ---
 vendor/github.com/projectatomic/buildah/image.go | 634 -------
 .../projectatomic/buildah/imagebuildah/build.go | 1337 -------------
 .../buildah/imagebuildah/chroot_symlink.go | 145 --
 .../projectatomic/buildah/imagebuildah/util.go | 113 --
 vendor/github.com/projectatomic/buildah/import.go | 131 --
 vendor/github.com/projectatomic/buildah/mount.go | 17 -
 vendor/github.com/projectatomic/buildah/new.go | 370 ----
 .../projectatomic/buildah/pkg/cli/common.go | 295 ---
 .../projectatomic/buildah/pkg/parse/parse.go | 572 ------
 vendor/github.com/projectatomic/buildah/pull.go | 228 ---
 vendor/github.com/projectatomic/buildah/run.go | 1995 --------------------
 .../github.com/projectatomic/buildah/run_linux.go | 17 -
 .../projectatomic/buildah/run_unsupport.go | 11 -
 vendor/github.com/projectatomic/buildah/seccomp.go | 35 -
 .../projectatomic/buildah/seccomp_unsupported.go | 15 -
 vendor/github.com/projectatomic/buildah/selinux.go | 12 -
 .../projectatomic/buildah/selinux_unsupported.go | 10 -
 vendor/github.com/projectatomic/buildah/unmount.go | 11 -
 .../projectatomic/buildah/unshare/unshare.c | 110 --
 .../projectatomic/buildah/unshare/unshare.go | 273 ---
 .../projectatomic/buildah/unshare/unshare_cgo.go | 10 -
 .../projectatomic/buildah/unshare/unshare_gccgo.go | 25 -
 .../buildah/unshare/unshare_unsupported.go | 1 -
 vendor/github.com/projectatomic/buildah/util.go | 196 --
 .../github.com/projectatomic/buildah/util/types.go | 35 -
 .../github.com/projectatomic/buildah/util/util.go | 494 -----
 .../github.com/projectatomic/buildah/vendor.conf | 63 -
 92 files changed, 11162 insertions(+), 11162 deletions(-)
 create mode 100644 vendor/github.com/containers/buildah/LICENSE
 create mode 100644 vendor/github.com/containers/buildah/README.md
 create mode 100644 vendor/github.com/containers/buildah/add.go
 create mode 100644 vendor/github.com/containers/buildah/bind/mount.go
 create mode 100644 vendor/github.com/containers/buildah/bind/mount_unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/bind/util.go
 create mode 100644 vendor/github.com/containers/buildah/buildah.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/run.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/seccomp.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/selinux.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/selinux_unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/chroot/util.go
 create mode 100644 vendor/github.com/containers/buildah/commit.go
 create mode 100644 vendor/github.com/containers/buildah/common.go
 create mode 100644 vendor/github.com/containers/buildah/config.go
 create mode 100644 vendor/github.com/containers/buildah/delete.go
 create mode 100644 vendor/github.com/containers/buildah/docker/types.go
 create mode 100644 vendor/github.com/containers/buildah/image.go
 create mode 100644 vendor/github.com/containers/buildah/imagebuildah/build.go
 create mode 100644 vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
 create mode 100644 vendor/github.com/containers/buildah/imagebuildah/util.go
 create mode 100644 vendor/github.com/containers/buildah/import.go
 create mode 100644 vendor/github.com/containers/buildah/mount.go
 create mode 100644 vendor/github.com/containers/buildah/new.go
 create mode 100644 vendor/github.com/containers/buildah/pkg/cli/common.go
 create mode 100644 vendor/github.com/containers/buildah/pkg/parse/parse.go
 create mode 100644 vendor/github.com/containers/buildah/pull.go
 create mode 100644 vendor/github.com/containers/buildah/run.go
 create mode 100644 vendor/github.com/containers/buildah/run_linux.go
 create mode 100644 vendor/github.com/containers/buildah/run_unsupport.go
 create mode 100644 vendor/github.com/containers/buildah/seccomp.go
 create mode 100644 vendor/github.com/containers/buildah/seccomp_unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/selinux.go
 create mode 100644 vendor/github.com/containers/buildah/selinux_unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/unmount.go
 create mode 100644 vendor/github.com/containers/buildah/unshare/unshare.c
 create mode 100644 vendor/github.com/containers/buildah/unshare/unshare.go
 create mode 100644 vendor/github.com/containers/buildah/unshare/unshare_cgo.go
 create mode 100644 vendor/github.com/containers/buildah/unshare/unshare_gccgo.go
 create mode 100644 vendor/github.com/containers/buildah/unshare/unshare_unsupported.go
 create mode 100644 vendor/github.com/containers/buildah/util.go
 create mode 100644 vendor/github.com/containers/buildah/util/types.go
 create mode 100644 vendor/github.com/containers/buildah/util/util.go
 create mode 100644 vendor/github.com/containers/buildah/vendor.conf
 delete mode 100644 vendor/github.com/projectatomic/buildah/LICENSE
 delete mode 100644 vendor/github.com/projectatomic/buildah/README.md
 delete mode 100644 vendor/github.com/projectatomic/buildah/add.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/bind/mount.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/bind/mount_unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/bind/util.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/buildah.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/run.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/seccomp.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/seccomp_unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/selinux.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/selinux_unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/chroot/util.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/commit.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/common.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/config.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/delete.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/docker/types.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/image.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/imagebuildah/build.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/imagebuildah/util.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/import.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/mount.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/new.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/pkg/cli/common.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/pkg/parse/parse.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/pull.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/run.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/run_linux.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/run_unsupport.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/seccomp.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/seccomp_unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/selinux.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/selinux_unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/unmount.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/unshare/unshare.c
 delete mode 100644 vendor/github.com/projectatomic/buildah/unshare/unshare.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/unshare/unshare_cgo.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/unshare/unshare_gccgo.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/unshare/unshare_unsupported.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/util.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/util/types.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/util/util.go
 delete mode 100644 vendor/github.com/projectatomic/buildah/vendor.conf

(limited to 'vendor')

diff --git a/vendor/github.com/containers/buildah/LICENSE b/vendor/github.com/containers/buildah/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/containers/buildah/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
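For code that consumes this library, the change described in the commit message is purely an import-path rename; the package name and its API are unchanged. Below is a minimal sketch of a consumer after the move (the `main` wrapper is illustrative; `buildah.Package` and `buildah.Version` are the constants declared in `buildah.go` later in this patch):

```go
package main

import (
	"fmt"

	// Previously: "github.com/projectatomic/buildah"
	"github.com/containers/buildah"
)

func main() {
	// Only the import path changes with this vendoring switch;
	// identifiers such as Package and Version stay the same.
	fmt.Printf("%s version %s\n", buildah.Package, buildah.Version)
}
```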
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
new file mode 100644
index 000000000..b04957591
--- /dev/null
+++ b/vendor/github.com/containers/buildah/README.md
@@ -0,0 +1,120 @@
+![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)
+
+# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
+[![Travis](https://travis-ci.org/containers/buildah.svg?branch=master)](https://travis-ci.org/containers/buildah)
+
+The Buildah package provides a command line tool that can be used to
+* create a working container, either from scratch or using an image as a starting point
+* create an image, either from a working container or via the instructions in a Dockerfile
+* build images in either the OCI image format or the traditional upstream docker image format
+* mount a working container's root filesystem for manipulation
+* unmount a working container's root filesystem
+* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
+* delete a working container or an image
+* rename a local container
+
+## Buildah Information for Developers
+
+**[Buildah Demos](demos)**
+
+**[Changelog](CHANGELOG.md)**
+
+**[Contributing](CONTRIBUTING.md)**
+
+**[Development Plan](developmentplan.md)**
+
+**[Installation notes](install.md)**
+
+**[Troubleshooting Guide](troubleshooting.md)**
+
+**[Tutorials](docs/tutorials)**
+
+## Buildah and Podman relationship
+
+Buildah and Podman are two complementary open source projects that are available on
+most Linux platforms. Both projects reside at [GitHub.com](https://github.com),
+with Buildah [here](https://github.com/containers/buildah) and
+Podman [here](https://github.com/containers/libpod). Both Buildah and Podman are
+command line tools that work on OCI images and containers. The two projects
+differ in their areas of specialization.
+
+Buildah specializes in building OCI images. Buildah's commands replicate all
+of the commands that are found in a Dockerfile. Buildah's goal is also to
+provide a lower-level coreutils interface to build images, allowing people to build
+containers without requiring a Dockerfile. The intent with Buildah is to allow other
+scripting languages to build container images without requiring a daemon.
+
+Podman specializes in all of the commands and functions that help you to maintain and modify
+OCI images, such as pulling and tagging. It also allows you to create, run, and maintain
+containers created from those images.
+
+A major difference between Podman and Buildah is their concept of a container. Podman
+allows users to create "traditional containers", which are intended to be long-lived,
+whereas Buildah containers are created only so that content can be added back
+to the container image. An easy way to think of it is that the
+`buildah run` command emulates the RUN instruction in a Dockerfile, while the `podman run`
+command emulates the `docker run` command in functionality. Because of this and their underlying
+storage differences, you cannot see Podman containers from within Buildah, or vice versa.
+
+In short, Buildah is an efficient way to create OCI images, while Podman allows
+you to manage and maintain those images and containers in a production environment using
+familiar container CLI commands. For more details, see the
+[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).
+
+## Example
+
+From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
+
+```bash
+$ cat > lighttpd.sh <<"EOF"
+#!/bin/bash -x
+
+ctr1=`buildah from ${1:-fedora}`
+
+## Get all updates and install our minimal httpd server
+buildah run $ctr1 -- dnf update -y
+buildah run $ctr1 -- dnf install -y lighttpd
+
+## Include some buildtime annotations
+buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
+
+## Run our server and expose the port
+buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" $ctr1
+buildah config --port 80 $ctr1
+
+## Commit this container to an image name
+buildah commit $ctr1 ${2:-$USER/lighttpd}
+EOF
+
+$ chmod +x lighttpd.sh
+$ sudo ./lighttpd.sh
+```
+
+## Commands
+| Command | Description |
+| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or directory to the container. |
+| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
+| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
+| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
+| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
+| [buildah-copy(1)](/docs/buildah-copy.md) | Copy the contents of a file, URL, or directory into a container's working directory. |
+| [buildah-from(1)](/docs/buildah-from.md) | Create a new working container, either from scratch or using a specified image as a starting point. |
+| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
+| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspect the configuration of a container or image. |
+| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
+| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
+| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
+| [buildah-rm(1)](/docs/buildah-rm.md) | Remove one or more working containers. |
+| [buildah-rmi(1)](/docs/buildah-rmi.md) | Remove one or more images. |
+| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
+| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
+| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
+| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. |
+| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah version information. |
+
+**Future goals include:**
+* more CI tests
+* additional CLI commands (?)
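The shell example above also has a library-level equivalent, matching Buildah's stated goal of letting other tools build images without a daemon. Here is a hedged Go sketch of the same opening steps; it assumes the `NewBuilder` constructor vendored in `new.go`, the `Delete` method from `delete.go`, and containers/storage's `DefaultStoreOptions`, none of which are shown in this excerpt, while `BuilderOptions`, `PullIfMissing`, `AddAndCopyOptions`, and `Add` are all defined in `buildah.go` and `add.go` below:

```go
package main

import (
	"context"
	"log"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

func main() {
	// Assumption: library defaults; real callers usually honor the
	// system's storage configuration instead.
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}

	// Create a working container from fedora, pulling the image only
	// if a local copy is missing (the `buildah from` step).
	builder, err := buildah.NewBuilder(context.Background(), store, buildah.BuilderOptions{
		FromImage:  "fedora",
		PullPolicy: buildah.PullIfMissing,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer builder.Delete() // error ignored in this sketch

	// Copy a file into the working container, as `buildah add` would;
	// Add's signature appears in add.go below.
	if err := builder.Add("/etc/motd", false, buildah.AddAndCopyOptions{}, "./motd"); err != nil {
		log.Fatal(err)
	}
}
```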
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go new file mode 100644 index 000000000..b1747db94 --- /dev/null +++ b/vendor/github.com/containers/buildah/add.go @@ -0,0 +1,242 @@ +package buildah + +import ( + "io" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/containers/buildah/util" + "github.com/containers/libpod/pkg/chrootuser" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// AddAndCopyOptions holds options for add and copy commands. +type AddAndCopyOptions struct { + // Chown is a spec for the user who should be given ownership over the + // newly-added content, potentially overriding permissions which would + // otherwise match those of local files and directories being copied. + Chown string + // All of the data being copied will pass through Hasher, if set. + // If the sources are URLs or files, their contents will be passed to + // Hasher. + // If the sources include directory trees, Hasher will be passed + // tar-format archives of the directory trees. + Hasher io.Writer +} + +// addURL copies the contents of the source URL to the destination. This is +// its own function so that deferred closes happen after we're done pulling +// down each item of potentially many. +func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer) error { + logrus.Debugf("saving %q to %q", srcurl, destination) + resp, err := http.Get(srcurl) + if err != nil { + return errors.Wrapf(err, "error getting %q", srcurl) + } + defer resp.Body.Close() + f, err := os.Create(destination) + if err != nil { + return errors.Wrapf(err, "error creating %q", destination) + } + if err = f.Chown(owner.UID, owner.GID); err != nil { + return errors.Wrapf(err, "error setting owner of %q", destination) + } + if last := resp.Header.Get("Last-Modified"); last != "" { + if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil { + logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2) + } else { + defer func() { + if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil { + logrus.Debugf("error setting mtime to Last-Modified time %q: %v", last, err3) + } + }() + } + } + defer f.Close() + bodyReader := io.Reader(resp.Body) + if hasher != nil { + bodyReader = io.TeeReader(bodyReader, hasher) + } + n, err := io.Copy(f, bodyReader) + if err != nil { + return errors.Wrapf(err, "error reading contents for %q", destination) + } + if resp.ContentLength >= 0 && n != resp.ContentLength { + return errors.Errorf("error reading contents for %q: wrong length (%d != %d)", destination, n, resp.ContentLength) + } + if err := f.Chmod(0600); err != nil { + return errors.Wrapf(err, "error setting permissions on %q", destination) + } + return nil +} + +// Add copies the contents of the specified sources into the container's root +// filesystem, optionally extracting contents of local files that look like +// non-empty archives. +func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error { + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return err + } + defer func() { + if err2 := b.Unmount(); err2 != nil { + logrus.Errorf("error unmounting container: %v", err2) + } + }() + // Find out which user (and group) the destination should belong to. 
+ user, err := b.user(mountPoint, options.Chown) + if err != nil { + return err + } + containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)} + hostUID, hostGID, err := util.GetHostIDs(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap, user.UID, user.GID) + if err != nil { + return err + } + hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)} + dest := mountPoint + if destination != "" && filepath.IsAbs(destination) { + dest = filepath.Join(dest, destination) + } else { + if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil { + return err + } + dest = filepath.Join(dest, b.WorkDir(), destination) + } + // If the destination was explicitly marked as a directory by ending it + // with a '/', create it so that we can be sure that it's a directory, + // and any files we're copying will be placed in the directory. + if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator { + if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { + return err + } + } + // Make sure the destination's parent directory is usable. + if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() { + return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest)) + } + // Now look at the destination itself. + destfi, err := os.Stat(dest) + if err != nil { + if !os.IsNotExist(err) { + return errors.Wrapf(err, "couldn't determine what %q is", dest) + } + destfi = nil + } + if len(source) > 1 && (destfi == nil || !destfi.IsDir()) { + return errors.Errorf("destination %q is not a directory", dest) + } + copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher) + copyWithTar := b.copyWithTar(&containerOwner, options.Hasher) + untarPath := b.untarPath(nil, options.Hasher) + for _, src := range source { + if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + // We assume that source is a file, and we're copying + // it to the destination. If the destination is + // already a directory, create a file inside of it. + // Otherwise, the destination is the file to which + // we'll save the contents. + url, err := url.Parse(src) + if err != nil { + return errors.Wrapf(err, "error parsing URL %q", src) + } + d := dest + if destfi != nil && destfi.IsDir() { + d = filepath.Join(dest, path.Base(url.Path)) + } + if err := addURL(d, src, hostOwner, options.Hasher); err != nil { + return err + } + continue + } + + glob, err := filepath.Glob(src) + if err != nil { + return errors.Wrapf(err, "invalid glob %q", src) + } + if len(glob) == 0 { + return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src) + } + for _, gsrc := range glob { + esrc, err := filepath.EvalSymlinks(gsrc) + if err != nil { + return errors.Wrapf(err, "error evaluating symlinks %q", gsrc) + } + srcfi, err := os.Stat(esrc) + if err != nil { + return errors.Wrapf(err, "error reading %q", esrc) + } + if srcfi.IsDir() { + // The source is a directory, so copy the contents of + // the source directory into the target directory. Try + // to create it first, so that if there's a problem, + // we'll discover why that won't work. 
+ if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { + return err + } + logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*") + if err := copyWithTar(esrc, dest); err != nil { + return errors.Wrapf(err, "error copying %q to %q", esrc, dest) + } + continue + } + if !extract || !archive.IsArchivePath(esrc) { + // This source is a file, and either it's not an + // archive, or we don't care whether or not it's an + // archive. + d := dest + if destfi != nil && destfi.IsDir() { + d = filepath.Join(dest, filepath.Base(gsrc)) + } + // Copy the file, preserving attributes. + logrus.Debugf("copying %q to %q", esrc, d) + if err := copyFileWithTar(esrc, d); err != nil { + return errors.Wrapf(err, "error copying %q to %q", esrc, d) + } + continue + } + // We're extracting an archive into the destination directory. + logrus.Debugf("extracting contents of %q into %q", esrc, dest) + if err := untarPath(esrc, dest); err != nil { + return errors.Wrapf(err, "error extracting %q into %q", esrc, dest) + } + } + } + return nil +} + +// user returns the user (and group) information which the destination should belong to. +func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) { + if userspec == "" { + userspec = b.User() + } + + uid, gid, err := chrootuser.GetUser(mountPoint, userspec) + u := specs.User{ + UID: uid, + GID: gid, + Username: userspec, + } + if !strings.Contains(userspec, ":") { + groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) + if err2 != nil { + if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { + err = err2 + } + } else { + u.AdditionalGids = groups + } + + } + return u, err +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go new file mode 100644 index 000000000..e1ae323b9 --- /dev/null +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -0,0 +1,295 @@ +// +build linux + +package bind + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/containers/buildah/util" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// SetupIntermediateMountNamespace creates a new mount namespace and bind +// mounts all bind-mount sources into a subdirectory of bundlePath that can +// only be reached by the root user of the container's user namespace, except +// for Mounts which include the NoBindOption option in their options list. The +// NoBindOption will then merely be removed. +func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) { + defer stripNoBindOption(spec) + + // We expect a root directory to be defined. + if spec.Root == nil { + return nil, errors.Errorf("configuration has no root filesystem?") + } + rootPath := spec.Root.Path + + // Create a new mount namespace in which to do the things we're doing. + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return nil, errors.Wrapf(err, "error creating new mount namespace for %v", spec.Process.Args) + } + + // Make all of our mounts private to our namespace. + if err := mount.MakeRPrivate("/"); err != nil { + return nil, errors.Wrapf(err, "error making mounts private to mount namespace for %v", spec.Process.Args) + } + + // Make sure the bundle directory is searchable. 
We created it with + // TempDir(), so it should have started with permissions set to 0700. + info, err := os.Stat(bundlePath) + if err != nil { + return nil, errors.Wrapf(err, "error checking permissions on %q", bundlePath) + } + if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil { + return nil, errors.Wrapf(err, "error loosening permissions on %q", bundlePath) + } + + // Figure out who needs to be able to reach these bind mounts in order + // for the container to be started. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return nil, err + } + + // Hand back a callback that the caller can use to clean up everything + // we're doing here. + unmount := []string{} + unmountAll = func() (err error) { + for _, mountpoint := range unmount { + // Unmount it and anything under it. + if err2 := UnmountMountpoints(mountpoint, nil); err2 != nil { + logrus.Warnf("pkg/bind: error unmounting %q: %v", mountpoint, err2) + if err == nil { + err = err2 + } + } + if err2 := unix.Unmount(mountpoint, unix.MNT_DETACH); err2 != nil { + if errno, ok := err2.(syscall.Errno); !ok || errno != syscall.EINVAL { + logrus.Warnf("pkg/bind: error detaching %q: %v", mountpoint, err2) + if err == nil { + err = err2 + } + } + } + // Remove just the mountpoint. + retry := 10 + remove := unix.Unlink + err2 := remove(mountpoint) + for err2 != nil && retry > 0 { + if errno, ok := err2.(syscall.Errno); ok { + switch errno { + default: + retry = 0 + continue + case syscall.EISDIR: + remove = unix.Rmdir + err2 = remove(mountpoint) + case syscall.EBUSY: + if err3 := unix.Unmount(mountpoint, unix.MNT_DETACH); err3 == nil { + err2 = remove(mountpoint) + } + } + retry-- + } + } + if err2 != nil { + logrus.Warnf("pkg/bind: error removing %q: %v", mountpoint, err2) + if err == nil { + err = err2 + } + } + } + return err + } + + // Create a top-level directory that the "root" user will be able to + // access, that "root" from containers which use different mappings, or + // other unprivileged users outside of containers, shouldn't be able to + // access. + mnt := filepath.Join(bundlePath, "mnt") + if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil { + return unmountAll, errors.Wrapf(err, "error creating %q owned by the container's root user", mnt) + } + + // Make that directory private, and add it to the list of locations we + // unmount at cleanup time. + if err = mount.MakeRPrivate(mnt); err != nil { + return unmountAll, errors.Wrapf(err, "error marking filesystem at %q as private", mnt) + } + unmount = append([]string{mnt}, unmount...) + + // Create a bind mount for the root filesystem and add it to the list. + rootfs := filepath.Join(mnt, "rootfs") + if err = os.Mkdir(rootfs, 0000); err != nil { + return unmountAll, errors.Wrapf(err, "error creating directory %q", rootfs) + } + if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { + return unmountAll, errors.Wrapf(err, "error bind mounting root filesystem from %q to %q", rootPath, rootfs) + } + logrus.Debugf("bind mounted %q to %q", rootPath, rootfs) + unmount = append([]string{rootfs}, unmount...) + spec.Root.Path = rootfs + + // Do the same for everything we're binding in. + mounts := make([]specs.Mount, 0, len(spec.Mounts)) + for i := range spec.Mounts { + // If we're not using an intermediate, leave it in the list. 
+ if leaveBindMountAlone(spec.Mounts[i]) { + mounts = append(mounts, spec.Mounts[i]) + continue + } + // Check if the source is a directory or something else. + info, err := os.Stat(spec.Mounts[i].Source) + if err != nil { + if os.IsNotExist(err) { + logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source) + continue + } + return unmountAll, errors.Wrapf(err, "error checking if %q is a directory", spec.Mounts[i].Source) + } + stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i)) + if info.IsDir() { + // If the source is a directory, make one to use as the + // mount target. + if err = os.Mkdir(stage, 0000); err != nil { + return unmountAll, errors.Wrapf(err, "error creating directory %q", stage) + } + } else { + // If the source is not a directory, create an empty + // file to use as the mount target. + file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000) + if err != nil { + return unmountAll, errors.Wrapf(err, "error creating file %q", stage) + } + file.Close() + } + // Bind mount the source from wherever it is to a place where + // we know the runtime helper will be able to get to it... + if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { + return unmountAll, errors.Wrapf(err, "error bind mounting bind object from %q to %q", spec.Mounts[i].Source, stage) + } + logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage) + spec.Mounts[i].Source = stage + // ... and update the source location that we'll pass to the + // runtime to our intermediate location. + mounts = append(mounts, spec.Mounts[i]) + unmount = append([]string{stage}, unmount...) + } + spec.Mounts = mounts + + return unmountAll, nil +} + +// Decide if the mount should not be redirected to an intermediate location first. +func leaveBindMountAlone(mount specs.Mount) bool { + // If we know we shouldn't do a redirection for this mount, skip it. + if util.StringInSlice(NoBindOption, mount.Options) { + return true + } + // If we're not bind mounting it in, we don't need to do anything for it. + if mount.Type != "bind" && !util.StringInSlice("bind", mount.Options) && !util.StringInSlice("rbind", mount.Options) { + return true + } + return false +} + +// UnmountMountpoints unmounts the given mountpoints and anything that's hanging +// off of them, rather aggressively. If a mountpoint also appears in the +// mountpointsToRemove slice, the mountpoints are removed after they are +// unmounted. +func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { + mounts, err := mount.GetMounts() + if err != nil { + return errors.Wrapf(err, "error retrieving list of mounts") + } + // getChildren returns the list of mount IDs that hang off of the + // specified ID. + getChildren := func(id int) []int { + var list []int + for _, info := range mounts { + if info.Parent == id { + list = append(list, info.ID) + } + } + return list + } + // getTree returns the list of mount IDs that hang off of the specified + // ID, and off of those mount IDs, etc. + getTree := func(id int) []int { + mounts := []int{id} + i := 0 + for i < len(mounts) { + children := getChildren(mounts[i]) + mounts = append(mounts, children...) 
+ i++ + } + return mounts + } + // getMountByID looks up the mount info with the specified ID + getMountByID := func(id int) *mount.Info { + for i := range mounts { + if mounts[i].ID == id { + return mounts[i] + } + } + return nil + } + // getMountByPoint looks up the mount info with the specified mountpoint + getMountByPoint := func(mountpoint string) *mount.Info { + for i := range mounts { + if mounts[i].Mountpoint == mountpoint { + return mounts[i] + } + } + return nil + } + // find the top of the tree we're unmounting + top := getMountByPoint(mountpoint) + if top == nil { + return errors.Wrapf(err, "%q is not mounted", mountpoint) + } + // add all of the mounts that are hanging off of it + tree := getTree(top.ID) + // unmount each mountpoint, working from the end of the list (leaf nodes) to the top + for i := range tree { + var st unix.Stat_t + id := tree[len(tree)-i-1] + mount := getMountByID(id) + // check if this mountpoint is mounted + if err := unix.Lstat(mount.Mountpoint, &st); err != nil { + return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) + } + if mount.Major != int(unix.Major(st.Dev)) || mount.Minor != int(unix.Minor(st.Dev)) { + logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) + continue + } + // do the unmount + if err := unix.Unmount(mount.Mountpoint, 0); err != nil { + // if it was busy, detach it + if errno, ok := err.(syscall.Errno); ok && errno == syscall.EBUSY { + err = unix.Unmount(mount.Mountpoint, unix.MNT_DETACH) + } + if err != nil { + // if it was invalid (not mounted), hide the error, else return it + if errno, ok := err.(syscall.Errno); !ok || errno != syscall.EINVAL { + logrus.Warnf("error unmounting %q: %v", mount.Mountpoint, err) + continue + } + } + } + // if we're also supposed to remove this thing, do that, too + if util.StringInSlice(mount.Mountpoint, mountpointsToRemove) { + if err := os.Remove(mount.Mountpoint); err != nil { + return errors.Wrapf(err, "error removing %q", mount.Mountpoint) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/bind/mount_unsupported.go b/vendor/github.com/containers/buildah/bind/mount_unsupported.go new file mode 100644 index 000000000..88ca2ca8b --- /dev/null +++ b/vendor/github.com/containers/buildah/bind/mount_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package bind + +import ( + "github.com/opencontainers/runtime-spec/specs-go" +) + +// SetupIntermediateMountNamespace returns a no-op unmountAll() and no error. +func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) { + stripNoBindOption(spec) + return func() error { return nil }, nil +} diff --git a/vendor/github.com/containers/buildah/bind/util.go b/vendor/github.com/containers/buildah/bind/util.go new file mode 100644 index 000000000..93ba4e2b7 --- /dev/null +++ b/vendor/github.com/containers/buildah/bind/util.go @@ -0,0 +1,39 @@ +package bind + +import ( + "github.com/containers/buildah/util" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + // NoBindOption is an option which, if present in a Mount structure's + // options list, will cause SetupIntermediateMountNamespace to not + // redirect it through a bind mount. 
+ NoBindOption = "nobuildahbind" +) + +func stripNoBindOption(spec *specs.Spec) { + for i := range spec.Mounts { + if util.StringInSlice(NoBindOption, spec.Mounts[i].Options) { + prunedOptions := make([]string, 0, len(spec.Mounts[i].Options)) + for _, option := range spec.Mounts[i].Options { + if option != NoBindOption { + prunedOptions = append(prunedOptions, option) + } + } + spec.Mounts[i].Options = prunedOptions + } + } +} + +func dedupeStringSlice(slice []string) []string { + done := make([]string, 0, len(slice)) + m := make(map[string]struct{}) + for _, s := range slice { + if _, present := m[s]; !present { + m[s] = struct{}{} + done = append(done, s) + } + } + return done +} diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go new file mode 100644 index 000000000..7891810a2 --- /dev/null +++ b/vendor/github.com/containers/buildah/buildah.go @@ -0,0 +1,513 @@ +package buildah + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/buildah/docker" + "github.com/containers/buildah/util" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/ioutils" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + // Package is the name of this package, used in help output and to + // identify working containers. + Package = "buildah" + // Version for the Package. Bump version in contrib/rpm/buildah.spec + // too. + Version = "1.4-dev" + // The value we use to identify what type of information, currently a + // serialized Builder structure, we are using as per-container state. + // This should only be changed when we make incompatible changes to + // that data structure, as it's used to distinguish containers which + // are "ours" from ones that aren't. + containerType = Package + " 0.0.1" + // The file in the per-container directory which we use to store our + // per-container state. If it isn't there, then the container isn't + // one of our build containers. + stateFile = Package + ".json" +) + +// PullPolicy takes the value PullIfMissing, PullAlways, or PullNever. +type PullPolicy int + +const ( + // PullIfMissing is one of the values that BuilderOptions.PullPolicy + // can take, signalling that the source image should be pulled from a + // registry if a local copy of it is not already present. + PullIfMissing PullPolicy = iota + // PullAlways is one of the values that BuilderOptions.PullPolicy can + // take, signalling that a fresh, possibly updated, copy of the image + // should be pulled from a registry before the build proceeds. + PullAlways + // PullNever is one of the values that BuilderOptions.PullPolicy can + // take, signalling that the source image should not be pulled from a + // registry if a local copy of it is not already present. + PullNever +) + +// String converts a PullPolicy into a string. +func (p PullPolicy) String() string { + switch p { + case PullIfMissing: + return "PullIfMissing" + case PullAlways: + return "PullAlways" + case PullNever: + return "PullNever" + } + return fmt.Sprintf("unrecognized policy %d", p) +} + +// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled, +// or NetworkEnabled. +type NetworkConfigurationPolicy int + +const ( + // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that the default behavior should be used. 
+ NetworkDefault NetworkConfigurationPolicy = iota + // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that network interfaces should NOT be configured for + // newly-created network namespaces. + NetworkDisabled + // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork + // can take, signalling that network interfaces should be configured for + // newly-created network namespaces. + NetworkEnabled +) + +// String formats a NetworkConfigurationPolicy as a string. +func (p NetworkConfigurationPolicy) String() string { + switch p { + case NetworkDefault: + return "NetworkDefault" + case NetworkDisabled: + return "NetworkDisabled" + case NetworkEnabled: + return "NetworkEnabled" + } + return fmt.Sprintf("unknown NetworkConfigurationPolicy %d", p) +} + +// Builder objects are used to represent containers which are being used to +// build images. They also carry potential updates which will be applied to +// the image's configuration when the container's contents are used to build an +// image. +type Builder struct { + store storage.Store + + // Args define variables that users can pass at build-time to the builder + Args map[string]string + // Type is used to help identify a build container's metadata. It + // should not be modified. + Type string `json:"type"` + // FromImage is the name of the source image which was used to create + // the container, if one was used. It should not be modified. + FromImage string `json:"image,omitempty"` + // FromImageID is the ID of the source image which was used to create + // the container, if one was used. It should not be modified. + FromImageID string `json:"image-id"` + // Config is the source image's configuration. It should not be + // modified. + Config []byte `json:"config,omitempty"` + // Manifest is the source image's manifest. It should not be modified. + Manifest []byte `json:"manifest,omitempty"` + + // Container is the name of the build container. It should not be modified. + Container string `json:"container-name,omitempty"` + // ContainerID is the ID of the build container. It should not be modified. + ContainerID string `json:"container-id,omitempty"` + // MountPoint is the last location where the container's root + // filesystem was mounted. It should not be modified. + MountPoint string `json:"mountpoint,omitempty"` + // ProcessLabel is the SELinux process label associated with the container + ProcessLabel string `json:"process-label,omitempty"` + // MountLabel is the SELinux mount label associated with the container + MountLabel string `json:"mount-label,omitempty"` + + // ImageAnnotations is a set of key-value pairs which is stored in the + // image's manifest. + ImageAnnotations map[string]string `json:"annotations,omitempty"` + // ImageCreatedBy is a description of how this container was built. + ImageCreatedBy string `json:"created-by,omitempty"` + // ImageHistoryComment is a description of how our added layers were built. + ImageHistoryComment string `json:"history-comment,omitempty"` + + // Image metadata and runtime settings, in multiple formats. + OCIv1 v1.Image `json:"ociv1,omitempty"` + Docker docker.V2Image `json:"docker,omitempty"` + // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format. + DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"` + + // Isolation controls how we handle "RUN" statements and the Run() method. 
+ Isolation Isolation + // NamespaceOptions controls how we set up the namespaces for processes that we run in the container. + NamespaceOptions NamespaceOptions + // ConfigureNetwork controls whether or not network interfaces and + // routing are configured for a new network namespace (i.e., when not + // joining another's namespace and not just using the host's + // namespace), effectively deciding whether or not the process has a + // usable network. + ConfigureNetwork NetworkConfigurationPolicy + // CNIPluginPath is the location of CNI plugin helpers, if they should be + // run from a location other than the default location. + CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. + CNIConfigDir string + // ID mapping options to use when running processes in the container with non-host user namespaces. + IDMappingOptions IDMappingOptions + // AddCapabilities is a list of capabilities to add to the default set when running + // commands in the container. + AddCapabilities []string + // DropCapabilities is a list of capabilities to remove from the default set, + // after processing the AddCapabilities set, when running commands in the container. + // If a capability appears in both lists, it will be dropped. + DropCapabilities []string + + CommonBuildOpts *CommonBuildOptions + // TopLayer is the top layer of the image + TopLayer string + // Format for the build Image + Format string +} + +// BuilderInfo are used as objects to display container information +type BuilderInfo struct { + Type string + FromImage string + FromImageID string + Config string + Manifest string + Container string + ContainerID string + MountPoint string + ProcessLabel string + MountLabel string + ImageAnnotations map[string]string + ImageCreatedBy string + OCIv1 v1.Image + Docker docker.V2Image + DefaultMountsFilePath string + Isolation string + NamespaceOptions NamespaceOptions + ConfigureNetwork string + CNIPluginPath string + CNIConfigDir string + IDMappingOptions IDMappingOptions + DefaultCapabilities []string + AddCapabilities []string + DropCapabilities []string +} + +// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it. +// This is used in the inspect command to display Manifest and Config as string and not []byte. +func GetBuildInfo(b *Builder) BuilderInfo { + return BuilderInfo{ + Type: b.Type, + FromImage: b.FromImage, + FromImageID: b.FromImageID, + Config: string(b.Config), + Manifest: string(b.Manifest), + Container: b.Container, + ContainerID: b.ContainerID, + MountPoint: b.MountPoint, + ProcessLabel: b.ProcessLabel, + ImageAnnotations: b.ImageAnnotations, + ImageCreatedBy: b.ImageCreatedBy, + OCIv1: b.OCIv1, + Docker: b.Docker, + DefaultMountsFilePath: b.DefaultMountsFilePath, + Isolation: b.Isolation.String(), + NamespaceOptions: b.NamespaceOptions, + ConfigureNetwork: fmt.Sprintf("%v", b.ConfigureNetwork), + CNIPluginPath: b.CNIPluginPath, + CNIConfigDir: b.CNIConfigDir, + IDMappingOptions: b.IDMappingOptions, + DefaultCapabilities: append([]string{}, util.DefaultCapabilities...), + AddCapabilities: append([]string{}, b.AddCapabilities...), + DropCapabilities: append([]string{}, b.DropCapabilities...), + } +} + +// CommonBuildOptions are resources that can be defined by flags for both buildah from and build-using-dockerfile +type CommonBuildOptions struct { + // AddHost is the list of hostnames to add to the build container's /etc/hosts. 
+	AddHost []string
+	// CgroupParent is the path to cgroups under which the cgroup for the container will be created.
+	CgroupParent string
+	// CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
+	CPUPeriod uint64
+	// CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
+	CPUQuota int64
+	// CPUShares (relative weight)
+	CPUShares uint64
+	// CPUSetCPUs in which to allow execution (0-3, 0,1)
+	CPUSetCPUs string
+	// CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+	CPUSetMems string
+	// Memory is the upper limit (in bytes) on how much memory running containers can use.
+	Memory int64
+	// MemorySwap limits the amount of memory and swap together.
+	MemorySwap int64
+	// LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
+	// Recognized field names are "role", "type", and "level".
+	LabelOpts []string
+	// SeccompProfilePath is the pathname of a seccomp profile.
+	SeccompProfilePath string
+	// ApparmorProfile is the name of an apparmor profile.
+	ApparmorProfile string
+	// ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
+	ShmSize string
+	// Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
+	// These types are recognized:
+	// "core": maximum core dump size (ulimit -c)
+	// "cpu": maximum CPU time (ulimit -t)
+	// "data": maximum size of a process's data segment (ulimit -d)
+	// "fsize": maximum size of new files (ulimit -f)
+	// "locks": maximum number of file locks (ulimit -x)
+	// "memlock": maximum amount of locked memory (ulimit -l)
+	// "msgqueue": maximum amount of data in message queues (ulimit -q)
+	// "nice": niceness adjustment (nice -n, ulimit -e)
+	// "nofile": maximum number of open files (ulimit -n)
+	// "nproc": maximum number of processes (ulimit -u)
+	// "rss": maximum size of a process's resident set (ulimit -m)
+	// "rtprio": maximum real-time scheduling priority (ulimit -r)
+	// "rttime": maximum amount of real-time execution between blocking syscalls
+	// "sigpending": maximum number of pending signals (ulimit -i)
+	// "stack": maximum stack size (ulimit -s)
+	Ulimit []string
+	// Volumes to bind mount into the container
+	Volumes []string
+}
+
+// BuilderOptions are used to initialize a new Builder.
+type BuilderOptions struct {
+	// Args define variables that users can pass at build-time to the builder
+	Args map[string]string
+	// FromImage is the name of the image which should be used as the
+	// starting point for the container. It can be set to an empty value
+	// or "scratch" to indicate that the container should not be based on
+	// an image.
+	FromImage string
+	// Container is a desired name for the build container.
+	Container string
+	// PullPolicy decides whether or not we should pull the image that
+	// we're using as a base image. It should be PullIfMissing,
+	// PullAlways, or PullNever.
+	PullPolicy PullPolicy
+	// Registry is a value which is prepended to the image's name, if it
+	// needs to be pulled and the image name alone can not be resolved to a
+	// reference to a source image. No separator is implicitly added.
+	Registry string
+	// Transport is a value which is prepended to the image's name, if it
+	// needs to be pulled and the image name alone, or the image name and
+	// the registry together, can not be resolved to a reference to a
+	// source image. No separator is implicitly added.
+ Transport string + // Mount signals to NewBuilder() that the container should be mounted + // immediately. + Mount bool + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to log the reading + // of the source image from a registry, if we end up pulling the image. + ReportWriter io.Writer + // github.com/containers/image/types SystemContext to hold credentials + // and other authentication/authorization information. + SystemContext *types.SystemContext + // DefaultMountsFilePath is the file path holding the mounts to be + // mounted in "host-path:container-path" format + DefaultMountsFilePath string + // Isolation controls how we handle "RUN" statements and the Run() + // method. + Isolation Isolation + // NamespaceOptions controls how we set up namespaces for processes that + // we might need to run using the container's root filesystem. + NamespaceOptions NamespaceOptions + // ConfigureNetwork controls whether or not network interfaces and + // routing are configured for a new network namespace (i.e., when not + // joining another's namespace and not just using the host's + // namespace), effectively deciding whether or not the process has a + // usable network. + ConfigureNetwork NetworkConfigurationPolicy + // CNIPluginPath is the location of CNI plugin helpers, if they should be + // run from a location other than the default location. + CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. + CNIConfigDir string + // ID mapping options to use if we're setting up our own user namespace. + IDMappingOptions *IDMappingOptions + // AddCapabilities is a list of capabilities to add to the default set when + // running commands in the container. + AddCapabilities []string + // DropCapabilities is a list of capabilities to remove from the default set, + // after processing the AddCapabilities set, when running commands in the + // container. If a capability appears in both lists, it will be dropped. + DropCapabilities []string + + CommonBuildOpts *CommonBuildOptions + // Format for the container image + Format string +} + +// ImportOptions are used to initialize a Builder from an existing container +// which was created elsewhere. +type ImportOptions struct { + // Container is the name of the build container. + Container string + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string +} + +// ImportFromImageOptions are used to initialize a Builder from an image. +type ImportFromImageOptions struct { + // Image is the name or ID of the image we'd like to examine. + Image string + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. 
+ SignaturePolicyPath string + // github.com/containers/image/types SystemContext to hold information + // about which registries we should check for completing image names + // that don't include a domain portion. + SystemContext *types.SystemContext +} + +// NewBuilder creates a new build container. +func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { + return newBuilder(ctx, store, options) +} + +// ImportBuilder creates a new build configuration using an already-present +// container. +func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) { + return importBuilder(ctx, store, options) +} + +// ImportBuilderFromImage creates a new builder configuration using an image. +// The returned object can be modified and examined, but it can not be saved +// or committed because it is not associated with a working container. +func ImportBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) { + return importBuilderFromImage(ctx, store, options) +} + +// OpenBuilder loads information about a build container given its name or ID. +func OpenBuilder(store storage.Store, container string) (*Builder, error) { + cdir, err := store.ContainerDirectory(container) + if err != nil { + return nil, err + } + buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) + if err != nil { + return nil, err + } + b := &Builder{} + err = json.Unmarshal(buildstate, &b) + if err != nil { + return nil, err + } + if b.Type != containerType { + return nil, errors.Errorf("container is not a %s container", Package) + } + b.store = store + b.fixupConfig() + return b, nil +} + +// OpenBuilderByPath loads information about a build container given a +// path to the container's root filesystem +func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { + containers, err := store.Containers() + if err != nil { + return nil, err + } + abs, err := filepath.Abs(path) + if err != nil { + return nil, err + } + builderMatchesPath := func(b *Builder, path string) bool { + return (b.MountPoint == path) + } + for _, container := range containers { + cdir, err := store.ContainerDirectory(container.ID) + if err != nil { + return nil, err + } + buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) + if err != nil { + return nil, err + } + b := &Builder{} + err = json.Unmarshal(buildstate, &b) + if err == nil && b.Type == containerType && builderMatchesPath(b, abs) { + b.store = store + b.fixupConfig() + return b, nil + } + } + return nil, storage.ErrContainerUnknown +} + +// OpenAllBuilders loads all containers which have a state file that we use in +// their data directory, typically so that they can be listed. +func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { + containers, err := store.Containers() + if err != nil { + return nil, err + } + for _, container := range containers { + cdir, err := store.ContainerDirectory(container.ID) + if err != nil { + return nil, err + } + buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) + if err != nil && os.IsNotExist(err) { + continue + } + b := &Builder{} + err = json.Unmarshal(buildstate, &b) + if err == nil && b.Type == containerType { + b.store = store + b.fixupConfig() + builders = append(builders, b) + } + } + return builders, nil +} + +// Save saves the builder's current state to the build container's metadata. 
+// This should not need to be called directly, as other methods of the Builder +// object take care of saving their state. +func (b *Builder) Save() error { + buildstate, err := json.Marshal(b) + if err != nil { + return err + } + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600) +} diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go new file mode 100644 index 000000000..58b7a883c --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -0,0 +1,1308 @@ +// +build linux + +package chroot + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/containers/buildah/bind" + "github.com/containers/buildah/unshare" + "github.com/containers/buildah/util" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/sys/unix" +) + +const ( + // runUsingChrootCommand is a command we use as a key for reexec + runUsingChrootCommand = "buildah-chroot-runtime" + // runUsingChrootExec is a command we use as a key for reexec + runUsingChrootExecCommand = "buildah-chroot-exec" +) + +var ( + rlimitsMap = map[string]int{ + "RLIMIT_AS": unix.RLIMIT_AS, + "RLIMIT_CORE": unix.RLIMIT_CORE, + "RLIMIT_CPU": unix.RLIMIT_CPU, + "RLIMIT_DATA": unix.RLIMIT_DATA, + "RLIMIT_FSIZE": unix.RLIMIT_FSIZE, + "RLIMIT_LOCKS": unix.RLIMIT_LOCKS, + "RLIMIT_MEMLOCK": unix.RLIMIT_MEMLOCK, + "RLIMIT_MSGQUEUE": unix.RLIMIT_MSGQUEUE, + "RLIMIT_NICE": unix.RLIMIT_NICE, + "RLIMIT_NOFILE": unix.RLIMIT_NOFILE, + "RLIMIT_NPROC": unix.RLIMIT_NPROC, + "RLIMIT_RSS": unix.RLIMIT_RSS, + "RLIMIT_RTPRIO": unix.RLIMIT_RTPRIO, + "RLIMIT_RTTIME": unix.RLIMIT_RTTIME, + "RLIMIT_SIGPENDING": unix.RLIMIT_SIGPENDING, + "RLIMIT_STACK": unix.RLIMIT_STACK, + } + rlimitsReverseMap = map[int]string{} +) + +func init() { + reexec.Register(runUsingChrootCommand, runUsingChrootMain) + reexec.Register(runUsingChrootExecCommand, runUsingChrootExecMain) + for limitName, limitNumber := range rlimitsMap { + rlimitsReverseMap[limitNumber] = limitName + } +} + +type runUsingChrootSubprocOptions struct { + Spec *specs.Spec + BundlePath string + UIDMappings []syscall.SysProcIDMap + GIDMappings []syscall.SysProcIDMap +} + +type runUsingChrootExecSubprocOptions struct { + Spec *specs.Spec + BundlePath string +} + +// RunUsingChroot runs a chrooted process, using some of the settings from the +// passed-in spec, and using the specified bundlePath to hold temporary files, +// directories, and mountpoints. +func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) { + var confwg sync.WaitGroup + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Write the runtime configuration, mainly for debugging. 
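+	// Editor's note (illustrative, not vendored code): the file written below
+	// is the bundle's config.json, so a failed run can be inspected with
+	// ordinary tools, for example:
+	//
+	//	jq .process.args <bundlePath>/config.json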
+ specbytes, err := json.Marshal(spec) + if err != nil { + return err + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return errors.Wrapf(err, "error storing runtime configuration") + } + logrus.Debugf("config = %v", string(specbytes)) + + // Run the grandparent subprocess in a user namespace that reuses the mappings that we have. + uidmap, gidmap, err := util.GetHostIDMappings("") + if err != nil { + return err + } + for i := range uidmap { + uidmap[i].HostID = uidmap[i].ContainerID + } + for i := range gidmap { + gidmap[i].HostID = gidmap[i].ContainerID + } + + // Default to using stdin/stdout/stderr if we weren't passed objects to use. + if stdin == nil { + stdin = os.Stdin + } + if stdout == nil { + stdout = os.Stdout + } + if stderr == nil { + stderr = os.Stderr + } + + // Create a pipe for passing configuration down to the next process. + preader, pwriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating configuration pipe") + } + config, conferr := json.Marshal(runUsingChrootSubprocOptions{ + Spec: spec, + BundlePath: bundlePath, + }) + if conferr != nil { + return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingChrootCommand) + } + + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. + if spec.Process.Terminal && terminal.IsTerminal(unix.Stdin) { + state, err := terminal.MakeRaw(unix.Stdin) + if err != nil { + logrus.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = terminal.Restore(unix.Stdin, state); err != nil { + logrus.Errorf("unable to restore terminal state: %v", err) + } + }() + } + } + + // Raise any resource limits that are higher than they are now, before + // we drop any more privileges. + if err = setRlimits(spec, false, true); err != nil { + return err + } + + // Start the grandparent subprocess. + cmd := unshare.Command(runUsingChrootCommand) + cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr + cmd.Dir = "/" + cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...) + cmd.UnshareFlags = syscall.CLONE_NEWUSER + cmd.UidMappings = uidmap + cmd.GidMappings = gidmap + cmd.GidMappingsEnableSetgroups = true + + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + pwriter.Close() + confwg.Done() + }() + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) + err = cmd.Run() + confwg.Wait() + if err == nil { + return conferr + } + return err +} + +// main() for grandparent subprocess. Its main job is to shuttle stdio back +// and forth, managing a pseudo-terminal if we want one, for our child, the +// parent subprocess. +func runUsingChrootMain() { + var options runUsingChrootSubprocOptions + + runtime.LockOSThread() + + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + os.Unsetenv("LOGLEVEL") + } + + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + + // Prepare to shuttle stdio back and forth. 
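+	// Editor's note (illustrative sketch, not vendored code): the relays map
+	// built below maps each descriptor we read from to the descriptor its
+	// data should be written to; with a terminal it ends up holding, e.g.:
+	//
+	//	relays[ptyMasterFd] = unix.Stdout // container output -> our stdout
+	//	relays[unix.Stdin]  = ptyMasterFd // our stdin -> container terminal
+	//
+	// The goroutine further down polls every key and drains the data through
+	// per-target buffers.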
+	rootUid32, rootGid32, err := util.GetHostRootIDs(options.Spec)
+	if err != nil {
+		logrus.Errorf("error determining ownership for container stdio: %v", err)
+		os.Exit(1)
+	}
+	rootUid := int(rootUid32)
+	rootGid := int(rootGid32)
+	relays := make(map[int]int)
+	closeOnceRunning := []*os.File{}
+	var ctty *os.File
+	var stdin io.Reader
+	var stdinCopy io.WriteCloser
+	var stdout io.Writer
+	var stderr io.Writer
+	fdDesc := make(map[int]string)
+	deferred := func() {}
+	if options.Spec.Process.Terminal {
+		// Create a pseudo-terminal -- open a copy of the master side.
+		ptyMasterFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600)
+		if err != nil {
+			logrus.Errorf("error opening PTY master using /dev/ptmx: %v", err)
+			os.Exit(1)
+		}
+		// Set the kernel's lock to "unlocked".
+		locked := 0
+		if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 {
+			logrus.Errorf("error unlocking PTY descriptor: %v", err)
+			os.Exit(1)
+		}
+		// Get a handle for the other end.
+		ptyFd, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCGPTPEER, unix.O_RDWR|unix.O_NOCTTY)
+		if int(ptyFd) == -1 {
+			if errno, isErrno := err.(syscall.Errno); !isErrno || (errno != syscall.EINVAL && errno != syscall.ENOTTY) {
+				logrus.Errorf("error getting PTY descriptor: %v", err)
+				os.Exit(1)
+			}
+			// EINVAL means the kernel's too old to understand TIOCGPTPEER. Try TIOCGPTN.
+			ptyN, err := unix.IoctlGetInt(ptyMasterFd, unix.TIOCGPTN)
+			if err != nil {
+				logrus.Errorf("error getting PTY number: %v", err)
+				os.Exit(1)
+			}
+			ptyName := fmt.Sprintf("/dev/pts/%d", ptyN)
+			fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620)
+			if err != nil {
+				logrus.Errorf("error opening PTY %q: %v", ptyName, err)
+				os.Exit(1)
+			}
+			ptyFd = uintptr(fd)
+		}
+		// Make notes about what's going where.
+		relays[ptyMasterFd] = unix.Stdout
+		relays[unix.Stdin] = ptyMasterFd
+		fdDesc[ptyMasterFd] = "container terminal"
+		fdDesc[unix.Stdin] = "stdin"
+		fdDesc[unix.Stdout] = "stdout"
+		winsize := &unix.Winsize{}
+		// Set the pseudoterminal's size to the configured size, or our own.
+		if options.Spec.Process.ConsoleSize != nil {
+			// Use configured sizes.
+			winsize.Row = uint16(options.Spec.Process.ConsoleSize.Height)
+			winsize.Col = uint16(options.Spec.Process.ConsoleSize.Width)
+		} else {
+			if terminal.IsTerminal(unix.Stdin) {
+				// Use the size of our terminal.
+				winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ)
+				if err != nil {
+					logrus.Debugf("error reading current terminal's size")
+					winsize.Row = 0
+					winsize.Col = 0
+				}
+			}
+		}
+		if winsize.Row != 0 && winsize.Col != 0 {
+			if err = unix.IoctlSetWinsize(int(ptyFd), unix.TIOCSWINSZ, winsize); err != nil {
+				logrus.Warnf("error setting terminal size for pty")
+			}
+			// FIXME - if we're connected to a terminal, we should
+			// be passing the updated terminal size down when we
+			// receive a SIGWINCH.
+		}
+		// Open an *os.File object that we can pass to our child.
+		ctty = os.NewFile(ptyFd, "/dev/tty")
+		// Set ownership for the PTY.
+		if err = ctty.Chown(rootUid, rootGid); err != nil {
+			var cttyInfo unix.Stat_t
+			err2 := unix.Fstat(int(ptyFd), &cttyInfo)
+			from := ""
+			op := "setting"
+			if err2 == nil {
+				op = "changing"
+				from = fmt.Sprintf("from %d/%d ", cttyInfo.Uid, cttyInfo.Gid)
+			}
+			logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUid, rootGid, err)
+		}
+		// Set permissions on the PTY.
+ if err = ctty.Chmod(0620); err != nil { + logrus.Errorf("error setting permissions of container PTY: %v", err) + os.Exit(1) + } + // Make a note that our child (the parent subprocess) should + // have the PTY connected to its stdio, and that we should + // close it once it's running. + stdin = ctty + stdout = ctty + stderr = ctty + closeOnceRunning = append(closeOnceRunning, ctty) + } else { + // Create pipes for stdio. + stdinRead, stdinWrite, err := os.Pipe() + if err != nil { + logrus.Errorf("error opening pipe for stdin: %v", err) + } + stdoutRead, stdoutWrite, err := os.Pipe() + if err != nil { + logrus.Errorf("error opening pipe for stdout: %v", err) + } + stderrRead, stderrWrite, err := os.Pipe() + if err != nil { + logrus.Errorf("error opening pipe for stderr: %v", err) + } + // Make notes about what's going where. + relays[unix.Stdin] = int(stdinWrite.Fd()) + relays[int(stdoutRead.Fd())] = unix.Stdout + relays[int(stderrRead.Fd())] = unix.Stderr + fdDesc[int(stdinWrite.Fd())] = "container stdin pipe" + fdDesc[int(stdoutRead.Fd())] = "container stdout pipe" + fdDesc[int(stderrRead.Fd())] = "container stderr pipe" + fdDesc[unix.Stdin] = "stdin" + fdDesc[unix.Stdout] = "stdout" + fdDesc[unix.Stderr] = "stderr" + // Set ownership for the pipes. + if err = stdinRead.Chown(rootUid, rootGid); err != nil { + logrus.Errorf("error setting ownership of container stdin pipe: %v", err) + os.Exit(1) + } + if err = stdoutWrite.Chown(rootUid, rootGid); err != nil { + logrus.Errorf("error setting ownership of container stdout pipe: %v", err) + os.Exit(1) + } + if err = stderrWrite.Chown(rootUid, rootGid); err != nil { + logrus.Errorf("error setting ownership of container stderr pipe: %v", err) + os.Exit(1) + } + // Make a note that our child (the parent subprocess) should + // have the pipes connected to its stdio, and that we should + // close its ends of them once it's running. + stdin = stdinRead + stdout = stdoutWrite + stderr = stderrWrite + closeOnceRunning = append(closeOnceRunning, stdinRead, stdoutWrite, stderrWrite) + stdinCopy = stdinWrite + defer stdoutRead.Close() + defer stderrRead.Close() + } + // A helper that returns false if err is an error that would cause us + // to give up. 
+	logIfNotRetryable := func(err error, what string) (retry bool) {
+		if err == nil {
+			return true
+		}
+		if errno, isErrno := err.(syscall.Errno); isErrno {
+			switch errno {
+			case syscall.EINTR, syscall.EAGAIN:
+				return true
+			}
+		}
+		logrus.Error(what)
+		return false
+	}
+	for readFd, writeFd := range relays {
+		if err := unix.SetNonblock(readFd, true); err != nil {
+			logrus.Errorf("error setting descriptor %d (%s) non-blocking: %v", readFd, fdDesc[readFd], err)
+			return
+		}
+		if err := unix.SetNonblock(writeFd, false); err != nil {
+			logrus.Errorf("error setting descriptor %d (%s) blocking: %v", writeFd, fdDesc[writeFd], err)
+			return
+		}
+	}
+	go func() {
+		buffers := make(map[int]*bytes.Buffer)
+		for _, writeFd := range relays {
+			buffers[writeFd] = new(bytes.Buffer)
+		}
+		pollTimeout := -1
+		for len(relays) > 0 {
+			fds := make([]unix.PollFd, 0, len(relays))
+			for fd := range relays {
+				fds = append(fds, unix.PollFd{Fd: int32(fd), Events: unix.POLLIN | unix.POLLHUP})
+			}
+			_, err := unix.Poll(fds, pollTimeout)
+			if !logIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) {
+				return
+			}
+			removeFds := make(map[int]struct{})
+			for _, rfd := range fds {
+				if rfd.Revents&unix.POLLHUP == unix.POLLHUP {
+					removeFds[int(rfd.Fd)] = struct{}{}
+				}
+				if rfd.Revents&unix.POLLNVAL == unix.POLLNVAL {
+					logrus.Debugf("error polling descriptor %s: closed?", fdDesc[int(rfd.Fd)])
+					removeFds[int(rfd.Fd)] = struct{}{}
+				}
+				if rfd.Revents&unix.POLLIN == 0 {
+					continue
+				}
+				b := make([]byte, 8192)
+				nread, err := unix.Read(int(rfd.Fd), b)
+				logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+				if nread > 0 {
+					if wfd, ok := relays[int(rfd.Fd)]; ok {
+						nwritten, err := buffers[wfd].Write(b[:nread])
+						if err != nil {
+							logrus.Debugf("buffer: %v", err)
+							continue
+						}
+						if nwritten != nread {
+							logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nread, nwritten)
+							continue
+						}
+					}
+					// If this is the last of the data we'll be able to read
+					// from this descriptor, read as much as there is to read.
+					for rfd.Revents&unix.POLLHUP == unix.POLLHUP {
+						nr, err := unix.Read(int(rfd.Fd), b)
+						logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err))
+						if nr <= 0 {
+							break
+						}
+						if wfd, ok := relays[int(rfd.Fd)]; ok {
+							nwritten, err := buffers[wfd].Write(b[:nr])
+							if err != nil {
+								logrus.Debugf("buffer: %v", err)
+								break
+							}
+							if nwritten != nr {
+								logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten)
+								break
+							}
+						}
+					}
+				}
+				if nread == 0 {
+					removeFds[int(rfd.Fd)] = struct{}{}
+				}
+			}
+			pollTimeout = -1
+			for wfd, buffer := range buffers {
+				if buffer.Len() > 0 {
+					nwritten, err := unix.Write(wfd, buffer.Bytes())
+					logIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err))
+					if nwritten >= 0 {
+						_ = buffer.Next(nwritten)
+					}
+				}
+				if buffer.Len() > 0 {
+					pollTimeout = 100
+				}
+			}
+			for rfd := range removeFds {
+				if !options.Spec.Process.Terminal && rfd == unix.Stdin {
+					stdinCopy.Close()
+				}
+				delete(relays, rfd)
+			}
+		}
+	}()
+
+	// Set up mounts and namespaces, and run the parent subprocess.
+	status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning)
+	deferred()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Pass the process's exit status back to the caller by exiting with the same status.
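+	// Editor's note: when the subprocess dies on a signal, the code below
+	// reports the signal and exits 1; shells conventionally encode this as
+	// 128+signum instead, roughly:
+	//
+	//	os.Exit(128 + int(status.Signal()))
+	//
+	// so callers should not expect to distinguish signals from plain failures.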
+ if status.Exited() { + if status.ExitStatus() != 0 { + fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", status.ExitStatus()) + } + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", status.Signal()) + os.Exit(1) + } +} + +// runUsingChroot, still in the grandparent process, sets up various bind +// mounts and then runs the parent process in its own user namespace with the +// necessary ID mappings. +func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io.Reader, stdout, stderr io.Writer, closeOnceRunning []*os.File) (wstatus unix.WaitStatus, err error) { + var confwg sync.WaitGroup + + // Create a new mount namespace for ourselves and bind mount everything to a new location. + undoIntermediates, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) + if err != nil { + return 1, err + } + defer func() { + undoIntermediates() + }() + + // Bind mount in our filesystems. + undoChroots, err := setupChrootBindMounts(spec, bundlePath) + if err != nil { + return 1, err + } + defer func() { + undoChroots() + }() + + // Create a pipe for passing configuration down to the next process. + preader, pwriter, err := os.Pipe() + if err != nil { + return 1, errors.Wrapf(err, "error creating configuration pipe") + } + config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{ + Spec: spec, + BundlePath: bundlePath, + }) + if conferr != nil { + fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q", runUsingChrootExecCommand) + os.Exit(1) + } + + // Apologize for the namespace configuration that we're about to ignore. + logNamespaceDiagnostics(spec) + + // If we have configured ID mappings, set them here so that they can apply to the child. + hostUidmap, hostGidmap, err := util.GetHostIDMappings("") + if err != nil { + return 1, err + } + uidmap, gidmap := spec.Linux.UIDMappings, spec.Linux.GIDMappings + if len(uidmap) == 0 { + // No UID mappings are configured for the container. Borrow our parent's mappings. + uidmap = append([]specs.LinuxIDMapping{}, hostUidmap...) + for i := range uidmap { + uidmap[i].HostID = uidmap[i].ContainerID + } + } + if len(gidmap) == 0 { + // No GID mappings are configured for the container. Borrow our parent's mappings. + gidmap = append([]specs.LinuxIDMapping{}, hostGidmap...) + for i := range gidmap { + gidmap[i].HostID = gidmap[i].ContainerID + } + } + + // Start the parent subprocess. + cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) + cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr + cmd.Dir = "/" + cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...) + cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWUTS | syscall.CLONE_NEWNS + cmd.UidMappings = uidmap + cmd.GidMappings = gidmap + cmd.GidMappingsEnableSetgroups = true + if ctty != nil { + cmd.Setsid = true + cmd.Ctty = ctty + } + cmd.OOMScoreAdj = spec.Process.OOMScoreAdj + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
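+	// Editor's note (illustrative, not vendored code): prepending preader to
+	// ExtraFiles above makes it descriptor 3 in the reexec'd child (0-2 are
+	// stdio), which is why runUsingChrootExecMain recovers the configuration
+	// with, roughly:
+	//
+	//	confPipe := os.NewFile(3, "confpipe")
+	//	var opts runUsingChrootExecSubprocOptions
+	//	err := json.NewDecoder(confPipe).Decode(&opts)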
+ cmd.Hook = func(int) error { + for _, f := range closeOnceRunning { + f.Close() + } + return nil + } + + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) + confwg.Add(1) + go func() { + _, conferr = io.Copy(pwriter, bytes.NewReader(config)) + pwriter.Close() + confwg.Done() + }() + err = cmd.Run() + confwg.Wait() + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { + if waitStatus.Exited() { + if waitStatus.ExitStatus() != 0 { + fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus()) + } + os.Exit(waitStatus.ExitStatus()) + } else if waitStatus.Signaled() { + fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal()) + os.Exit(1) + } + } + } + fmt.Fprintf(os.Stderr, "process exited with error: %v", err) + os.Exit(1) + } + + return 0, nil +} + +// main() for parent subprocess. Its main job is to try to make our +// environment look like the one described by the runtime configuration blob, +// and then launch the intended command as a child. +func runUsingChrootExecMain() { + args := os.Args[1:] + var options runUsingChrootExecSubprocOptions + var err error + + runtime.LockOSThread() + + // Set logging. + if level := os.Getenv("LOGLEVEL"); level != "" { + if ll, err := strconv.Atoi(level); err == nil { + logrus.SetLevel(logrus.Level(ll)) + } + os.Unsetenv("LOGLEVEL") + } + + // Unpack our configuration. + confPipe := os.NewFile(3, "confpipe") + if confPipe == nil { + fmt.Fprintf(os.Stderr, "error reading options pipe\n") + os.Exit(1) + } + defer confPipe.Close() + if err := json.NewDecoder(confPipe).Decode(&options); err != nil { + fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) + os.Exit(1) + } + + // Set the hostname. We're already in a distinct UTS namespace and are admins in the user + // namespace which created it, so we shouldn't get a permissions error, but seccomp policy + // might deny our attempt to call sethostname() anyway, so log a debug message for that. + if options.Spec.Hostname != "" { + if err := unix.Sethostname([]byte(options.Spec.Hostname)); err != nil { + logrus.Debugf("failed to set hostname %q for process: %v", options.Spec.Hostname, err) + } + } + + // Try to chroot into the root. Do this before we potentially block the syscall via the + // seccomp profile. 
+	var oldst, newst unix.Stat_t
+	if err := unix.Stat(options.Spec.Root.Path, &oldst); err != nil {
+		fmt.Fprintf(os.Stderr, "error stat()ing intended root directory %q: %v\n", options.Spec.Root.Path, err)
+		os.Exit(1)
+	}
+	if err := unix.Chdir(options.Spec.Root.Path); err != nil {
+		fmt.Fprintf(os.Stderr, "error chdir()ing to intended root directory %q: %v\n", options.Spec.Root.Path, err)
+		os.Exit(1)
+	}
+	if err := unix.Chroot(options.Spec.Root.Path); err != nil {
+		fmt.Fprintf(os.Stderr, "error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err)
+		os.Exit(1)
+	}
+	if err := unix.Stat("/", &newst); err != nil {
+		fmt.Fprintf(os.Stderr, "error stat()ing current root directory: %v\n", err)
+		os.Exit(1)
+	}
+	if oldst.Dev != newst.Dev || oldst.Ino != newst.Ino {
+		fmt.Fprintf(os.Stderr, "unknown error chroot()ing into directory %q\n", options.Spec.Root.Path)
+		os.Exit(1)
+	}
+	logrus.Debugf("chrooted into %q", options.Spec.Root.Path)
+
+	// not doing because it's still shared: creating devices
+	// not doing because it's not applicable: setting annotations
+	// not doing because it's still shared: setting sysctl settings
+	// not doing because cgroupfs is read only: configuring control groups
+	// -> this means we can't use the freezer to make sure there aren't any lingering processes
+	// -> this means we ignore cgroups-based controls
+	// not doing because we don't set any in the config: running hooks
+	// not doing because we don't set it in the config: setting rootfs read-only
+	// not doing because we don't set it in the config: setting rootfs propagation
+	logrus.Debugf("setting apparmor profile")
+	if err = setApparmorProfile(options.Spec); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting apparmor profile for process: %v\n", err)
+		os.Exit(1)
+	}
+	if err = setSelinuxLabel(options.Spec); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting SELinux label for process: %v\n", err)
+		os.Exit(1)
+	}
+	logrus.Debugf("setting capabilities")
+	if err := setCapabilities(options.Spec); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting capabilities for process: %v\n", err)
+		os.Exit(1)
+	}
+	if err = setSeccomp(options.Spec); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting seccomp filter for process: %v\n", err)
+		os.Exit(1)
+	}
+	logrus.Debugf("setting resource limits")
+	if err = setRlimits(options.Spec, false, false); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting process resource limits for process: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Try to change to the directory.
+	cwd := options.Spec.Process.Cwd
+	if !filepath.IsAbs(cwd) {
+		cwd = "/" + cwd
+	}
+	cwd = filepath.Clean(cwd)
+	if err := unix.Chdir("/"); err != nil {
+		fmt.Fprintf(os.Stderr, "error chdir()ing into new root directory %q: %v\n", options.Spec.Root.Path, err)
+		os.Exit(1)
+	}
+	if err := unix.Chdir(cwd); err != nil {
+		fmt.Fprintf(os.Stderr, "error chdir()ing into directory %q under root %q: %v\n", cwd, options.Spec.Root.Path, err)
+		os.Exit(1)
+	}
+	logrus.Debugf("changed working directory to %q", cwd)
+
+	// Drop privileges.
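+	// Editor's note: the ordering below is deliberate and worth calling out:
+	// supplemental groups and the GID must be changed while we still hold the
+	// old UID's privileges, so setresuid() has to come last:
+	//
+	//	syscall.Setgroups(gids)          // 1. supplemental groups
+	//	syscall.Setresgid(gid, gid, gid) // 2. primary GID
+	//	syscall.Setresuid(uid, uid, uid) // 3. UID, dropped last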
+	user := options.Spec.Process.User
+	if len(user.AdditionalGids) > 0 {
+		gids := make([]int, len(user.AdditionalGids))
+		for i := range user.AdditionalGids {
+			gids[i] = int(user.AdditionalGids[i])
+		}
+		logrus.Debugf("setting supplemental groups")
+		if err = syscall.Setgroups(gids); err != nil {
+			fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v\n", err)
+			os.Exit(1)
+		}
+	} else {
+		logrus.Debugf("clearing supplemental groups")
+		if err = syscall.Setgroups([]int{}); err != nil {
+			fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v\n", err)
+			os.Exit(1)
+		}
+	}
+	logrus.Debugf("setting gid")
+	if err = syscall.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting GID: %v\n", err)
+		os.Exit(1)
+	}
+	logrus.Debugf("setting uid")
+	if err = syscall.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil {
+		fmt.Fprintf(os.Stderr, "error setting UID: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Actually run the specified command.
+	cmd := exec.Command(args[0], args[1:]...)
+	cmd.Env = options.Spec.Process.Env
+	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+	cmd.Dir = cwd
+	logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH"))
+	if err = cmd.Run(); err != nil {
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+				if waitStatus.Exited() {
+					if waitStatus.ExitStatus() != 0 {
+						fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus())
+					}
+					os.Exit(waitStatus.ExitStatus())
+				} else if waitStatus.Signaled() {
+					fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal())
+					os.Exit(1)
+				}
+			}
+		}
+		fmt.Fprintf(os.Stderr, "process exited with error: %v\n", err)
+		os.Exit(1)
+	}
+}
+
+// logNamespaceDiagnostics knows which namespaces we want to create.
+// Output debug messages when that differs from what we're being asked to do.
+func logNamespaceDiagnostics(spec *specs.Spec) { + sawMountNS := false + sawUserNS := false + sawUTSNS := false + for _, ns := range spec.Linux.Namespaces { + switch ns.Type { + case specs.CgroupNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join cgroup namespace, sorry about that") + } else { + logrus.Debugf("unable to create cgroup namespace, sorry about that") + } + case specs.IPCNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join IPC namespace, sorry about that") + } else { + logrus.Debugf("unable to create IPC namespace, sorry about that") + } + case specs.MountNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join mount namespace %q, creating a new one", ns.Path) + } + sawMountNS = true + case specs.NetworkNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join network namespace, sorry about that") + } else { + logrus.Debugf("unable to create network namespace, sorry about that") + } + case specs.PIDNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join PID namespace, sorry about that") + } else { + logrus.Debugf("unable to create PID namespace, sorry about that") + } + case specs.UserNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join user namespace %q, creating a new one", ns.Path) + } + sawUserNS = true + case specs.UTSNamespace: + if ns.Path != "" { + logrus.Debugf("unable to join UTS namespace %q, creating a new one", ns.Path) + } + sawUTSNS = true + } + } + if !sawMountNS { + logrus.Debugf("mount namespace not requested, but creating a new one anyway") + } + if !sawUserNS { + logrus.Debugf("user namespace not requested, but creating a new one anyway") + } + if !sawUTSNS { + logrus.Debugf("UTS namespace not requested, but creating a new one anyway") + } +} + +// setApparmorProfile sets the apparmor profile for ourselves, and hopefully any child processes that we'll start. +func setApparmorProfile(spec *specs.Spec) error { + if !apparmor.IsEnabled() || spec.Process.ApparmorProfile == "" { + return nil + } + if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil { + return errors.Wrapf(err, "error setting apparmor profile to %q", spec.Process.ApparmorProfile) + } + return nil +} + +// setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. 
+func setCapabilities(spec *specs.Spec) error { + caps, err := capability.NewPid(0) + if err != nil { + return errors.Wrapf(err, "error reading capabilities of current process") + } + capMap := map[capability.CapType][]string{ + capability.BOUNDING: spec.Process.Capabilities.Bounding, + capability.EFFECTIVE: spec.Process.Capabilities.Effective, + capability.INHERITABLE: spec.Process.Capabilities.Inheritable, + capability.PERMITTED: spec.Process.Capabilities.Permitted, + capability.AMBIENT: spec.Process.Capabilities.Ambient, + } + knownCaps := capability.List() + for capType, capList := range capMap { + caps.Clear(capType) + for _, capToSet := range capList { + cap := capability.CAP_LAST_CAP + for _, c := range knownCaps { + if strings.EqualFold("CAP_"+c.String(), capToSet) { + cap = c + break + } + } + if cap == capability.CAP_LAST_CAP { + return errors.Errorf("error mapping capability %q to a number", capToSet) + } + caps.Set(capType, cap) + } + } + for capType := range capMap { + if err = caps.Apply(capType); err != nil { + return errors.Wrapf(err, "error setting %s capabilities to %#v", capType.String(), capMap[capType]) + } + } + return nil +} + +// parses the resource limits for ourselves and any processes that +// we'll start into a format that's more in line with the kernel APIs +func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) { + if spec.Process == nil { + return nil, nil + } + parsed := make(map[int]unix.Rlimit) + for _, limit := range spec.Process.Rlimits { + resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)] + if !recognized { + return nil, errors.Errorf("error parsing limit type %q", limit.Type) + } + parsed[resource] = unix.Rlimit{Cur: limit.Soft, Max: limit.Hard} + } + return parsed, nil +} + +// setRlimits sets any resource limits that we want to apply to processes that +// we'll start. +func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error { + limits, err := parseRlimits(spec) + if err != nil { + return err + } + for resource, desired := range limits { + var current unix.Rlimit + if err := unix.Getrlimit(resource, ¤t); err != nil { + return errors.Wrapf(err, "error reading %q limit", rlimitsReverseMap[resource]) + } + if desired.Max > current.Max && onlyLower { + // this would raise a hard limit, and we're only here to lower them + continue + } + if desired.Max < current.Max && onlyRaise { + // this would lower a hard limit, and we're only here to raise them + continue + } + if err := unix.Setrlimit(resource, &desired); err != nil { + return errors.Wrapf(err, "error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d)", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max) + } + } + return nil +} + +// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a +// callback that will clean up its work. +func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) { + var fs unix.Statfs_t + removes := []string{} + undoBinds = func() error { + if err2 := bind.UnmountMountpoints(spec.Root.Path, removes); err2 != nil { + logrus.Warnf("pkg/chroot: error unmounting %q: %v", spec.Root.Path, err2) + if err == nil { + err = err2 + } + } + return err + } + + // Now bind mount all of those things to be under the rootfs's location in this + // mount namespace. 
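+	// Editor's note (a sketch of the intent, not vendored code): on Linux,
+	// options like "ro" or "nodev" are ignored on the initial MS_BIND mount
+	// and only take effect on a follow-up MS_REMOUNT|MS_BIND, which is why
+	// each bind below is followed by a Statfs() check and, where needed, a
+	// remount along the lines of:
+	//
+	//	unix.Mount(target, target, "bind", flags|unix.MS_REMOUNT, "")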
+ commonFlags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_PRIVATE) + bindFlags := commonFlags | unix.MS_NODEV + devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY + procFlags := devFlags | unix.MS_NODEV + sysFlags := devFlags | unix.MS_NODEV | unix.MS_RDONLY + + // Bind /dev read-only. + subDev := filepath.Join(spec.Root.Path, "/dev") + if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil { + if os.IsNotExist(err) { + err = os.Mkdir(subDev, 0700) + if err == nil { + err = unix.Mount("/dev", subDev, "bind", devFlags, "") + } + } + if err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting /dev from host into mount namespace") + } + } + // Make sure it's read-only. + if err = unix.Statfs(subDev, &fs); err != nil { + return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subDev) + } + if fs.Flags&unix.ST_RDONLY == 0 { + if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil { + return undoBinds, errors.Wrapf(err, "error remounting /dev in mount namespace read-only") + } + } + logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev")) + + // Bind /proc read-only. + subProc := filepath.Join(spec.Root.Path, "/proc") + if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil { + if os.IsNotExist(err) { + err = os.Mkdir(subProc, 0700) + if err == nil { + err = unix.Mount("/proc", subProc, "bind", procFlags, "") + } + } + if err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting /proc from host into mount namespace") + } + } + logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc")) + + // Bind /sys read-only. + subSys := filepath.Join(spec.Root.Path, "/sys") + if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil { + if os.IsNotExist(err) { + err = os.Mkdir(subSys, 0700) + if err == nil { + err = unix.Mount("/sys", subSys, "bind", sysFlags, "") + } + } + if err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace") + } + } + // Make sure it's read-only. + if err = unix.Statfs(subSys, &fs); err != nil { + return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys) + } + if fs.Flags&unix.ST_RDONLY == 0 { + if err := unix.Mount(subSys, subSys, "bind", sysFlags|unix.MS_REMOUNT, ""); err != nil { + return undoBinds, errors.Wrapf(err, "error remounting /sys in mount namespace read-only") + } + } + logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys")) + + // Add /sys/fs/selinux to the set of masked paths, to ensure that we don't have processes + // attempting to interact with labeling, when they aren't allowed to do so. + spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux") + // Add /sys/fs/cgroup to the set of masked paths, to ensure that we don't have processes + // attempting to mess with cgroup configuration, when they aren't allowed to do so. + spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") + + // Bind mount in everything we've been asked to mount. + for _, m := range spec.Mounts { + // Skip anything that we just mounted. 
+ switch m.Destination { + case "/dev", "/proc", "/sys": + logrus.Debugf("already bind mounted %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination)) + continue + default: + if strings.HasPrefix(m.Destination, "/dev/") { + continue + } + if strings.HasPrefix(m.Destination, "/proc/") { + continue + } + if strings.HasPrefix(m.Destination, "/sys/") { + continue + } + } + // Skip anything that isn't a bind or tmpfs mount. + if m.Type != "bind" && m.Type != "tmpfs" { + logrus.Debugf("skipping mount of type %q on %q", m.Type, m.Destination) + continue + } + // If the target is there, we can just mount it. + var srcinfo os.FileInfo + switch m.Type { + case "bind": + srcinfo, err = os.Stat(m.Source) + if err != nil { + return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source) + } + case "tmpfs": + srcinfo, err = os.Stat("/") + if err != nil { + return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a tmpfs") + } + } + target := filepath.Join(spec.Root.Path, m.Destination) + if _, err := os.Stat(target); err != nil { + // If the target can't be stat()ted, check the error. + if !os.IsNotExist(err) { + return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", target) + } + // The target isn't there yet, so create it, and make a + // note to remove it later. + if srcinfo.IsDir() { + if err = os.MkdirAll(target, 0111); err != nil { + return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target) + } + removes = append(removes, target) + } else { + if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil { + return undoBinds, errors.Wrapf(err, "error ensuring parent of mountpoint %q (%q) is present in mount namespace", target, filepath.Dir(target)) + } + var file *os.File + if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0); err != nil { + return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target) + } + file.Close() + removes = append(removes, target) + } + } + requestFlags := bindFlags + expectedFlags := uintptr(0) + if util.StringInSlice("nodev", m.Options) { + requestFlags |= unix.MS_NODEV + expectedFlags |= unix.ST_NODEV + } + if util.StringInSlice("noexec", m.Options) { + requestFlags |= unix.MS_NOEXEC + expectedFlags |= unix.ST_NOEXEC + } + if util.StringInSlice("nosuid", m.Options) { + requestFlags |= unix.MS_NOSUID + expectedFlags |= unix.ST_NOSUID + } + if util.StringInSlice("ro", m.Options) { + requestFlags |= unix.MS_RDONLY + expectedFlags |= unix.ST_RDONLY + } + switch m.Type { + case "bind": + // Do the bind mount. + if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil { + return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target) + } + logrus.Debugf("bind mounted %q to %q", m.Source, target) + case "tmpfs": + // Mount a tmpfs. 
+			if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
+				return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
+			}
+			logrus.Debugf("mounted a tmpfs to %q", target)
+		}
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
+		}
+		if uintptr(fs.Flags)&expectedFlags != expectedFlags {
+			if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
+				return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target)
+			}
+		}
+	}
+
+	// Set up any read-only paths that we need to. If we're running inside
+	// of a container, some of these locations will already be read-only.
+	for _, roPath := range spec.Linux.ReadonlyPaths {
+		r := filepath.Join(spec.Root.Path, roPath)
+		target, err := filepath.EvalSymlinks(r)
+		if err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error checking %q for symlinks before marking it read-only", r)
+		}
+		// Check if the location is already read-only.
+		var fs unix.Statfs_t
+		if err = unix.Statfs(target, &fs); err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q is already read-only", target)
+		}
+		if fs.Flags&unix.ST_RDONLY != 0 {
+			continue
+		}
+		// Mount the location over itself, so that we can remount it as read-only.
+		roFlags := uintptr(unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY)
+		if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REC, ""); err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error bind mounting %q onto itself in preparation for making it read-only", target)
+		}
+		// Remount the location read-only.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
+		}
+		if fs.Flags&unix.ST_RDONLY == 0 {
+			if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil {
+				return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace read-only", target)
+			}
+		}
+		// Check again.
+		if err = unix.Statfs(target, &fs); err != nil {
+			return undoBinds, errors.Wrapf(err, "error checking if directory %q was remounted read-only", target)
+		}
+		if fs.Flags&unix.ST_RDONLY == 0 {
+			return undoBinds, errors.Errorf("error verifying that %q in mount namespace was remounted read-only", target)
+		}
+	}
+
+	// Create an empty directory to use for masking directories.
+	roEmptyDir := filepath.Join(bundlePath, "empty")
+	if len(spec.Linux.MaskedPaths) > 0 {
+		if err := os.Mkdir(roEmptyDir, 0700); err != nil {
+			return undoBinds, errors.Wrapf(err, "error creating empty directory %q", roEmptyDir)
+		}
+		removes = append(removes, roEmptyDir)
+	}
+
+	// Set up any masked paths that we need to. If we're running inside of
+	// a container, some of these locations will already be read-only tmpfs
+	// filesystems or bind mounted to os.DevNull. If we're not running
+	// inside of a container, and nobody else has done that, we'll do it.
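+	// Editor's note (illustrative): the loop below masks a directory by
+	// bind mounting the empty roEmptyDir created above over it read-only,
+	// and masks a plain file by bind mounting os.DevNull over it, roughly:
+	//
+	//	unix.Mount(roEmptyDir, target, "bind", roFlags, "") // directory
+	//	unix.Mount(os.DevNull, target, "", bindFlags, "")   // file
+	//
+	// (roFlags/bindFlags stand in for the exact flag sets used below.)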
+	for _, masked := range spec.Linux.MaskedPaths {
+		t := filepath.Join(spec.Root.Path, masked)
+		target, err := filepath.EvalSymlinks(t)
+		if err != nil {
+			target = t
+		}
+		// Get some info about the null device.
+		nullinfo, err := os.Stat(os.DevNull)
+		if err != nil {
+			return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", os.DevNull)
+		}
+		// Get some info about the target.
+		targetinfo, err := os.Stat(target)
+		if err != nil {
+			if os.IsNotExist(err) {
+				// No target, no problem.
+				continue
+			}
+			return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", target)
+		}
+		if targetinfo.IsDir() {
+			// The target's a directory. Check if it's a read-only filesystem.
+			var statfs unix.Statfs_t
+			if err = unix.Statfs(target, &statfs); err != nil {
+				return undoBinds, errors.Wrapf(err, "error checking if directory %q is a mountpoint", target)
+			}
+			isReadOnly := statfs.Flags&unix.ST_RDONLY != 0
+			// Check if any of the IDs we're mapping could read it.
+			var stat unix.Stat_t
+			if err = unix.Stat(target, &stat); err != nil {
+				return undoBinds, errors.Wrapf(err, "error checking permissions on directory %q", target)
+			}
+			isAccessible := false
+			if stat.Mode&(unix.S_IROTH|unix.S_IXOTH) != 0 {
+				isAccessible = true
+			}
+			if !isAccessible && stat.Mode&(unix.S_IRGRP|unix.S_IXGRP) != 0 {
+				if len(spec.Linux.GIDMappings) > 0 {
+					for _, mapping := range spec.Linux.GIDMappings {
+						if stat.Gid >= mapping.ContainerID && stat.Gid < mapping.ContainerID+mapping.Size {
+							isAccessible = true
+							break
+						}
+					}
+				}
+			}
+			if !isAccessible && stat.Mode&(unix.S_IRUSR|unix.S_IXUSR) != 0 {
+				if len(spec.Linux.UIDMappings) > 0 {
+					for _, mapping := range spec.Linux.UIDMappings {
+						if stat.Uid >= mapping.ContainerID && stat.Uid < mapping.ContainerID+mapping.Size {
+							isAccessible = true
+							break
+						}
+					}
+				}
+			}
+			// Check if it's empty.
+			hasContent := false
+			directory, err := os.Open(target)
+			if err != nil {
+				if !os.IsPermission(err) {
+					return undoBinds, errors.Wrapf(err, "error opening directory %q", target)
+				}
+			} else {
+				names, err := directory.Readdirnames(0)
+				directory.Close()
+				if err != nil {
+					return undoBinds, errors.Wrapf(err, "error reading contents of directory %q", target)
+				}
+				for _, name := range names {
+					switch name {
+					case ".", "..":
+						continue
+					default:
+						hasContent = true
+					}
+					if hasContent {
+						break
+					}
+				}
+			}
+			// The target's a directory, so read-only bind mount an empty directory on it.
+			roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
+			if !isReadOnly || (hasContent && isAccessible) {
+				if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
+					return undoBinds, errors.Wrapf(err, "error masking directory %q in mount namespace", target)
+				}
+				if err = unix.Statfs(target, &fs); err != nil {
+					return undoBinds, errors.Wrapf(err, "error checking if directory %q was mounted read-only in mount namespace", target)
+				}
+				if fs.Flags&unix.ST_RDONLY == 0 {
+					if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil {
+						return undoBinds, errors.Wrapf(err, "error making sure directory %q in mount namespace is read only", target)
+					}
+				}
+			}
+		} else {
+			// The target's not a directory, so bind mount os.DevNull over it, unless it's already os.DevNull.
+ if !os.SameFile(nullinfo, targetinfo) { + if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil { + return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target) + } + } + } + } + return undoBinds, nil +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go new file mode 100644 index 000000000..f2c55017f --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -0,0 +1,142 @@ +// +build linux,seccomp + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + libseccomp "github.com/seccomp/libseccomp-golang" + "github.com/sirupsen/logrus" +) + +// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start. +func setSeccomp(spec *specs.Spec) error { + logrus.Debugf("setting seccomp configuration") + if spec.Linux.Seccomp == nil { + return nil + } + mapAction := func(specAction specs.LinuxSeccompAction) libseccomp.ScmpAction { + switch specAction { + case specs.ActKill: + return libseccomp.ActKill + case specs.ActTrap: + return libseccomp.ActTrap + case specs.ActErrno: + return libseccomp.ActErrno + case specs.ActTrace: + return libseccomp.ActTrace + case specs.ActAllow: + return libseccomp.ActAllow + } + return libseccomp.ActInvalid + } + mapArch := func(specArch specs.Arch) libseccomp.ScmpArch { + switch specArch { + case specs.ArchX86: + return libseccomp.ArchX86 + case specs.ArchX86_64: + return libseccomp.ArchAMD64 + case specs.ArchX32: + return libseccomp.ArchX32 + case specs.ArchARM: + return libseccomp.ArchARM + case specs.ArchAARCH64: + return libseccomp.ArchARM64 + case specs.ArchMIPS: + return libseccomp.ArchMIPS + case specs.ArchMIPS64: + return libseccomp.ArchMIPS64 + case specs.ArchMIPS64N32: + return libseccomp.ArchMIPS64N32 + case specs.ArchMIPSEL: + return libseccomp.ArchMIPSEL + case specs.ArchMIPSEL64: + return libseccomp.ArchMIPSEL64 + case specs.ArchMIPSEL64N32: + return libseccomp.ArchMIPSEL64N32 + case specs.ArchPPC: + return libseccomp.ArchPPC + case specs.ArchPPC64: + return libseccomp.ArchPPC64 + case specs.ArchPPC64LE: + return libseccomp.ArchPPC64LE + case specs.ArchS390: + return libseccomp.ArchS390 + case specs.ArchS390X: + return libseccomp.ArchS390X + case specs.ArchPARISC: + /* fallthrough */ /* for now */ + case specs.ArchPARISC64: + /* fallthrough */ /* for now */ + } + return libseccomp.ArchInvalid + } + mapOp := func(op specs.LinuxSeccompOperator) libseccomp.ScmpCompareOp { + switch op { + case specs.OpNotEqual: + return libseccomp.CompareNotEqual + case specs.OpLessThan: + return libseccomp.CompareLess + case specs.OpLessEqual: + return libseccomp.CompareLessOrEqual + case specs.OpEqualTo: + return libseccomp.CompareEqual + case specs.OpGreaterEqual: + return libseccomp.CompareGreaterEqual + case specs.OpGreaterThan: + return libseccomp.CompareGreater + case specs.OpMaskedEqual: + return libseccomp.CompareMaskedEqual + } + return libseccomp.CompareInvalid + } + + filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction)) + if err != nil { + return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction) + } + for _, arch := range spec.Linux.Seccomp.Architectures { + if err = filter.AddArch(mapArch(arch)); err != nil { + return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", arch, 
mapArch(arch)) + } + } + for _, rule := range spec.Linux.Seccomp.Syscalls { + scnames := make(map[libseccomp.ScmpSyscall]string) + for _, name := range rule.Names { + scnum, err := libseccomp.GetSyscallFromName(name) + if err != nil { + logrus.Debugf("error mapping syscall %q to a syscall, ignoring %q rule for %q", name, rule.Action, name) + continue + } + scnames[scnum] = name + } + for scnum := range scnames { + if len(rule.Args) == 0 { + if err = filter.AddRule(scnum, mapAction(rule.Action)); err != nil { + return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) + } + continue + } + var conditions []libseccomp.ScmpCondition + for _, arg := range rule.Args { + condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo) + if err != nil { + return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo) + } + conditions = append(conditions, condition) + } + if err = filter.AddRuleConditional(scnum, mapAction(rule.Action), conditions); err != nil { + return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) + } + } + } + if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil { + return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges) + } + err = filter.Load() + filter.Release() + if err != nil { + return errors.Wrapf(err, "error activating seccomp filter") + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go new file mode 100644 index 000000000..a5b74bf09 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux !seccomp + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func setSeccomp(spec *specs.Spec) error { + if spec.Linux.Seccomp != nil { + return errors.New("configured a seccomp filter without seccomp support?") + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go b/vendor/github.com/containers/buildah/chroot/selinux.go new file mode 100644 index 000000000..3e62d743d --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/selinux.go @@ -0,0 +1,22 @@ +// +build linux,selinux + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// setSelinuxLabel sets the process label for child processes that we'll start. 
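The setSeccomp helper above translates an OCI runtime-spec seccomp profile into a loaded kernel filter: build a filter with a default action, resolve syscall names to numbers, attach rules, then Load. Not part of the patch: a minimal sketch of the same github.com/seccomp/libseccomp-golang calls, assuming a Linux build with seccomp enabled; the chmod rule is purely illustrative.

```go
package main

import (
	"log"
	"syscall"

	libseccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Allow everything by default, then deny a single syscall.
	filter, err := libseccomp.NewFilter(libseccomp.ActAllow)
	if err != nil {
		log.Fatalf("creating filter: %v", err)
	}
	defer filter.Release()

	scnum, err := libseccomp.GetSyscallFromName("chmod")
	if err != nil {
		log.Fatalf("resolving syscall name: %v", err)
	}
	// ActErrno with a return code denies the call without killing the process.
	if err := filter.AddRule(scnum, libseccomp.ActErrno.SetReturnCode(int16(syscall.EPERM))); err != nil {
		log.Fatalf("adding rule: %v", err)
	}
	// Load installs the filter for this process and any children it starts.
	if err := filter.Load(); err != nil {
		log.Fatalf("loading filter: %v", err)
	}
	log.Println(syscall.Chmod("/tmp", 0755)) // now fails with EPERM
}
```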
+func setSelinuxLabel(spec *specs.Spec) error { + logrus.Debugf("setting selinux label") + if spec.Process.SelinuxLabel != "" && selinux.EnforceMode() != selinux.Disabled { + if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil { + return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go new file mode 100644 index 000000000..1c6f48912 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/selinux_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux !selinux + +package chroot + +import ( + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func setSelinuxLabel(spec *specs.Spec) error { + if spec.Linux.MountLabel != "" { + return errors.New("configured an SELinux mount label without SELinux support?") + } + if spec.Process.SelinuxLabel != "" { + return errors.New("configured an SELinux process label without SELinux support?") + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/unsupported.go b/vendor/github.com/containers/buildah/chroot/unsupported.go new file mode 100644 index 000000000..5312c0024 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/unsupported.go @@ -0,0 +1,15 @@ +// +build !linux + +package chroot + +import ( + "io" + + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// RunUsingChroot is not supported. +func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) { + return errors.Errorf("--isolation chroot is not supported on this platform") +} diff --git a/vendor/github.com/containers/buildah/chroot/util.go b/vendor/github.com/containers/buildah/chroot/util.go new file mode 100644 index 000000000..34cc77260 --- /dev/null +++ b/vendor/github.com/containers/buildah/chroot/util.go @@ -0,0 +1,15 @@ +// +build linux + +package chroot + +func dedupeStringSlice(slice []string) []string { + done := make([]string, 0, len(slice)) + m := make(map[string]struct{}) + for _, s := range slice { + if _, present := m[s]; !present { + m[s] = struct{}{} + done = append(done, s) + } + } + return done +} diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go new file mode 100644 index 000000000..f89930399 --- /dev/null +++ b/vendor/github.com/containers/buildah/commit.go @@ -0,0 +1,189 @@ +package buildah + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "time" + + "github.com/containers/buildah/util" + cp "github.com/containers/image/copy" + "github.com/containers/image/signature" + is "github.com/containers/image/storage" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// CommitOptions can be used to alter how an image is committed. +type CommitOptions struct { + // PreferredManifestType is the preferred type of image manifest. The + // image configuration format will be of a compatible type. + PreferredManifestType string + // Compression specifies the type of compression which is applied to + // layer blobs. The default is to not use compression, but + // archive.Gzip is recommended. 
+ Compression archive.Compression
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // AdditionalTags is a list of additional names to add to the image, if
+ // the transport to which we're writing the image gives us a way to add
+ // them.
+ AdditionalTags []string
+ // ReportWriter is an io.Writer which will be used to log the writing
+ // of the new image.
+ ReportWriter io.Writer
+ // HistoryTimestamp is the timestamp used when creating new items in the
+ // image's history. If unset, the current time will be used.
+ HistoryTimestamp *time.Time
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // IIDFile tells the builder to write the image ID to the specified file
+ IIDFile string
+ // Squash tells the builder to produce an image with a single layer
+ // instead of with possibly more than one layer.
+ Squash bool
+
+ // OnBuild is a list of commands to be run by images based on this image
+ OnBuild []string
+ // Parent is the base image that this image was created by.
+ Parent string
+}
+
+// PushOptions can be used to alter how an image is copied somewhere.
+type PushOptions struct {
+ // Compression specifies the type of compression which is applied to
+ // layer blobs. The default is to not use compression, but
+ // archive.Gzip is recommended.
+ Compression archive.Compression
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to log the writing
+ // of the new image.
+ ReportWriter io.Writer
+ // Store is the local storage store which holds the source image.
+ Store storage.Store
+ // github.com/containers/image/types SystemContext to hold credentials
+ // and other authentication/authorization information.
+ SystemContext *types.SystemContext
+ // ManifestType is the format to use when saving the image using the 'dir' transport;
+ // possible options are oci, v2s1, and v2s2
+ ManifestType string
+}
+
+// Commit writes the contents of the container, along with its updated
+// configuration, to a new image in the specified location, and if we know how,
+// adds any additional tags that were specified.
Returns the ID of the new image +// if commit was successful and the image destination was local +func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, error) { + var imgID string + + systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) + policy, err := signature.DefaultPolicy(systemContext) + if err != nil { + return imgID, errors.Wrapf(err, "error obtaining default signature policy") + } + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return imgID, errors.Wrapf(err, "error creating new signature policy context") + } + defer func() { + if err2 := policyContext.Destroy(); err2 != nil { + logrus.Debugf("error destroying signature policy context: %v", err2) + } + }() + // Check if the base image is already in the destination and it's some kind of local + // storage. If so, we can skip recompressing any layers that come from the base image. + exportBaseLayers := true + if transport, destIsStorage := dest.Transport().(is.StoreTransport); destIsStorage && b.FromImageID != "" { + if baseref, err := transport.ParseReference(b.FromImageID); baseref != nil && err == nil { + if img, err := transport.GetImage(baseref); img != nil && err == nil { + exportBaseLayers = false + } + } + } + src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.Compression, options.HistoryTimestamp) + if err != nil { + return imgID, errors.Wrapf(err, "error computing layer digests and building metadata") + } + // "Copy" our image to where it needs to be. + err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, systemContext, "")) + if err != nil { + return imgID, errors.Wrapf(err, "error copying layers and metadata") + } + if len(options.AdditionalTags) > 0 { + switch dest.Transport().Name() { + case is.Transport.Name(): + img, err := is.Transport.GetStoreImage(b.store, dest) + if err != nil { + return imgID, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + } + err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags) + if err != nil { + return imgID, errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...)) + } + logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) + default: + logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name()) + } + } + + img, err := is.Transport.GetStoreImage(b.store, dest) + if err != nil && err != storage.ErrImageUnknown { + return imgID, err + } + + if err == nil { + imgID = img.ID + + if options.IIDFile != "" { + if err := ioutil.WriteFile(options.IIDFile, []byte(img.ID), 0644); err != nil { + return imgID, errors.Wrapf(err, "failed to write Image ID File %q", options.IIDFile) + } + } + } + + return imgID, nil +} + +// Push copies the contents of the image to a new location. +func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) error { + systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) + policy, err := signature.DefaultPolicy(systemContext) + if err != nil { + return errors.Wrapf(err, "error obtaining default signature policy") + } + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return errors.Wrapf(err, "error creating new signature policy context") + } + // Look up the image. 
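Both Commit above and Push below bracket the copy with the same containers/image signature-policy lifecycle: obtain the default policy, wrap it in a policy context, and destroy the context when done. Not part of the patch: a condensed sketch of that lifecycle; passing a nil SystemContext here just means library defaults.

```go
package main

import (
	"log"

	"github.com/containers/image/signature"
)

func main() {
	// DefaultPolicy reads the system policy (e.g. /etc/containers/policy.json);
	// every image copy is evaluated against it.
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		log.Fatalf("obtaining default signature policy: %v", err)
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatalf("creating policy context: %v", err)
	}
	// Destroy must be called when finished; Commit does this in a defer.
	defer func() {
		if err := policyContext.Destroy(); err != nil {
			log.Printf("destroying policy context: %v", err)
		}
	}()
	// ... cp.Image(ctx, policyContext, dest, src, options) would go here ...
	_ = policyContext
}
```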
+ src, img, err := util.FindImage(options.Store, "", systemContext, image)
+ if err != nil {
+ return err
+ }
+ // Copy everything.
+ err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, systemContext, options.ManifestType))
+ if err != nil {
+ return errors.Wrapf(err, "error copying layers and metadata")
+ }
+ digest := "@" + img.Digest.Hex()
+ fmt.Printf("Successfully pushed %s%s\n", dest.StringWithinTransport(), digest)
+ return nil
+}
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
new file mode 100644
index 000000000..dcf922dc9
--- /dev/null
+++ b/vendor/github.com/containers/buildah/common.go
@@ -0,0 +1,35 @@
+package buildah
+
+import (
+ "io"
+
+ cp "github.com/containers/image/copy"
+ "github.com/containers/image/types"
+)
+
+const (
+ // OCI is used to define the "oci" image format
+ OCI = "oci"
+ // DOCKER is used to define the "docker" image format
+ DOCKER = "docker"
+)
+
+func getCopyOptions(reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
+ return &cp.Options{
+ ReportWriter: reportWriter,
+ SourceCtx: sourceSystemContext,
+ DestinationCtx: destinationSystemContext,
+ ForceManifestMIMEType: manifestType,
+ }
+}
+
+func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
+ sc := &types.SystemContext{}
+ if defaults != nil {
+ *sc = *defaults
+ }
+ if signaturePolicyPath != "" {
+ sc.SignaturePolicyPath = signaturePolicyPath
+ }
+ return sc
+}
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
new file mode 100644
index 000000000..f6a742d59
--- /dev/null
+++ b/vendor/github.com/containers/buildah/config.go
@@ -0,0 +1,545 @@
+package buildah
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/stringid"
+ ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format
+// (either as it exists, or converting the image if necessary), and unmarshals it into dest.
+// NOTE: The MIME type is of the _manifest_, not of the _config_ that is returned.
+func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
+ _, actualManifestMIMEType, err := img.Manifest(ctx)
+ if err != nil {
+ return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference()))
+ }
+ if wantedManifestMIMEType != actualManifestMIMEType {
+ img, err = img.UpdatedImage(ctx, types.ManifestUpdateOptions{
+ ManifestMIMEType: wantedManifestMIMEType,
+ InformationOnly: types.ManifestUpdateInformation{ // Strictly speaking, every value in here is invalid. But…
+ Destination: nil, // Destination is technically required, but actually necessary only for conversion _to_ v2s1. Leave it nil, we will crash if that ever changes.
+ LayerInfos: nil, // LayerInfos is necessary for size information in v2s2/OCI manifests, but the code can work with nil, and we are not reading the converted manifest at all. + LayerDiffIDs: nil, // LayerDiffIDs are actually embedded in the converted manifest, but the code can work with nil, and the values are not needed until pushing the finished image, at which time containerImageRef.NewImageSource builds the values from scratch. + }, + }) + if err != nil { + return errors.Wrapf(err, "error converting image %q to %s", transports.ImageName(img.Reference()), wantedManifestMIMEType) + } + } + config, err := img.ConfigBlob(ctx) + if err != nil { + return errors.Wrapf(err, "error reading %s config from %q", wantedManifestMIMEType, transports.ImageName(img.Reference())) + } + if err := json.Unmarshal(config, dest); err != nil { + return errors.Wrapf(err, "error parsing %s configuration from %q", wantedManifestMIMEType, transports.ImageName(img.Reference())) + } + return nil +} + +func (b *Builder) initConfig(ctx context.Context, img types.Image) error { + if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one. + rawManifest, manifestMIMEType, err := img.Manifest(ctx) + if err != nil { + return errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(img.Reference())) + } + rawConfig, err := img.ConfigBlob(ctx) + if err != nil { + return errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(img.Reference())) + } + b.Manifest = rawManifest + b.Config = rawConfig + + dimage := docker.V2Image{} + if err := unmarshalConvertedConfig(ctx, &dimage, img, manifest.DockerV2Schema2MediaType); err != nil { + return err + } + b.Docker = dimage + + oimage := ociv1.Image{} + if err := unmarshalConvertedConfig(ctx, &oimage, img, ociv1.MediaTypeImageManifest); err != nil { + return err + } + b.OCIv1 = oimage + + if manifestMIMEType == ociv1.MediaTypeImageManifest { + // Attempt to recover format-specific data from the manifest. + v1Manifest := ociv1.Manifest{} + if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil { + return errors.Wrapf(err, "error parsing OCI manifest") + } + b.ImageAnnotations = v1Manifest.Annotations + } + } + + b.fixupConfig() + return nil +} + +func (b *Builder) fixupConfig() { + if b.Docker.Config != nil { + // Prefer image-level settings over those from the container it was built from. + b.Docker.ContainerConfig = *b.Docker.Config + } + b.Docker.Config = &b.Docker.ContainerConfig + b.Docker.DockerVersion = "" + now := time.Now().UTC() + if b.Docker.Created.IsZero() { + b.Docker.Created = now + } + if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() { + b.OCIv1.Created = &now + } + if b.OS() == "" { + b.SetOS(runtime.GOOS) + } + if b.Architecture() == "" { + b.SetArchitecture(runtime.GOARCH) + } + if b.Format == Dockerv2ImageManifest && b.Hostname() == "" { + b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID())) + } +} + +// Annotations returns a set of key-value pairs from the image's manifest. +func (b *Builder) Annotations() map[string]string { + return copyStringStringMap(b.ImageAnnotations) +} + +// SetAnnotation adds or overwrites a key's value from the image's manifest. +// Note: this setting is not present in the Docker v2 image format, so it is +// discarded when writing images using Docker v2 formats. 
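A pattern worth noting in the accessors that follow: the Builder keeps two parallel views of the image configuration, one OCI and one Docker v2s2, and every setter writes to both so that either manifest format can be emitted at commit time. Not part of the patch: a toy sketch of that pattern with hypothetical, stripped-down types.

```go
package main

import "fmt"

type ociConfig struct{ WorkingDir string }
type dockerConfig struct{ WorkingDir string }

type builder struct {
	OCIv1  ociConfig
	Docker dockerConfig
}

// Every mutation lands in both configuration views.
func (b *builder) SetWorkDir(dir string) {
	b.OCIv1.WorkingDir = dir  // OCI image config
	b.Docker.WorkingDir = dir // Docker v2s2 image config
}

func main() {
	b := &builder{}
	b.SetWorkDir("/src")
	fmt.Println(b.OCIv1.WorkingDir == b.Docker.WorkingDir) // true
}
```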
+func (b *Builder) SetAnnotation(key, value string) { + if b.ImageAnnotations == nil { + b.ImageAnnotations = map[string]string{} + } + b.ImageAnnotations[key] = value +} + +// UnsetAnnotation removes a key and its value from the image's manifest, if +// it's present. +func (b *Builder) UnsetAnnotation(key string) { + delete(b.ImageAnnotations, key) +} + +// ClearAnnotations removes all keys and their values from the image's +// manifest. +func (b *Builder) ClearAnnotations() { + b.ImageAnnotations = map[string]string{} +} + +// CreatedBy returns a description of how this image was built. +func (b *Builder) CreatedBy() string { + return b.ImageCreatedBy +} + +// SetCreatedBy sets the description of how this image was built. +func (b *Builder) SetCreatedBy(how string) { + b.ImageCreatedBy = how +} + +// OS returns a name of the OS on which the container, or a container built +// using an image built from this container, is intended to be run. +func (b *Builder) OS() string { + return b.OCIv1.OS +} + +// SetOS sets the name of the OS on which the container, or a container built +// using an image built from this container, is intended to be run. +func (b *Builder) SetOS(os string) { + b.OCIv1.OS = os + b.Docker.OS = os +} + +// Architecture returns a name of the architecture on which the container, or a +// container built using an image built from this container, is intended to be +// run. +func (b *Builder) Architecture() string { + return b.OCIv1.Architecture +} + +// SetArchitecture sets the name of the architecture on which the container, or +// a container built using an image built from this container, is intended to +// be run. +func (b *Builder) SetArchitecture(arch string) { + b.OCIv1.Architecture = arch + b.Docker.Architecture = arch +} + +// Maintainer returns contact information for the person who built the image. +func (b *Builder) Maintainer() string { + return b.OCIv1.Author +} + +// SetMaintainer sets contact information for the person who built the image. +func (b *Builder) SetMaintainer(who string) { + b.OCIv1.Author = who + b.Docker.Author = who +} + +// User returns information about the user as whom the container, or a +// container built using an image built from this container, should be run. +func (b *Builder) User() string { + return b.OCIv1.Config.User +} + +// SetUser sets information about the user as whom the container, or a +// container built using an image built from this container, should be run. +// Acceptable forms are a user name or ID, optionally followed by a colon and a +// group name or ID. +func (b *Builder) SetUser(spec string) { + b.OCIv1.Config.User = spec + b.Docker.Config.User = spec +} + +// OnBuild returns the OnBuild value from the container. +func (b *Builder) OnBuild() []string { + return copyStringSlice(b.Docker.Config.OnBuild) +} + +// ClearOnBuild removes all values from the OnBuild structure +func (b *Builder) ClearOnBuild() { + b.Docker.Config.OnBuild = []string{} +} + +// SetOnBuild sets a trigger instruction to be executed when the image is used +// as the base of another image. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetOnBuild(onBuild string) { + if onBuild != "" && b.Format != Dockerv2ImageManifest { + logrus.Errorf("ONBUILD is not supported for OCI Image formats, %s will be ignored. 
Must use `docker` format", onBuild)
+ }
+ b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild)
+}
+
+// WorkDir returns the default working directory for running commands in the
+// container, or in a container built using an image built from this container.
+func (b *Builder) WorkDir() string {
+ return b.OCIv1.Config.WorkingDir
+}
+
+// SetWorkDir sets the location of the default working directory for running
+// commands in the container, or in a container built using an image built from
+// this container.
+func (b *Builder) SetWorkDir(there string) {
+ b.OCIv1.Config.WorkingDir = there
+ b.Docker.Config.WorkingDir = there
+}
+
+// Shell returns the default shell for running commands in the
+// container, or in a container built using an image built from this container.
+func (b *Builder) Shell() []string {
+ return copyStringSlice(b.Docker.Config.Shell)
+}
+
+// SetShell sets the default shell for running
+// commands in the container, or in a container built using an image built from
+// this container.
+// Note: this setting is not present in the OCIv1 image format, so it is
+// discarded when writing images using OCIv1 formats.
+func (b *Builder) SetShell(shell []string) {
+ if len(shell) > 0 && b.Format != Dockerv2ImageManifest {
+ logrus.Errorf("SHELL is not supported for OCI Image format, %s will be ignored. Must use `docker` format", shell)
+ }
+
+ b.Docker.Config.Shell = copyStringSlice(shell)
+}
+
+// Env returns a list of key-value pairs to be set when running commands in the
+// container, or in a container built using an image built from this container.
+func (b *Builder) Env() []string {
+ return copyStringSlice(b.OCIv1.Config.Env)
+}
+
+// SetEnv adds or overwrites a value to the set of environment strings which
+// should be set when running commands in the container, or in a container
+// built using an image built from this container.
+func (b *Builder) SetEnv(k string, v string) {
+ reset := func(s *[]string) {
+ getenv := func(name string) string {
+ for i := range *s {
+ val := strings.SplitN((*s)[i], "=", 2)
+ if len(val) == 2 && val[0] == name {
+ return val[1]
+ }
+ }
+ return name
+ }
+ // Expand any references to existing values once, up front, then
+ // filter out any previous setting for this key.
+ v = os.Expand(v, getenv)
+ n := []string{}
+ for i := range *s {
+ if !strings.HasPrefix((*s)[i], k+"=") {
+ n = append(n, (*s)[i])
+ }
+ }
+ n = append(n, k+"="+v)
+ *s = n
+ }
+ reset(&b.OCIv1.Config.Env)
+ reset(&b.Docker.Config.Env)
+}
+
+// UnsetEnv removes a value from the set of environment strings which should be
+// set when running commands in this container, or in a container built using
+// an image built from this container.
+func (b *Builder) UnsetEnv(k string) {
+ unset := func(s *[]string) {
+ n := []string{}
+ for i := range *s {
+ if !strings.HasPrefix((*s)[i], k+"=") {
+ n = append(n, (*s)[i])
+ }
+ }
+ *s = n
+ }
+ unset(&b.OCIv1.Config.Env)
+ unset(&b.Docker.Config.Env)
+}
+
+// ClearEnv removes all values from the set of environment strings which should
+// be set when running commands in this container, or in a container built
+// using an image built from this container.
+func (b *Builder) ClearEnv() {
+ b.OCIv1.Config.Env = []string{}
+ b.Docker.Config.Env = []string{}
+}
+
+// Cmd returns the default command, or command parameters if an Entrypoint is
+// set, to use when running a container built from an image built from this
+// container.
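SetEnv above leans on os.Expand to resolve $VAR and ${VAR} references in the new value against entries already stored as "KEY=VALUE" strings. Not part of the patch: a standalone sketch of that lookup, using only the standard library; it mirrors SetEnv's fallback of returning the bare name when the variable is unset.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// expandAgainst resolves variable references in value using a
// "KEY=VALUE" slice as the lookup table.
func expandAgainst(env []string, value string) string {
	getenv := func(name string) string {
		for _, kv := range env {
			if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 && parts[0] == name {
				return parts[1]
			}
		}
		return name // same fallback SetEnv uses when the variable is unset
	}
	return os.Expand(value, getenv)
}

func main() {
	env := []string{"PATH=/usr/bin", "HOME=/root"}
	fmt.Println(expandAgainst(env, "$PATH:/opt/bin")) // /usr/bin:/opt/bin
}
```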
+func (b *Builder) Cmd() []string { + return copyStringSlice(b.OCIv1.Config.Cmd) +} + +// SetCmd sets the default command, or command parameters if an Entrypoint is +// set, to use when running a container built from an image built from this +// container. +func (b *Builder) SetCmd(cmd []string) { + b.OCIv1.Config.Cmd = copyStringSlice(cmd) + b.Docker.Config.Cmd = copyStringSlice(cmd) +} + +// Entrypoint returns the command to be run for containers built from images +// built from this container. +func (b *Builder) Entrypoint() []string { + if len(b.OCIv1.Config.Entrypoint) > 0 { + return copyStringSlice(b.OCIv1.Config.Entrypoint) + } + return nil +} + +// SetEntrypoint sets the command to be run for in containers built from images +// built from this container. +func (b *Builder) SetEntrypoint(ep []string) { + b.OCIv1.Config.Entrypoint = copyStringSlice(ep) + b.Docker.Config.Entrypoint = copyStringSlice(ep) +} + +// Labels returns a set of key-value pairs from the image's runtime +// configuration. +func (b *Builder) Labels() map[string]string { + return copyStringStringMap(b.OCIv1.Config.Labels) +} + +// SetLabel adds or overwrites a key's value from the image's runtime +// configuration. +func (b *Builder) SetLabel(k string, v string) { + if b.OCIv1.Config.Labels == nil { + b.OCIv1.Config.Labels = map[string]string{} + } + b.OCIv1.Config.Labels[k] = v + if b.Docker.Config.Labels == nil { + b.Docker.Config.Labels = map[string]string{} + } + b.Docker.Config.Labels[k] = v +} + +// UnsetLabel removes a key and its value from the image's runtime +// configuration, if it's present. +func (b *Builder) UnsetLabel(k string) { + delete(b.OCIv1.Config.Labels, k) + delete(b.Docker.Config.Labels, k) +} + +// ClearLabels removes all keys and their values from the image's runtime +// configuration. +func (b *Builder) ClearLabels() { + b.OCIv1.Config.Labels = map[string]string{} + b.Docker.Config.Labels = map[string]string{} +} + +// Ports returns the set of ports which should be exposed when a container +// based on an image built from this container is run. +func (b *Builder) Ports() []string { + p := []string{} + for k := range b.OCIv1.Config.ExposedPorts { + p = append(p, k) + } + return p +} + +// SetPort adds or overwrites an exported port in the set of ports which should +// be exposed when a container based on an image built from this container is +// run. +func (b *Builder) SetPort(p string) { + if b.OCIv1.Config.ExposedPorts == nil { + b.OCIv1.Config.ExposedPorts = map[string]struct{}{} + } + b.OCIv1.Config.ExposedPorts[p] = struct{}{} + if b.Docker.Config.ExposedPorts == nil { + b.Docker.Config.ExposedPorts = make(docker.PortSet) + } + b.Docker.Config.ExposedPorts[docker.Port(p)] = struct{}{} +} + +// UnsetPort removes an exposed port from the set of ports which should be +// exposed when a container based on an image built from this container is run. +func (b *Builder) UnsetPort(p string) { + delete(b.OCIv1.Config.ExposedPorts, p) + delete(b.Docker.Config.ExposedPorts, docker.Port(p)) +} + +// ClearPorts empties the set of ports which should be exposed when a container +// based on an image built from this container is run. +func (b *Builder) ClearPorts() { + b.OCIv1.Config.ExposedPorts = map[string]struct{}{} + b.Docker.Config.ExposedPorts = docker.PortSet{} +} + +// Volumes returns a list of filesystem locations which should be mounted from +// outside of the container when a container built from an image built from +// this container is run. 
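The ExposedPorts handling above (and the Volumes handling that follows) is Go's usual set idiom: a map from key to the zero-size struct{}, so membership costs nothing to store. Not part of the patch: a minimal sketch of the add/remove/enumerate operations the Port methods perform.

```go
package main

import "fmt"

func main() {
	ports := map[string]struct{}{}

	// SetPort-style add; adding an existing key is idempotent.
	ports["80/tcp"] = struct{}{}
	ports["443/tcp"] = struct{}{}

	// UnsetPort-style removal; deleting a missing key is a no-op.
	delete(ports, "443/tcp")

	// Ports()-style enumeration; map iteration order is unspecified.
	for p := range ports {
		fmt.Println(p)
	}
}
```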
+func (b *Builder) Volumes() []string { + v := []string{} + for k := range b.OCIv1.Config.Volumes { + v = append(v, k) + } + if len(v) > 0 { + return v + } + return nil +} + +// AddVolume adds a location to the image's list of locations which should be +// mounted from outside of the container when a container based on an image +// built from this container is run. +func (b *Builder) AddVolume(v string) { + if b.OCIv1.Config.Volumes == nil { + b.OCIv1.Config.Volumes = map[string]struct{}{} + } + b.OCIv1.Config.Volumes[v] = struct{}{} + if b.Docker.Config.Volumes == nil { + b.Docker.Config.Volumes = map[string]struct{}{} + } + b.Docker.Config.Volumes[v] = struct{}{} +} + +// RemoveVolume removes a location from the list of locations which should be +// mounted from outside of the container when a container based on an image +// built from this container is run. +func (b *Builder) RemoveVolume(v string) { + delete(b.OCIv1.Config.Volumes, v) + delete(b.Docker.Config.Volumes, v) +} + +// ClearVolumes removes all locations from the image's list of locations which +// should be mounted from outside of the container when a container based on an +// image built from this container is run. +func (b *Builder) ClearVolumes() { + b.OCIv1.Config.Volumes = map[string]struct{}{} + b.Docker.Config.Volumes = map[string]struct{}{} +} + +// Hostname returns the hostname which will be set in the container and in +// containers built using images built from the container. +func (b *Builder) Hostname() string { + return b.Docker.Config.Hostname +} + +// SetHostname sets the hostname which will be set in the container and in +// containers built using images built from the container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetHostname(name string) { + if name != "" && b.Format != Dockerv2ImageManifest { + logrus.Errorf("HOSTNAME is not supported for OCI Image format, hostname %s will be ignored. Must use `docker` format", name) + } + b.Docker.Config.Hostname = name +} + +// Domainname returns the domainname which will be set in the container and in +// containers built using images built from the container. +func (b *Builder) Domainname() string { + return b.Docker.Config.Domainname +} + +// SetDomainname sets the domainname which will be set in the container and in +// containers built using images built from the container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. +func (b *Builder) SetDomainname(name string) { + if name != "" && b.Format != Dockerv2ImageManifest { + logrus.Errorf("DOMAINNAME is not supported for OCI Image format, domainname %s will be ignored. Must use `docker` format", name) + } + b.Docker.Config.Domainname = name +} + +// SetDefaultMountsFilePath sets the mounts file path for testing purposes +func (b *Builder) SetDefaultMountsFilePath(path string) { + b.DefaultMountsFilePath = path +} + +// Comment returns the comment which will be set in the container and in +// containers built using images built from the container +func (b *Builder) Comment() string { + return b.Docker.Comment +} + +// SetComment sets the comment which will be set in the container and in +// containers built using images built from the container. +// Note: this setting is not present in the OCIv1 image format, so it is +// discarded when writing images using OCIv1 formats. 
+func (b *Builder) SetComment(comment string) {
+ if comment != "" && b.Format != Dockerv2ImageManifest {
+ logrus.Errorf("COMMENT is not supported for OCI Image format, comment %s will be ignored. Must use `docker` format", comment)
+ }
+ b.Docker.Comment = comment
+}
+
+// HistoryComment returns the comment which will be used in the history item
+// which will describe the latest layer when we commit an image.
+func (b *Builder) HistoryComment() string {
+ return b.ImageHistoryComment
+}
+
+// SetHistoryComment sets the comment which will be used in the history item
+// which will describe the latest layer when we commit an image.
+func (b *Builder) SetHistoryComment(comment string) {
+ b.ImageHistoryComment = comment
+}
+
+// StopSignal returns the signal which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) StopSignal() string {
+ return b.Docker.Config.StopSignal
+}
+
+// SetStopSignal sets the signal which will be set in the container and in
+// containers built using images built from the container.
+func (b *Builder) SetStopSignal(stopSignal string) {
+ b.OCIv1.Config.StopSignal = stopSignal
+ b.Docker.Config.StopSignal = stopSignal
+}
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
new file mode 100644
index 000000000..8de774ff9
--- /dev/null
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -0,0 +1,18 @@
+package buildah
+
+import (
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+)
+
+// Delete removes the working container. The buildah.Builder object should not
+// be used after this method is called.
+func (b *Builder) Delete() error {
+ if err := b.store.DeleteContainer(b.ContainerID); err != nil {
+ return errors.Wrapf(err, "error deleting build container")
+ }
+ b.MountPoint = ""
+ b.Container = ""
+ b.ContainerID = ""
+ return label.ReleaseLabel(b.ProcessLabel)
+}
diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go
new file mode 100644
index 000000000..759fc1246
--- /dev/null
+++ b/vendor/github.com/containers/buildah/docker/types.go
@@ -0,0 +1,262 @@
+package docker
+
+//
+// Types extracted from Docker
+//
+
+import (
+ "time"
+
+ "github.com/containers/image/pkg/strslice"
+ "github.com/opencontainers/go-digest"
+)
+
+// github.com/moby/moby/image/rootfs.go
+const TypeLayers = "layers"
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
+
+// github.com/moby/moby/image/rootfs.go
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type V2S2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// github.com/moby/moby/image/image.go +// History stores build commands that were used to create an image +type V2S2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// github.com/moby/moby/image/image.go +// ID is the content-addressable ID of an image. +type ID digest.Digest + +// github.com/moby/moby/api/types/container/config.go +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// github.com/docker/go-connections/nat/nat.go +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// github.com/docker/go-connections/nat/nat.go +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// github.com/moby/moby/api/types/container/config.go +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// github.com/docker/distribution/manifest/schema1/config_builder.go
+// For non-top-level layers, create fake V1Compatibility strings that
+// fit the format and don't collide with anything else, but don't
+// result in runnable images on their own.
+type V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// V1Image stores the V1 image configuration.
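The Config struct above insists that every field carry `omitempty`, because zero-valued fields then vanish from the marshalled JSON entirely: adding new fields later does not perturb the serialization, and therefore the hash, of older configurations. Not part of the patch: a tiny demonstration with a hypothetical two-field struct.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// demo stands in for a config type whose hash must stay stable.
type demo struct {
	Comment string `json:"comment,omitempty"`
	Size    int64  `json:"size,omitempty"`
}

func main() {
	out, _ := json.Marshal(demo{Comment: "hi"})
	// Size is absent from the output, not rendered as "size":0.
	fmt.Println(string(out)) // {"comment":"hi"}
}
```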
+type V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built on and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// github.com/moby/moby/image/image.go
+// Image stores the image configuration
+type V2Image struct {
+ V1Image
+ Parent ID `json:"parent,omitempty"`
+ RootFS *V2S2RootFS `json:"rootfs,omitempty"`
+ History []V2S2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+
+ // rawJSON caches the immutable JSON associated with this image.
+ rawJSON []byte
+
+ // computedID is the ID computed from the hash of the image config.
+ // Not to be confused with the legacy V1 ID in V1Image.
+ computedID ID
+}
+
+// github.com/docker/distribution/manifest/versioned.go
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type V2Versioned struct {
+ // SchemaVersion is the image manifest schema that this image follows
+ SchemaVersion int `json:"schemaVersion"`
+
+ // MediaType is the media type of this schema.
+ MediaType string `json:"mediaType,omitempty"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type V2S1FSLayer struct {
+ // BlobSum is the tarsum of the referenced filesystem image layer
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// History stores unstructured v1 compatibility information
+type V2S1History struct {
+ // V1Compatibility is the raw v1 compatibility information
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// github.com/docker/distribution/manifest/schema1/manifest.go
+// Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type V2S1Manifest struct {
+ V2Versioned
+
+ // Name is the name of the image's repository
+ Name string `json:"name"`
+
+ // Tag is the tag of the image specified by this manifest
+ Tag string `json:"tag"`
+
+ // Architecture is the host architecture on which this image is intended to
+ // run
+ Architecture string `json:"architecture"`
+
+ // FSLayers is a list of filesystem layer blobSums contained in this image
+ FSLayers []V2S1FSLayer `json:"fsLayers"`
+
+ // History is a list of unstructured historical data for v1 compatibility
+ History []V2S1History `json:"history"`
+}
+
+// github.com/docker/distribution/blobs.go
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type V2S2Descriptor struct {
+ // MediaType describes the type of the content. All text based formats are
+ // encoded as utf-8.
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Size in bytes of content.
+ Size int64 `json:"size,omitempty"`
+
+ // Digest uniquely identifies the content. A byte stream can be verified
+ // against this digest.
+ Digest digest.Digest `json:"digest,omitempty"`
+
+ // URLs contains the source URLs of this content.
+ URLs []string `json:"urls,omitempty"`
+
+ // NOTE: Before adding a field here, please ensure that all
+ // other options have been exhausted. Much of the type relationships
+ // depend on the simplicity of this type.
+}
+
+// github.com/docker/distribution/manifest/schema2/manifest.go
+// Manifest defines a schema2 manifest.
+type V2S2Manifest struct {
+ V2Versioned
+
+ // Config references the image configuration as a blob.
+ Config V2S2Descriptor `json:"config"`
+
+ // Layers lists descriptors for the layers referenced by the
+ // configuration.
+ Layers []V2S2Descriptor `json:"layers"`
+}
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
new file mode 100644
index 000000000..df50d95bd
--- /dev/null
+++ b/vendor/github.com/containers/buildah/image.go
@@ -0,0 +1,634 @@
+package buildah
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/manifest"
+ is "github.com/containers/image/storage"
+ "github.com/containers/image/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ specs "github.com/opencontainers/image-spec/specs-go"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
+ // suitable for specifying as a value of the PreferredManifestType
+ // member of a CommitOptions structure. It is also the default.
+ OCIv1ImageManifest = v1.MediaTypeImageManifest
+ // Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
+ // manifest, suitable for specifying as a value of the
+ // PreferredManifestType member of a CommitOptions structure.
+ Dockerv2ImageManifest = manifest.DockerV2Schema2MediaType +) + +type containerImageRef struct { + store storage.Store + compression archive.Compression + name reference.Named + names []string + containerID string + mountLabel string + layerID string + oconfig []byte + dconfig []byte + created time.Time + createdBy string + historyComment string + annotations map[string]string + preferredManifestType string + exporting bool + squash bool + tarPath func(path string) (io.ReadCloser, error) + parent string +} + +type containerImageSource struct { + path string + ref *containerImageRef + store storage.Store + containerID string + mountLabel string + layerID string + names []string + compression archive.Compression + config []byte + configDigest digest.Digest + manifest []byte + manifestType string + exporting bool +} + +func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) { + src, err := i.NewImageSource(ctx, sc) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sc, src) +} + +func expectedOCIDiffIDs(image v1.Image) int { + expected := 0 + for _, history := range image.History { + if !history.EmptyLayer { + expected = expected + 1 + } + } + return expected +} + +func expectedDockerDiffIDs(image docker.V2Image) int { + expected := 0 + for _, history := range image.History { + if !history.EmptyLayer { + expected = expected + 1 + } + } + return expected +} + +// Compute the media types which we need to attach to a layer, given the type of +// compression that we'll be applying. +func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) { + omediaType = v1.MediaTypeImageLayer + //TODO: Convert to manifest.DockerV2Schema2LayerUncompressedMediaType once available + dmediaType = docker.V2S2MediaTypeUncompressedLayer + if i.compression != archive.Uncompressed { + switch i.compression { + case archive.Gzip: + omediaType = v1.MediaTypeImageLayerGzip + dmediaType = manifest.DockerV2Schema2LayerMediaType + logrus.Debugf("compressing %s with gzip", what) + case archive.Bzip2: + // Until the image specs define a media type for bzip2-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with bzip2. + return "", "", errors.New("media type for bzip2-compressed layers is not defined") + case archive.Xz: + // Until the image specs define a media type for xz-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with xz. + return "", "", errors.New("media type for xz-compressed layers is not defined") + default: + logrus.Debugf("compressing %s with unknown compressor(?)", what) + } + } + return omediaType, dmediaType, nil +} + +// Extract the container's whole filesystem as if it were a single layer. 
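computeLayerMIMEType above encodes a constraint of the image specs: a layer's media type must advertise the compression actually applied, and no media type is defined for bzip2- or xz-compressed layers, so those requests must fail. Not part of the patch: a condensed sketch of the OCI side of that selection; the string-keyed compression argument is a simplification of the archive.Compression enum.

```go
package main

import (
	"errors"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ociLayerMediaType picks the OCI layer media type matching the
// compression that will be applied, rejecting undefined combinations.
func ociLayerMediaType(compression string) (string, error) {
	switch compression {
	case "none":
		return v1.MediaTypeImageLayer, nil
	case "gzip":
		return v1.MediaTypeImageLayerGzip, nil
	default:
		return "", errors.New("media type for " + compression + "-compressed layers is not defined")
	}
}

func main() {
	mt, _ := ociLayerMediaType("gzip")
	fmt.Println(mt) // application/vnd.oci.image.layer.v1.tar+gzip
}
```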
+func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) { + mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) + if err != nil { + return nil, errors.Wrapf(err, "error extracting container %q", i.containerID) + } + rc, err := i.tarPath(mountPoint) + if err != nil { + return nil, errors.Wrapf(err, "error extracting container %q", i.containerID) + } + return ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + if err != nil { + err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID) + } + if _, err2 := i.store.Unmount(i.containerID, false); err == nil { + if err2 != nil { + err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID) + } + err = err2 + } + return err + }), nil +} + +// Build fresh copies of the container configuration structures so that we can edit them +// without making unintended changes to the original Builder. +func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) { + created := i.created + + // Build an empty image, and then decode over it. + oimage := v1.Image{} + if err := json.Unmarshal(i.oconfig, &oimage); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + } + // Always replace this value, since we're newer than our base image. + oimage.Created = &created + // Clear the list of diffIDs, since we always repopulate it. + oimage.RootFS.Type = docker.TypeLayers + oimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so that we can append + // entries to it. + if i.squash { + oimage.History = []v1.History{} + } + + // Build an empty image, and then decode over it. + dimage := docker.V2Image{} + if err := json.Unmarshal(i.dconfig, &dimage); err != nil { + return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + } + dimage.Parent = docker.ID(digest.FromString(i.parent)) + // Always replace this value, since we're newer than our base image. + dimage.Created = created + // Clear the list of diffIDs, since we always repopulate it. + dimage.RootFS = &docker.V2S2RootFS{} + dimage.RootFS.Type = docker.TypeLayers + dimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so that we can append + // entries to it. + if i.squash { + dimage.History = []docker.V2S2History{} + } + + // Build empty manifests. The Layers lists will be populated later. + omanifest := v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + }, + Layers: []v1.Descriptor{}, + Annotations: i.annotations, + } + + dmanifest := docker.V2S2Manifest{ + V2Versioned: docker.V2Versioned{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + }, + Config: docker.V2S2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + }, + Layers: []docker.V2S2Descriptor{}, + } + + return oimage, omanifest, dimage, dmanifest, nil +} + +func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { + // Decide which type of manifest and configuration output we're going to provide. + manifestType := i.preferredManifestType + // If it's not a format we support, return an error. 
+	if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
+		return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
+			manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
+	}
+	// Start building the list of layers using the read-write layer.
+	layers := []string{}
+	layerID := i.layerID
+	layer, err := i.store.Layer(layerID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
+	}
+	// Walk the list of parent layers, prepending each as we go.  If we're squashing,
+	// stop at the layer ID of the top layer, which we won't really be using anyway.
+	for layer != nil {
+		layers = append(append([]string{}, layerID), layers...)
+		layerID = layer.Parent
+		if layerID == "" || i.squash {
+			err = nil
+			break
+		}
+		layer, err = i.store.Layer(layerID)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
+		}
+	}
+	logrus.Debugf("layer list: %q", layers)
+
+	// Make a temporary directory to hold blobs.
+	path, err := ioutil.TempDir(os.TempDir(), Package)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("using %q to hold temporary data", path)
+	defer func() {
+		if src == nil {
+			err2 := os.RemoveAll(path)
+			if err2 != nil {
+				logrus.Errorf("error removing %q: %v", path, err2)
+			}
+		}
+	}()
+
+	// Build fresh copies of the configurations and manifest so that we don't mess with any
+	// values in the Builder object itself.
+	oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
+	if err != nil {
+		return nil, err
+	}
+
+	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
+	for _, layerID := range layers {
+		what := fmt.Sprintf("layer %q", layerID)
+		if i.squash {
+			what = fmt.Sprintf("container %q", i.containerID)
+		}
+		// The default layer media type assumes no compression.
+		omediaType := v1.MediaTypeImageLayer
+		dmediaType := docker.V2S2MediaTypeUncompressedLayer
+		// If we're not re-exporting the data, and we're reusing layers individually, reuse
+		// the blobsum and diff IDs.
+		if !i.exporting && !i.squash && layerID != i.layerID {
+			layer, err2 := i.store.Layer(layerID)
+			if err2 != nil {
+				return nil, errors.Wrapf(err2, "unable to locate layer %q", layerID)
+			}
+			if layer.UncompressedDigest == "" {
+				return nil, errors.Errorf("unable to look up size of layer %q", layerID)
+			}
+			layerBlobSum := layer.UncompressedDigest
+			layerBlobSize := layer.UncompressedSize
+			// Note this layer in the manifest, using the uncompressed blobsum.
+			olayerDescriptor := v1.Descriptor{
+				MediaType: omediaType,
+				Digest:    layerBlobSum,
+				Size:      layerBlobSize,
+			}
+			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
+			dlayerDescriptor := docker.V2S2Descriptor{
+				MediaType: dmediaType,
+				Digest:    layerBlobSum,
+				Size:      layerBlobSize,
+			}
+			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
+			// Note this layer in the list of diffIDs, again using the uncompressed blobsum.
+			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
+			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
+			continue
+		}
+		// Figure out if we need to change the media type, in case we're using compression.
+		omediaType, dmediaType, err = i.computeLayerMIMEType(what)
+		if err != nil {
+			return nil, err
+		}
+		// Start reading either the layer or the whole container rootfs.
+ noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + var rc io.ReadCloser + if i.squash { + // Extract the root filesystem as a single layer. + rc, err = i.extractRootfs() + if err != nil { + return nil, err + } + defer rc.Close() + } else { + // Extract this layer, one of possibly many. + rc, err = i.store.Diff("", layerID, diffOptions) + if err != nil { + return nil, errors.Wrapf(err, "error extracting %s", what) + } + defer rc.Close() + } + srcHasher := digest.Canonical.Digester() + reader := io.TeeReader(rc, srcHasher.Hash()) + // Set up to write the possibly-recompressed blob. + layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return nil, errors.Wrapf(err, "error opening file for %s", what) + } + destHasher := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(layerFile) + multiWriter := io.MultiWriter(counter, destHasher.Hash()) + // Compress the layer, if we're recompressing it. + writer, err := archive.CompressStream(multiWriter, i.compression) + if err != nil { + return nil, errors.Wrapf(err, "error compressing %s", what) + } + size, err := io.Copy(writer, reader) + if err != nil { + return nil, errors.Wrapf(err, "error storing %s to file", what) + } + writer.Close() + layerFile.Close() + if i.compression == archive.Uncompressed { + if size != counter.Count { + return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) + } + } else { + size = counter.Count + } + logrus.Debugf("%s size is %d bytes", what, size) + // Rename the layer so that we can more easily find it by digest later. + err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) + if err != nil { + return nil, errors.Wrapf(err, "error storing %s to file", what) + } + // Add a note in the manifest about the layer. The blobs are identified by their possibly- + // compressed blob digests. + olayerDescriptor := v1.Descriptor{ + MediaType: omediaType, + Digest: destHasher.Digest(), + Size: size, + } + omanifest.Layers = append(omanifest.Layers, olayerDescriptor) + dlayerDescriptor := docker.V2S2Descriptor{ + MediaType: dmediaType, + Digest: destHasher.Digest(), + Size: size, + } + dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) + // Add a note about the diffID, which is always the layer's uncompressed digest. + oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) + dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) + } + + // Build history notes in the image configurations. + onews := v1.History{ + Created: &i.created, + CreatedBy: i.createdBy, + Author: oimage.Author, + Comment: i.historyComment, + EmptyLayer: false, + } + oimage.History = append(oimage.History, onews) + dnews := docker.V2S2History{ + Created: i.created, + CreatedBy: i.createdBy, + Author: dimage.Author, + Comment: i.historyComment, + EmptyLayer: false, + } + dimage.History = append(dimage.History, dnews) + dimage.Parent = docker.ID(digest.FromString(i.parent)) + + // Sanity check that we didn't just create a mismatch between non-empty layers in the + // history and the number of diffIDs. 
+ expectedDiffIDs := expectedOCIDiffIDs(oimage) + if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { + return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs)) + } + expectedDiffIDs = expectedDockerDiffIDs(dimage) + if len(dimage.RootFS.DiffIDs) != expectedDiffIDs { + return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs)) + } + + // Encode the image configuration blob. + oconfig, err := json.Marshal(&oimage) + if err != nil { + return nil, err + } + logrus.Debugf("OCIv1 config = %s", oconfig) + + // Add the configuration blob to the manifest. + omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) + omanifest.Config.Size = int64(len(oconfig)) + omanifest.Config.MediaType = v1.MediaTypeImageConfig + + // Encode the manifest. + omanifestbytes, err := json.Marshal(&omanifest) + if err != nil { + return nil, err + } + logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) + + // Encode the image configuration blob. + dconfig, err := json.Marshal(&dimage) + if err != nil { + return nil, err + } + logrus.Debugf("Docker v2s2 config = %s", dconfig) + + // Add the configuration blob to the manifest. + dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) + dmanifest.Config.Size = int64(len(dconfig)) + dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType + + // Encode the manifest. + dmanifestbytes, err := json.Marshal(&dmanifest) + if err != nil { + return nil, err + } + logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) + + // Decide which manifest and configuration blobs we'll actually output. + var config []byte + var imageManifest []byte + switch manifestType { + case v1.MediaTypeImageManifest: + imageManifest = omanifestbytes + config = oconfig + case manifest.DockerV2Schema2MediaType: + imageManifest = dmanifestbytes + config = dconfig + default: + panic("unreachable code: unsupported manifest type") + } + src = &containerImageSource{ + path: path, + ref: i, + store: i.store, + containerID: i.containerID, + mountLabel: i.mountLabel, + layerID: i.layerID, + names: i.names, + compression: i.compression, + config: config, + configDigest: digest.Canonical.FromBytes(config), + manifest: imageManifest, + manifestType: manifestType, + exporting: i.exporting, + } + return src, nil +} + +func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.Errorf("can't write to a container") +} + +func (i *containerImageRef) DockerReference() reference.Named { + return i.name +} + +func (i *containerImageRef) StringWithinTransport() string { + if len(i.names) > 0 { + return i.names[0] + } + return "" +} + +func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error { + // we were never here + return nil +} + +func (i *containerImageRef) PolicyConfigurationIdentity() string { + return "" +} + +func (i *containerImageRef) PolicyConfigurationNamespaces() []string { + return nil +} + +func (i *containerImageRef) Transport() types.ImageTransport { + return is.Transport +} + +func (i *containerImageSource) Close() error { + err := os.RemoveAll(i.path) + if err != nil { + logrus.Errorf("error removing %q: %v", i.path, err) + } + return err +} + +func (i *containerImageSource) Reference() types.ImageReference { + return i.ref +} + +func (i *containerImageSource) 
GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.Errorf("containerImageSource does not support manifest lists") + } + return nil, nil +} + +func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf("containerImageSource does not support manifest lists") + } + return i.manifest, i.manifestType, nil +} + +func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { + return nil, nil +} + +func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) { + if blob.Digest == i.configDigest { + logrus.Debugf("start reading config") + reader := bytes.NewReader(i.config) + closer := func() error { + logrus.Debugf("finished reading config") + return nil + } + return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil + } + layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600) + if err != nil { + logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) + return nil, -1, err + } + size = -1 + st, err := layerFile.Stat() + if err != nil { + logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err) + } else { + size = st.Size() + } + logrus.Debugf("reading layer %q", blob.Digest.String()) + closer := func() error { + layerFile.Close() + logrus.Debugf("finished reading layer %q", blob.Digest.String()) + return nil + } + return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil +} + +func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { + var name reference.Named + container, err := b.store.Container(b.ContainerID) + if err != nil { + return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID) + } + if len(container.Names) > 0 { + if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil { + name = parsed + } + } + if manifestType == "" { + manifestType = OCIv1ImageManifest + } + oconfig, err := json.Marshal(&b.OCIv1) + if err != nil { + return nil, errors.Wrapf(err, "error encoding OCI-format image configuration") + } + dconfig, err := json.Marshal(&b.Docker) + if err != nil { + return nil, errors.Wrapf(err, "error encoding docker-format image configuration") + } + created := time.Now().UTC() + if historyTimestamp != nil { + created = historyTimestamp.UTC() + } + + ref := &containerImageRef{ + store: b.store, + compression: compress, + name: name, + names: container.Names, + containerID: container.ID, + mountLabel: b.MountLabel, + layerID: container.LayerID, + oconfig: oconfig, + dconfig: dconfig, + created: created, + createdBy: b.CreatedBy(), + historyComment: b.HistoryComment(), + annotations: b.Annotations(), + preferredManifestType: manifestType, + exporting: exporting, + squash: squash, + tarPath: b.tarPath(), + parent: parent, + } + return ref, nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go new file mode 100644 index 000000000..4bcd38c05 --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -0,0 +1,1337 @@ +package imagebuildah + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + 
"path/filepath" + "strconv" + "strings" + "time" + + "github.com/containers/buildah" + "github.com/containers/buildah/util" + cp "github.com/containers/image/copy" + is "github.com/containers/image/storage" + "github.com/containers/image/transports" + "github.com/containers/image/transports/alltransports" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/stringid" + "github.com/docker/docker/builder/dockerfile/parser" + docker "github.com/fsouza/go-dockerclient" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/openshift/imagebuilder" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + PullIfMissing = buildah.PullIfMissing + PullAlways = buildah.PullAlways + PullNever = buildah.PullNever + + Gzip = archive.Gzip + Bzip2 = archive.Bzip2 + Xz = archive.Xz + Uncompressed = archive.Uncompressed +) + +// Mount is a mountpoint for the build container. +type Mount specs.Mount + +// BuildOptions can be used to alter how an image is built. +type BuildOptions struct { + // ContextDirectory is the default source location for COPY and ADD + // commands. + ContextDirectory string + // PullPolicy controls whether or not we pull images. It should be one + // of PullIfMissing, PullAlways, or PullNever. + PullPolicy buildah.PullPolicy + // Registry is a value which is prepended to the image's name, if it + // needs to be pulled and the image name alone can not be resolved to a + // reference to a source image. No separator is implicitly added. + Registry string + // Transport is a value which is prepended to the image's name, if it + // needs to be pulled and the image name alone, or the image name and + // the registry together, can not be resolved to a reference to a + // source image. No separator is implicitly added. + Transport string + // IgnoreUnrecognizedInstructions tells us to just log instructions we + // don't recognize, and try to keep going. + IgnoreUnrecognizedInstructions bool + // Quiet tells us whether or not to announce steps as we go through them. + Quiet bool + // Isolation controls how Run() runs things. + Isolation buildah.Isolation + // Runtime is the name of the command to run for RUN instructions when + // Isolation is either IsolationDefault or IsolationOCI. It should + // accept the same arguments and flags that runc does. + Runtime string + // RuntimeArgs adds global arguments for the runtime. + RuntimeArgs []string + // TransientMounts is a list of mounts that won't be kept in the image. + TransientMounts []Mount + // Compression specifies the type of compression which is applied to + // layer blobs. The default is to not use compression, but + // archive.Gzip is recommended. + Compression archive.Compression + // Arguments which can be interpolated into Dockerfiles + Args map[string]string + // Name of the image to write to. + Output string + // Additional tags to add to the image that we write, if we know of a + // way to add them. + AdditionalTags []string + // Log is a callback that will print a progress message. If no value + // is supplied, the message will be sent to Err (or os.Stderr, if Err + // is nil) by default. + Log func(format string, args ...interface{}) + // In is connected to stdin for RUN instructions. + In io.Reader + // Out is a place where non-error log messages are sent. + Out io.Writer + // Err is a place where error log messages should be sent. 
+	Err io.Writer
+	// SignaturePolicyPath specifies an override location for the signature
+	// policy which should be used for verifying the new image as it is
+	// being written.  Except in specific circumstances, no value should be
+	// specified, indicating that the shared, system-wide default policy
+	// should be used.
+	SignaturePolicyPath string
+	// ReportWriter is an io.Writer which will be used to report the
+	// progress of the (possible) pulling of the source image and the
+	// writing of the new image.
+	ReportWriter io.Writer
+	// OutputFormat is the format of the output image's manifest and
+	// configuration data.
+	// Accepted values are buildah.OCIv1ImageManifest and buildah.Dockerv2ImageManifest.
+	OutputFormat string
+	// SystemContext holds parameters used for authentication.
+	SystemContext *types.SystemContext
+	// NamespaceOptions controls how we set up namespaces for processes that
+	// we might need when handling RUN instructions.
+	NamespaceOptions []buildah.NamespaceOption
+	// ConfigureNetwork controls whether or not network interfaces and
+	// routing are configured for a new network namespace (i.e., when not
+	// joining another's namespace and not just using the host's
+	// namespace), effectively deciding whether or not the process has a
+	// usable network.
+	ConfigureNetwork buildah.NetworkConfigurationPolicy
+	// CNIPluginPath is the location of CNI plugin helpers, if they should be
+	// run from a location other than the default location.
+	CNIPluginPath string
+	// CNIConfigDir is the location of CNI configuration files, if the files in
+	// the default configuration directory shouldn't be used.
+	CNIConfigDir string
+	// IDMappingOptions are the ID mapping options to use if we're setting up
+	// our own user namespace when handling RUN instructions.
+	IDMappingOptions *buildah.IDMappingOptions
+	// AddCapabilities is a list of capabilities to add to the default set when
+	// handling RUN instructions.
+	AddCapabilities []string
+	// DropCapabilities is a list of capabilities to remove from the default set
+	// when handling RUN instructions.  If a capability appears in both lists, it
+	// will be dropped.
+	DropCapabilities []string
+	// CommonBuildOpts holds resource limits and security settings which are
+	// also applied when handling RUN instructions.
+	CommonBuildOpts *buildah.CommonBuildOptions
+	// DefaultMountsFilePath is the file path holding the mounts to be mounted in
+	// "host-path:container-path" format.
+	DefaultMountsFilePath string
+	// IIDFile tells the builder to write the image ID to the specified file.
+	IIDFile string
+	// Squash tells the builder to produce an image with a single layer
+	// instead of with possibly more than one layer.
+	Squash bool
+	// Labels is a list of key=value label specifications to apply to the image.
+	Labels []string
+	// Annotations is a list of key=value annotation specifications to apply to the image.
+	Annotations []string
+	// OnBuild is a list of commands to be run by images based on this image.
+	OnBuild []string
+	// Layers tells the builder to create a cache of images for each step in the Dockerfile.
+	Layers bool
+	// NoCache tells the builder to build the image from scratch without checking for a cache.
+	// It creates a new set of cached images for the build.
+	NoCache bool
+	// RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
+	// during the build process.  Default is true.
+	RemoveIntermediateCtrs bool
+	// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
+	// the build was unsuccessful.
+	ForceRmIntermediateCtrs bool
+}
+
+// Executor is a buildah-based implementation of the imagebuilder.Executor
+// interface.
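+//
+// An Executor is not normally constructed directly; a hypothetical caller
+// fills in a BuildOptions and goes through NewExecutor (the values below are
+// illustrative only):
+//
+//	exec, err := NewExecutor(store, BuildOptions{
+//		ContextDirectory: "/tmp/build",
+//		PullPolicy:       buildah.PullIfMissing,
+//		Compression:      archive.Gzip,
+//		Output:           "localhost/example:latest",
+//	})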
+type Executor struct {
+	index                          int
+	name                           string
+	named                          map[string]*Executor
+	store                          storage.Store
+	contextDir                     string
+	builder                        *buildah.Builder
+	pullPolicy                     buildah.PullPolicy
+	registry                       string
+	transport                      string
+	ignoreUnrecognizedInstructions bool
+	quiet                          bool
+	runtime                        string
+	runtimeArgs                    []string
+	transientMounts                []Mount
+	compression                    archive.Compression
+	output                         string
+	outputFormat                   string
+	additionalTags                 []string
+	log                            func(format string, args ...interface{})
+	in                             io.Reader
+	out                            io.Writer
+	err                            io.Writer
+	signaturePolicyPath            string
+	systemContext                  *types.SystemContext
+	mountPoint                     string
+	preserved                      int
+	volumes                        imagebuilder.VolumeSet
+	volumeCache                    map[string]string
+	volumeCacheInfo                map[string]os.FileInfo
+	reportWriter                   io.Writer
+	isolation                      buildah.Isolation
+	namespaceOptions               []buildah.NamespaceOption
+	configureNetwork               buildah.NetworkConfigurationPolicy
+	cniPluginPath                  string
+	cniConfigDir                   string
+	idmappingOptions               *buildah.IDMappingOptions
+	commonBuildOptions             *buildah.CommonBuildOptions
+	defaultMountsFilePath          string
+	iidfile                        string
+	squash                         bool
+	labels                         []string
+	annotations                    []string
+	onbuild                        []string
+	layers                         bool
+	topLayers                      []string
+	noCache                        bool
+	removeIntermediateCtrs         bool
+	forceRmIntermediateCtrs        bool
+	containerIDs                   []string // Stores the IDs of the successful intermediate containers used during layer build
+}
+
+// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
+func (b *Executor) withName(name string, index int) *Executor {
+	if b.named == nil {
+		b.named = make(map[string]*Executor)
+	}
+	copied := *b
+	copied.index = index
+	copied.name = name
+	child := &copied
+	b.named[name] = child
+	if idx := strconv.Itoa(index); idx != name {
+		b.named[idx] = child
+	}
+	return child
+}
+
+// Preserve informs the executor that from this point on, it needs to ensure
+// that only COPY and ADD instructions can modify the contents of this
+// directory or anything below it.
+// The Executor handles this by caching the contents of directories which have
+// been marked this way before executing a RUN instruction, invalidating that
+// cache when an ADD or COPY instruction sets any location under the directory
+// as the destination, and using the cache to reset the contents of the
+// directory tree after processing each RUN instruction.
+// It would be simpler if we could just mark the directory as a read-only bind
+// mount of itself during Run(), but the directory is expected to remain
+// writeable, even if any changes within it are ultimately discarded.
+func (b *Executor) Preserve(path string) error {
+	logrus.Debugf("PRESERVE %q", path)
+	if b.volumes.Covers(path) {
+		// This path is already a subdirectory of a volume path that
+		// we're already preserving, so there's nothing new to be done
+		// except ensure that it exists.
+		archivedPath := filepath.Join(b.mountPoint, path)
+		if err := os.MkdirAll(archivedPath, 0755); err != nil {
+			return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+		}
+		if err := b.volumeCacheInvalidate(path); err != nil {
+			return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
+		}
+		return nil
+	}
+	// Figure out where the cache for this volume would be stored.
+ b.preserved++ + cacheDir, err := b.store.ContainerDirectory(b.builder.ContainerID) + if err != nil { + return errors.Errorf("unable to locate temporary directory for container") + } + cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", b.preserved)) + // Save info about the top level of the location that we'll be archiving. + archivedPath := filepath.Join(b.mountPoint, path) + + // Try and resolve the symlink (if one exists) + // Set archivedPath and path based on whether a symlink is found or not + if symLink, err := resolveSymLink(b.mountPoint, path); err == nil { + archivedPath = filepath.Join(b.mountPoint, symLink) + path = symLink + } else { + return errors.Wrapf(err, "error reading symbolic link to %q", path) + } + + st, err := os.Stat(archivedPath) + if os.IsNotExist(err) { + if err = os.MkdirAll(archivedPath, 0755); err != nil { + return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath) + } + st, err = os.Stat(archivedPath) + } + if err != nil { + logrus.Debugf("error reading info about %q: %v", archivedPath, err) + return errors.Wrapf(err, "error reading info about volume path %q", archivedPath) + } + b.volumeCacheInfo[path] = st + if !b.volumes.Add(path) { + // This path is not a subdirectory of a volume path that we're + // already preserving, so adding it to the list should work. + return errors.Errorf("error adding %q to the volume cache", path) + } + b.volumeCache[path] = cacheFile + // Now prune cache files for volumes that are now supplanted by this one. + removed := []string{} + for cachedPath := range b.volumeCache { + // Walk our list of cached volumes, and check that they're + // still in the list of locations that we need to cache. + found := false + for _, volume := range b.volumes { + if volume == cachedPath { + // We need to keep this volume's cache. + found = true + break + } + } + if !found { + // We don't need to keep this volume's cache. Make a + // note to remove it. + removed = append(removed, cachedPath) + } + } + // Actually remove the caches that we decided to remove. + for _, cachedPath := range removed { + archivedPath := filepath.Join(b.mountPoint, cachedPath) + logrus.Debugf("no longer need cache of %q in %q", archivedPath, b.volumeCache[cachedPath]) + if err := os.Remove(b.volumeCache[cachedPath]); err != nil { + return errors.Wrapf(err, "error removing %q", b.volumeCache[cachedPath]) + } + delete(b.volumeCache, cachedPath) + } + return nil +} + +// Remove any volume cache item which will need to be re-saved because we're +// writing to part of it. +func (b *Executor) volumeCacheInvalidate(path string) error { + invalidated := []string{} + for cachedPath := range b.volumeCache { + if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) { + invalidated = append(invalidated, cachedPath) + } + } + for _, cachedPath := range invalidated { + if err := os.Remove(b.volumeCache[cachedPath]); err != nil { + return errors.Wrapf(err, "error removing volume cache %q", b.volumeCache[cachedPath]) + } + archivedPath := filepath.Join(b.mountPoint, cachedPath) + logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, b.volumeCache[cachedPath]) + delete(b.volumeCache, cachedPath) + } + return nil +} + +// Save the contents of each of the executor's list of volumes for which we +// don't already have a cache file. 
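+// Run() below brackets each command with this save and the matching restore,
+// following the pattern (sketch):
+//
+//	if err := b.volumeCacheSave(); err != nil {
+//		return err
+//	}
+//	err := b.builder.Run(args, options)
+//	if err2 := b.volumeCacheRestore(); err2 != nil && err == nil {
+//		err = err2
+//	}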
+func (b *Executor) volumeCacheSave() error { + for cachedPath, cacheFile := range b.volumeCache { + archivedPath := filepath.Join(b.mountPoint, cachedPath) + _, err := os.Stat(cacheFile) + if err == nil { + logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile) + continue + } + if !os.IsNotExist(err) { + return errors.Wrapf(err, "error checking for cache of %q in %q", archivedPath, cacheFile) + } + if err := os.MkdirAll(archivedPath, 0755); err != nil { + return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath) + } + logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile) + cache, err := os.Create(cacheFile) + if err != nil { + return errors.Wrapf(err, "error creating archive at %q", cacheFile) + } + defer cache.Close() + rc, err := archive.Tar(archivedPath, archive.Uncompressed) + if err != nil { + return errors.Wrapf(err, "error archiving %q", archivedPath) + } + defer rc.Close() + _, err = io.Copy(cache, rc) + if err != nil { + return errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile) + } + } + return nil +} + +// Restore the contents of each of the executor's list of volumes. +func (b *Executor) volumeCacheRestore() error { + for cachedPath, cacheFile := range b.volumeCache { + archivedPath := filepath.Join(b.mountPoint, cachedPath) + logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile) + cache, err := os.Open(cacheFile) + if err != nil { + return errors.Wrapf(err, "error opening archive at %q", cacheFile) + } + defer cache.Close() + if err := os.RemoveAll(archivedPath); err != nil { + return errors.Wrapf(err, "error clearing volume path %q", archivedPath) + } + if err := os.MkdirAll(archivedPath, 0755); err != nil { + return errors.Wrapf(err, "error recreating volume path %q", archivedPath) + } + err = archive.Untar(cache, archivedPath, nil) + if err != nil { + return errors.Wrapf(err, "error extracting archive at %q", archivedPath) + } + if st, ok := b.volumeCacheInfo[cachedPath]; ok { + if err := os.Chmod(archivedPath, st.Mode()); err != nil { + return errors.Wrapf(err, "error restoring permissions on %q", archivedPath) + } + if err := os.Chown(archivedPath, 0, 0); err != nil { + return errors.Wrapf(err, "error setting ownership on %q", archivedPath) + } + if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil { + return errors.Wrapf(err, "error restoring datestamps on %q", archivedPath) + } + } + } + return nil +} + +// Copy copies data into the working tree. The "Download" field is how +// imagebuilder tells us the instruction was "ADD" and not "COPY". 
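+// A hypothetical single-file copy from the build context (illustrative only):
+//
+//	err := b.Copy(nil, imagebuilder.Copy{
+//		Src:      []string{"app.conf"},
+//		Dest:     "/etc/app.conf",
+//		Download: false,
+//	})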
+func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
+	for _, copy := range copies {
+		logrus.Debugf("COPY %#v, %#v", excludes, copy)
+		if err := b.volumeCacheInvalidate(copy.Dest); err != nil {
+			return err
+		}
+		sources := []string{}
+		for _, src := range copy.Src {
+			if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
+				sources = append(sources, src)
+			} else if len(copy.From) > 0 {
+				if other, ok := b.named[copy.From]; ok && other.index < b.index {
+					sources = append(sources, filepath.Join(other.mountPoint, src))
+				} else {
+					return errors.Errorf("the stage %q has not been built", copy.From)
+				}
+			} else {
+				sources = append(sources, filepath.Join(b.contextDir, src))
+			}
+		}
+
+		options := buildah.AddAndCopyOptions{
+			Chown: copy.Chown,
+		}
+
+		if err := b.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func convertMounts(mounts []Mount) []specs.Mount {
+	specmounts := []specs.Mount{}
+	for _, m := range mounts {
+		s := specs.Mount{
+			Destination: m.Destination,
+			Type:        m.Type,
+			Source:      m.Source,
+			Options:     m.Options,
+		}
+		specmounts = append(specmounts, s)
+	}
+	return specmounts
+}
+
+// Run executes a RUN instruction using the working container as a root
+// directory.
+func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
+	logrus.Debugf("RUN %#v, %#v", run, config)
+	if b.builder == nil {
+		return errors.Errorf("no build container available")
+	}
+	stdin := b.in
+	if stdin == nil {
+		devNull, err := os.Open(os.DevNull)
+		if err != nil {
+			return errors.Errorf("error opening %q for reading: %v", os.DevNull, err)
+		}
+		defer devNull.Close()
+		stdin = devNull
+	}
+	options := buildah.RunOptions{
+		Hostname:   config.Hostname,
+		Runtime:    b.runtime,
+		Args:       b.runtimeArgs,
+		Mounts:     convertMounts(b.transientMounts),
+		Env:        config.Env,
+		User:       config.User,
+		WorkingDir: config.WorkingDir,
+		Entrypoint: config.Entrypoint,
+		Cmd:        config.Cmd,
+		Stdin:      stdin,
+		Stdout:     b.out,
+		Stderr:     b.err,
+		Quiet:      b.quiet,
+	}
+	if config.NetworkDisabled {
+		options.ConfigureNetwork = buildah.NetworkDisabled
+	} else {
+		options.ConfigureNetwork = buildah.NetworkEnabled
+	}
+
+	args := run.Args
+	if run.Shell {
+		args = append([]string{"/bin/sh", "-c"}, args...)
+	}
+	if err := b.volumeCacheSave(); err != nil {
+		return err
+	}
+	err := b.builder.Run(args, options)
+	if err2 := b.volumeCacheRestore(); err2 != nil {
+		if err == nil {
+			return err2
+		}
+	}
+	return err
+}
+
+// UnrecognizedInstruction is called when we encounter an instruction that the
+// imagebuilder parser didn't understand.
+func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error {
+	errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command)
+	errMsg := fmt.Sprintf("%s%#v", errStr, step)
+	if b.ignoreUnrecognizedInstructions {
+		logrus.Debug(errMsg)
+		return nil
+	}
+
+	switch logrus.GetLevel() {
+	case logrus.ErrorLevel:
+		logrus.Error(errStr)
+	case logrus.DebugLevel:
+		logrus.Debug(errMsg)
+	default:
+		logrus.Errorf("+(UNHANDLED LOGLEVEL) %#v", step)
+	}
+
+	return errors.New(errMsg)
+}
+
+// NewExecutor creates a new instance of the imagebuilder.Executor interface.
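+// When options.Log is nil, NewExecutor installs a default callback that
+// numbers the steps and writes them to the error stream, producing output of
+// the form (Dockerfile contents assumed for illustration):
+//
+//	STEP 1: FROM alpine
+//	STEP 2: RUN apk add --no-cache ca-certificates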
+func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { + exec := Executor{ + store: store, + contextDir: options.ContextDirectory, + pullPolicy: options.PullPolicy, + registry: options.Registry, + transport: options.Transport, + ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, + quiet: options.Quiet, + runtime: options.Runtime, + runtimeArgs: options.RuntimeArgs, + transientMounts: options.TransientMounts, + compression: options.Compression, + output: options.Output, + outputFormat: options.OutputFormat, + additionalTags: options.AdditionalTags, + signaturePolicyPath: options.SignaturePolicyPath, + systemContext: options.SystemContext, + volumeCache: make(map[string]string), + volumeCacheInfo: make(map[string]os.FileInfo), + log: options.Log, + in: options.In, + out: options.Out, + err: options.Err, + reportWriter: options.ReportWriter, + isolation: options.Isolation, + namespaceOptions: options.NamespaceOptions, + configureNetwork: options.ConfigureNetwork, + cniPluginPath: options.CNIPluginPath, + cniConfigDir: options.CNIConfigDir, + idmappingOptions: options.IDMappingOptions, + commonBuildOptions: options.CommonBuildOpts, + defaultMountsFilePath: options.DefaultMountsFilePath, + iidfile: options.IIDFile, + squash: options.Squash, + labels: append([]string{}, options.Labels...), + annotations: append([]string{}, options.Annotations...), + layers: options.Layers, + noCache: options.NoCache, + removeIntermediateCtrs: options.RemoveIntermediateCtrs, + forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, + } + if exec.err == nil { + exec.err = os.Stderr + } + if exec.out == nil { + exec.out = os.Stdout + } + if exec.log == nil { + stepCounter := 0 + exec.log = func(format string, args ...interface{}) { + stepCounter++ + prefix := fmt.Sprintf("STEP %d: ", stepCounter) + suffix := "\n" + fmt.Fprintf(exec.err, prefix+format+suffix, args...) + } + } + return &exec, nil +} + +// Prepare creates a working container based on specified image, or if one +// isn't specified, the first FROM instruction we can find in the parsed tree. 
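+// Passing from == "" lets imagebuilder derive the base image from the node's
+// FROM instruction, which is how Build() below drives it (sketch):
+//
+//	if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
+//		return err
+//	}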
+func (b *Executor) Prepare(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node, from string) error { + if from == "" { + base, err := ib.From(node) + if err != nil { + logrus.Debugf("Prepare(node.Children=%#v)", node.Children) + return errors.Wrapf(err, "error determining starting point for build") + } + from = base + } + logrus.Debugf("FROM %#v", from) + if !b.quiet { + b.log("FROM %s", from) + } + builderOptions := buildah.BuilderOptions{ + Args: ib.Args, + FromImage: from, + PullPolicy: b.pullPolicy, + Registry: b.registry, + Transport: b.transport, + SignaturePolicyPath: b.signaturePolicyPath, + ReportWriter: b.reportWriter, + SystemContext: b.systemContext, + Isolation: b.isolation, + NamespaceOptions: b.namespaceOptions, + ConfigureNetwork: b.configureNetwork, + CNIPluginPath: b.cniPluginPath, + CNIConfigDir: b.cniConfigDir, + IDMappingOptions: b.idmappingOptions, + CommonBuildOpts: b.commonBuildOptions, + DefaultMountsFilePath: b.defaultMountsFilePath, + Format: b.outputFormat, + } + builder, err := buildah.NewBuilder(ctx, b.store, builderOptions) + if err != nil { + return errors.Wrapf(err, "error creating build container") + } + volumes := map[string]struct{}{} + for _, v := range builder.Volumes() { + volumes[v] = struct{}{} + } + dConfig := docker.Config{ + Hostname: builder.Hostname(), + Domainname: builder.Domainname(), + User: builder.User(), + Env: builder.Env(), + Cmd: builder.Cmd(), + Image: from, + Volumes: volumes, + WorkingDir: builder.WorkDir(), + Entrypoint: builder.Entrypoint(), + Labels: builder.Labels(), + Shell: builder.Shell(), + StopSignal: builder.StopSignal(), + OnBuild: builder.OnBuild(), + } + var rootfs *docker.RootFS + if builder.Docker.RootFS != nil { + rootfs = &docker.RootFS{ + Type: builder.Docker.RootFS.Type, + } + for _, id := range builder.Docker.RootFS.DiffIDs { + rootfs.Layers = append(rootfs.Layers, id.String()) + } + } + dImage := docker.Image{ + Parent: builder.FromImage, + ContainerConfig: dConfig, + Container: builder.Container, + Author: builder.Maintainer(), + Architecture: builder.Architecture(), + RootFS: rootfs, + } + dImage.Config = &dImage.ContainerConfig + err = ib.FromImage(&dImage, node) + if err != nil { + if err2 := builder.Delete(); err2 != nil { + logrus.Debugf("error deleting container which we failed to update: %v", err2) + } + return errors.Wrapf(err, "error updating build context") + } + mountPoint, err := builder.Mount(builder.MountLabel) + if err != nil { + if err2 := builder.Delete(); err2 != nil { + logrus.Debugf("error deleting container which we failed to mount: %v", err2) + } + return errors.Wrapf(err, "error mounting new container") + } + b.mountPoint = mountPoint + b.builder = builder + // Add the top layer of this image to b.topLayers so we can keep track of them + // when building with cached images. + b.topLayers = append(b.topLayers, builder.TopLayer) + logrus.Debugln("Container ID:", builder.ContainerID) + return nil +} + +// Delete deletes the working container, if we have one. The Executor object +// should not be used to build another image, as the name of the output image +// isn't resettable. 
+func (b *Executor) Delete() (err error) {
+	if b.builder != nil {
+		err = b.builder.Delete()
+		b.builder = nil
+	}
+	return err
+}
+
+// resolveNameToImageRef creates a types.ImageReference from b.output
+func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
+	var (
+		imageRef types.ImageReference
+		err      error
+	)
+	if b.output != "" {
+		imageRef, err = alltransports.ParseImageName(b.output)
+		if err != nil {
+			candidates, err := util.ResolveName(b.output, "", b.systemContext, b.store)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
+			}
+			if len(candidates) == 0 {
+				return nil, errors.Errorf("error parsing target image name %q", b.output)
+			}
+			imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
+			if err2 != nil {
+				return nil, errors.Wrapf(err2, "error parsing target image name %q", b.output)
+			}
+			return imageRef2, nil
+		}
+		return imageRef, nil
+	}
+	imageRef, err = is.Transport.ParseStoreReference(b.store, "@"+stringid.GenerateRandomID())
+	if err != nil {
+		return nil, errors.Wrapf(err, "error parsing reference for image to be written")
+	}
+	return imageRef, nil
+}
+
+// Execute runs each of the steps in the parsed tree, in turn.
+func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node) error {
+	checkForLayers := true
+	children := node.Children
+	commitName := b.output
+	for i, node := range node.Children {
+		step := ib.Step()
+		if err := step.Resolve(node); err != nil {
+			return errors.Wrapf(err, "error resolving step %+v", *node)
+		}
+		logrus.Debugf("Parsed Step: %+v", *step)
+		if !b.quiet {
+			b.log("%s", step.Original)
+		}
+		requiresStart := false
+		if i < len(node.Children)-1 {
+			requiresStart = ib.RequiresStart(&parser.Node{Children: node.Children[i+1:]})
+		}
+
+		if !b.layers && !b.noCache {
+			err := ib.Run(step, b, requiresStart)
+			if err != nil {
+				return errors.Wrapf(err, "error building at step %+v", *step)
+			}
+			continue
+		}
+
+		if i < len(children)-1 {
+			b.output = ""
+		} else {
+			b.output = commitName
+		}
+
+		var (
+			cacheID string
+			err     error
+			imgID   string
+		)
+		// checkForLayers will be true if b.layers is true and a cached intermediate image is found.
+		// checkForLayers is set to false when either there is no cached image or a break occurs where
+		// the instructions in the Dockerfile change from a previous build.
+		// Don't check for cache if b.noCache is set to true.
+		if checkForLayers && !b.noCache {
+			cacheID, err = b.layerExists(ctx, node, children[:i])
+			if err != nil {
+				return errors.Wrap(err, "error checking if cached image exists from a previous build")
+			}
+		}
+
+		if cacheID != "" {
+			fmt.Fprintf(b.out, "--> Using cache %s\n", cacheID)
+		}
+
+		// If a cache is found for the last step, that means nothing in the
+		// Dockerfile changed.  Just create a copy of the existing image and
+		// save it with the new name passed in by the user.
+ if cacheID != "" && i == len(children)-1 { + if err := b.copyExistingImage(ctx, cacheID); err != nil { + return err + } + break + } + + if cacheID == "" || !checkForLayers { + checkForLayers = false + err := ib.Run(step, b, requiresStart) + if err != nil { + return errors.Wrapf(err, "error building at step %+v", *step) + } + } + + // Commit if no cache is found + if cacheID == "" { + imgID, err = b.Commit(ctx, ib, getCreatedBy(node)) + if err != nil { + return errors.Wrapf(err, "error committing container for step %+v", *step) + } + if i == len(children)-1 { + b.log("COMMIT %s", b.output) + } + } else { + // Cache is found, assign imgID the id of the cached image so + // it is used to create the container for the next step. + imgID = cacheID + } + // Add container ID of successful intermediate container to b.containerIDs + b.containerIDs = append(b.containerIDs, b.builder.ContainerID) + // Prepare for the next step with imgID as the new base image. + if i != len(children)-1 { + if err := b.Prepare(ctx, ib, node, imgID); err != nil { + return errors.Wrap(err, "error preparing container for next step") + } + } + } + return nil +} + +// copyExistingImage creates a copy of an image already in store +func (b *Executor) copyExistingImage(ctx context.Context, cacheID string) error { + // Get the destination Image Reference + dest, err := b.resolveNameToImageRef() + if err != nil { + return err + } + + policyContext, err := util.GetPolicyContext(b.systemContext) + if err != nil { + return err + } + defer policyContext.Destroy() + + // Look up the source image, expecting it to be in local storage + src, err := is.Transport.ParseStoreReference(b.store, cacheID) + if err != nil { + return errors.Wrapf(err, "error getting source imageReference for %q", cacheID) + } + if err := cp.Image(ctx, policyContext, dest, src, nil); err != nil { + return errors.Wrapf(err, "error copying image %q", cacheID) + } + b.log("COMMIT %s", b.output) + return nil +} + +// layerExists returns true if an intermediate image of currNode exists in the image store from a previous build. +// It verifies tihis by checking the parent of the top layer of the image and the history. +func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, children []*parser.Node) (string, error) { + // Get the list of images available in the image store + images, err := b.store.Images() + if err != nil { + return "", errors.Wrap(err, "error getting image list from store") + } + for _, image := range images { + layer, err := b.store.Layer(image.TopLayer) + if err != nil { + return "", errors.Wrapf(err, "error getting top layer info") + } + // If the parent of the top layer of an image is equal to the last entry in b.topLayers + // it means that this image is potentially a cached intermediate image from a previous + // build. Next we double check that the history of this image is equivalent to the previous + // lines in the Dockerfile up till the point we are at in the build. + if layer.Parent == b.topLayers[len(b.topLayers)-1] { + history, err := b.getImageHistory(ctx, image.ID) + if err != nil { + return "", errors.Wrapf(err, "error getting history of %q", image.ID) + } + // children + currNode is the point of the Dockerfile we are currently at. + if historyMatches(append(children, currNode), history) { + // This checks if the files copied during build have been changed if the node is + // a COPY or ADD command. 
+				filesMatch, err := b.copiedFilesMatch(currNode, history[len(history)-1].Created)
+				if err != nil {
+					return "", errors.Wrapf(err, "error checking if copied files match")
+				}
+				if filesMatch {
+					return image.ID, nil
+				}
+			}
+		}
+	}
+	return "", nil
+}
+
+// getImageHistory returns the history of imageID.
+func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) {
+	imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting image reference %q", imageID)
+	}
+	ref, err := imageRef.NewImage(ctx, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating new image from reference")
+	}
+	// Close the image when we're done reading its configuration.
+	defer ref.Close()
+	oci, err := ref.OCIConfig(ctx)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting oci config of image %q", imageID)
+	}
+	return oci.History, nil
+}
+
+// getCreatedBy returns the command the image at node will be created by.
+func getCreatedBy(node *parser.Node) string {
+	if node.Value == "run" {
+		return "/bin/sh -c " + node.Original[4:]
+	}
+	return "/bin/sh -c #(nop) " + node.Original
+}
+
+// historyMatches returns true if the history of the image matches the lines
+// in the Dockerfile till the point of build we are at.
+// Used to verify whether a cache of the intermediate image exists and whether
+// to run the build again.
+func historyMatches(children []*parser.Node, history []v1.History) bool {
+	i := len(history) - 1
+	for j := len(children) - 1; j >= 0; j-- {
+		instruction := children[j].Original
+		if children[j].Value == "run" {
+			instruction = instruction[4:]
+		}
+		if !strings.Contains(history[i].CreatedBy, instruction) {
+			return false
+		}
+		i--
+	}
+	return true
+}
+
+// getFilesToCopy goes through node to get all the src files that are copied, added or downloaded.
+// It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix.
+// Another format is hom?.txt, which means all files that have that name format with the ? replaced by any single character.
+func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) {
+	currNode := node.Next
+	var src []string
+	for currNode.Next != nil {
+		if strings.HasPrefix(currNode.Value, "http://") || strings.HasPrefix(currNode.Value, "https://") {
+			src = append(src, currNode.Value)
+			currNode = currNode.Next
+			continue
+		}
+		matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value))
+		if err != nil {
+			return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value)
+		}
+		src = append(src, matches...)
+		currNode = currNode.Next
+	}
+	return src, nil
+}
+
+// copiedFilesMatch checks to see if the node instruction is a COPY or ADD.
+// If it is either of those two, it checks the timestamps on all the files copied/added
+// by the Dockerfile.  If the host version has a time stamp greater than the time stamp
+// of the build, the build will not use the cached version and will rebuild.
+func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) {
+	if node.Value != "add" && node.Value != "copy" {
+		return true, nil
+	}
+
+	src, err := b.getFilesToCopy(node)
+	if err != nil {
+		return false, err
+	}
+	for _, item := range src {
+		// for urls, check the Last-Modified field in the header.
+		if strings.HasPrefix(item, "http://") || strings.HasPrefix(item, "https://") {
+			urlContentNew, err := urlContentModified(item, historyTime)
+			if err != nil {
+				return false, err
+			}
+			if urlContentNew {
+				return false, nil
+			}
+			continue
+		}
+		// For local files, walk the file tree and check the time stamps.
+		timeIsGreater := false
+		err := filepath.Walk(item, func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if info.ModTime().After(*historyTime) {
+				timeIsGreater = true
+			}
+			return nil
+		})
+		if err != nil {
+			return false, errors.Wrapf(err, "error walking file tree %q", item)
+		}
+		if timeIsGreater {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+// urlContentModified sends a get request to the url and checks if the header has a value in
+// Last-Modified, and if it does, compares that time stamp to the time stamp in the history
+// of the cached image.  It returns true if there is no Last-Modified value in the header.
+func urlContentModified(url string, historyTime *time.Time) (bool, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		return false, errors.Wrapf(err, "error getting %q", url)
+	}
+	defer resp.Body.Close()
+	if lastModified := resp.Header.Get("Last-Modified"); lastModified != "" {
+		lastModifiedTime, err := time.Parse(time.RFC1123, lastModified)
+		if err != nil {
+			return false, errors.Wrapf(err, "error parsing time for %q", url)
+		}
+		return lastModifiedTime.After(*historyTime), nil
+	}
+	logrus.Debugf("Response header did not have Last-Modified %q, will rebuild.", url)
+	return true, nil
+}
+
+// Commit writes the container's contents to an image, using a passed-in tag as
+// the name if there is one, generating a unique ID-based one otherwise.
+func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string) (string, error) {
+	imageRef, err := b.resolveNameToImageRef()
+	if err != nil {
+		return "", err
+	}
+
+	if ib.Author != "" {
+		b.builder.SetMaintainer(ib.Author)
+	}
+	config := ib.Config()
+	b.builder.SetCreatedBy(createdBy)
+	b.builder.SetHostname(config.Hostname)
+	b.builder.SetDomainname(config.Domainname)
+	b.builder.SetUser(config.User)
+	b.builder.ClearPorts()
+	for p := range config.ExposedPorts {
+		b.builder.SetPort(string(p))
+	}
+	for _, envSpec := range config.Env {
+		spec := strings.SplitN(envSpec, "=", 2)
+		b.builder.SetEnv(spec[0], spec[1])
+	}
+	b.builder.SetCmd(config.Cmd)
+	b.builder.ClearVolumes()
+	for v := range config.Volumes {
+		b.builder.AddVolume(v)
+	}
+	b.builder.ClearOnBuild()
+	for _, onBuildSpec := range config.OnBuild {
+		b.builder.SetOnBuild(onBuildSpec)
+	}
+	b.builder.SetWorkDir(config.WorkingDir)
+	b.builder.SetEntrypoint(config.Entrypoint)
+	b.builder.SetShell(config.Shell)
+	b.builder.SetStopSignal(config.StopSignal)
+	b.builder.ClearLabels()
+	for k, v := range config.Labels {
+		b.builder.SetLabel(k, v)
+	}
+	for _, labelSpec := range b.labels {
+		label := strings.SplitN(labelSpec, "=", 2)
+		if len(label) > 1 {
+			b.builder.SetLabel(label[0], label[1])
+		} else {
+			b.builder.SetLabel(label[0], "")
+		}
+	}
+	for _, annotationSpec := range b.annotations {
+		annotation := strings.SplitN(annotationSpec, "=", 2)
+		if len(annotation) > 1 {
+			b.builder.SetAnnotation(annotation[0], annotation[1])
+		} else {
+			b.builder.SetAnnotation(annotation[0], "")
+		}
+	}
+	if imageRef != nil {
+		logName := transports.ImageName(imageRef)
+		logrus.Debugf("COMMIT %q", logName)
+		if !b.quiet && !b.layers && !b.noCache {
+			b.log("COMMIT %s", logName)
+		}
+	} else {
+		logrus.Debugf("COMMIT")
+		if !b.quiet && !b.layers && !b.noCache {
+			b.log("COMMIT")
+		}
+	}
+	writer := b.reportWriter
+	if b.layers || b.noCache {
+		writer = nil
+	}
+	options := buildah.CommitOptions{
+		Compression:           b.compression,
+		SignaturePolicyPath:   b.signaturePolicyPath,
+		AdditionalTags:        b.additionalTags,
+		ReportWriter:          writer,
+		PreferredManifestType: b.outputFormat,
+		IIDFile:               b.iidfile,
+		Squash:                b.squash,
+		Parent:                b.builder.FromImageID,
+	}
+	imgID, err := b.builder.Commit(ctx, imageRef, options)
+	if err != nil {
+		return "", err
+	}
+	if options.IIDFile == "" && imgID != "" {
+		fmt.Fprintf(b.out, "--> %s\n", imgID)
+	}
+	return imgID, nil
+}
+
+// Build takes care of the details of running Prepare/Execute/Commit/Delete
+// over each of the one or more parsed Dockerfiles and stages.
+func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error {
+	if len(stages) == 0 {
+		return errors.New("error building: no stages to build")
+	}
+	var (
+		stageExecutor *Executor
+		lastErr       error
+	)
+	for _, stage := range stages {
+		stageExecutor = b.withName(stage.Name, stage.Position)
+		if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
+			return err
+		}
+		// Always remove the intermediate/build containers, even if the build was unsuccessful.
+		// If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs
+		// is true.
+		if b.forceRmIntermediateCtrs || (!b.layers && !b.noCache) {
+			defer stageExecutor.Delete()
+		}
+		if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil {
+			lastErr = err
+		}
+
+		// Delete the successful intermediate containers if an error in the build
+		// process occurs and b.removeIntermediateCtrs is true.
+		if lastErr != nil {
+			if b.removeIntermediateCtrs {
+				stageExecutor.deleteSuccessfulIntermediateCtrs()
+			}
+			return lastErr
+		}
+		b.containerIDs = append(b.containerIDs, stageExecutor.containerIDs...)
+	}
+
+	if !b.layers && !b.noCache {
+		_, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
+		if err != nil {
+			return err
+		}
+	}
+	// If building with layers and b.removeIntermediateCtrs is true,
+	// only remove the intermediate containers for each step if an error
+	// during the build process doesn't occur.
+	// If the build is unsuccessful, the container created at the step
+	// the failure happened will persist in the container store.
+	// This if condition will be false if not building with layers, and
+	// the removal of intermediate/build containers will be handled by the
+	// defer statement above.
+	if b.removeIntermediateCtrs && (b.layers || b.noCache) {
+		if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
+			return errors.Errorf("failed to cleanup intermediate containers")
+		}
+	}
+	return nil
+}
+
+// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
+// URLs), creates a new Executor, and then runs Prepare/Execute/Commit/Delete
+// over the entire set of instructions.
+func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOptions, paths ...string) error {
+	if len(paths) == 0 {
+		return errors.Errorf("error building: no dockerfiles specified")
+	}
+	var dockerfiles []io.ReadCloser
+	defer func() {
+		for _, d := range dockerfiles {
+			d.Close()
+		}
+	}()
+	for _, dfile := range paths {
+		var data io.ReadCloser
+
+		if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
+			logrus.Debugf("reading remote Dockerfile %q", dfile)
+			resp, err := http.Get(dfile)
+			if err != nil {
+				return errors.Wrapf(err, "error getting %q", dfile)
+			}
+			if resp.ContentLength == 0 {
+				resp.Body.Close()
+				return errors.Errorf("no contents in %q", dfile)
+			}
+			data = resp.Body
+		} else {
+			// If the Dockerfile isn't found try prepending the
+			// context directory to it.
+			if _, err := os.Stat(dfile); os.IsNotExist(err) {
+				dfile = filepath.Join(options.ContextDirectory, dfile)
+			}
+			logrus.Debugf("reading local Dockerfile %q", dfile)
+			contents, err := os.Open(dfile)
+			if err != nil {
+				return errors.Wrapf(err, "error reading %q", dfile)
+			}
+			dinfo, err := contents.Stat()
+			if err != nil {
+				contents.Close()
+				return errors.Wrapf(err, "error reading info about %q", dfile)
+			}
+			if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
+				contents.Close()
+				return errors.Errorf("no contents in %q", dfile)
+			}
+			data = contents
+		}
+
+		// pre-process Dockerfiles with ".in" suffix
+		if strings.HasSuffix(dfile, ".in") {
+			pData, err := preprocessDockerfileContents(data, options.ContextDirectory)
+			if err != nil {
+				return err
+			}
+			data = *pData
+		}
+
+		dockerfiles = append(dockerfiles, data)
+	}
+	mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
+	if err != nil {
+		return errors.Wrapf(err, "error parsing main Dockerfile")
+	}
+	for _, d := range dockerfiles[1:] {
+		additionalNode, err := imagebuilder.ParseDockerfile(d)
+		if err != nil {
+			return errors.Wrapf(err, "error parsing additional Dockerfile")
+		}
+		mainNode.Children = append(mainNode.Children, additionalNode.Children...)
+	}
+	exec, err := NewExecutor(store, options)
+	if err != nil {
+		return errors.Wrapf(err, "error creating build executor")
+	}
+	b := imagebuilder.NewBuilder(options.Args)
+	stages := imagebuilder.NewStages(mainNode, b)
+	return exec.Build(ctx, stages)
+}
+
+// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs
+// and deletes the containers associated with those IDs.
+func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
+	var lastErr error
+	for _, ctr := range b.containerIDs {
+		if err := b.store.DeleteContainer(ctr); err != nil {
+			logrus.Errorf("error deleting build container %q: %v\n", ctr, err)
+			lastErr = err
+		}
+	}
+	return lastErr
+}
+
+// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
+// dockerfile content and will use ctxDir as the base include path.
+//
+// Note: we cannot use cmd.StdoutPipe() as cmd.Wait() closes it.
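+//
+// Because cpp is invoked with -iquote ctxDir, a hypothetical Dockerfile.in
+// can pull shared fragments out of the build context, e.g.:
+//
+//	#include "Dockerfile.common"
+//	RUN echo "built from a preprocessed Dockerfile"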
+func preprocessDockerfileContents(r io.ReadCloser, ctxDir string) (rdrCloser *io.ReadCloser, err error) {
+	cppPath := "/usr/bin/cpp"
+	if _, err = os.Stat(cppPath); err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Errorf("error: Dockerfile.in support requires %s to be installed", cppPath)
+		}
+		return nil, err
+	}
+
+	stdout := bytes.Buffer{}
+	stderr := bytes.Buffer{}
+
+	cmd := exec.Command(cppPath, "-E", "-iquote", ctxDir, "-")
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	pipe, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			pipe.Close()
+		}
+	}()
+
+	if err = cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	if _, err = io.Copy(pipe, r); err != nil {
+		return nil, err
+	}
+
+	pipe.Close()
+	if err = cmd.Wait(); err != nil {
+		if stderr.Len() > 0 {
+			err = fmt.Errorf("%v: %s", err, strings.TrimSpace(stderr.String()))
+		}
+		return nil, errors.Wrapf(err, "error pre-processing Dockerfile")
+	}
+
+	rc := ioutil.NopCloser(bytes.NewReader(stdout.Bytes()))
+	return &rc, nil
+}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
new file mode 100644
index 000000000..f1fec7f70
--- /dev/null
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink.go
@@ -0,0 +1,145 @@
+package imagebuildah
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	symlinkChrootedCommand = "chrootsymlinks-resolve"
+	maxSymlinksResolved    = 40
+)
+
+func init() {
+	reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
+}
+
+func resolveChrootedSymlinks() {
+	status := 0
+	flag.Parse()
+	if len(flag.Args()) < 1 {
+		os.Exit(1)
+	}
+	// Our first parameter is the directory to chroot into.
+	if err := unix.Chdir(flag.Arg(0)); err != nil {
+		fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
+		os.Exit(1)
+	}
+	if err := unix.Chroot(flag.Arg(0)); err != nil {
+		fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
+		os.Exit(1)
+	}
+
+	// Our second parameter is the path name to evaluate for symbolic links.
+	symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
+		os.Exit(1)
+	}
+	if _, err := os.Stdout.WriteString(symLink); err != nil {
+		fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
+		os.Exit(1)
+	}
+	os.Exit(status)
+}
+
+func resolveSymLink(rootdir, filename string) (string, error) {
+	// The child process expects a chroot and one path that
+	// will be consulted relative to the chroot directory and evaluated
+	// for any symbolic links present.
+	cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", errors.Wrapf(err, "%s", string(output))
+	}
+
+	// Hand back the resolved symlink; it will be "" if no symlink was found.
+	return string(output), nil
+}
+
+// getSymbolicLink goes through each part of the path and continues resolving symlinks as they appear.
+// It returns the full target path that "path" resolves to.
+func getSymbolicLink(rootdir, path string) (string, error) {
+	var (
+		symPath          string
+		symLinksResolved int
+	)
+
+	// Split the path, as we need to resolve each part of it in turn.
+	splitPath := strings.Split(path, "/")
+	if splitPath[0] == "" {
+		splitPath = splitPath[1:]
+		symPath = "/"
+	}
+
+	for _, p := range splitPath {
+		// If we have already resolved maxSymlinksResolved symlinks,
+		// something is terribly wrong: return an error.
+		if symLinksResolved >= maxSymlinksResolved {
+			return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
+		}
+
+		symPath = filepath.Join(symPath, p)
+		isSymlink, resolvedPath, err := hasSymlink(symPath)
+		if err != nil {
+			return "", errors.Wrapf(err, "error checking symlink for %q", symPath)
+		}
+		// If isSymlink is true, check whether resolvedPath is potentially another symlink.
+		// Keep doing this until resolvedPath is not a symlink and isSymlink is false.
+		for isSymlink {
+			// Keep track of the number of symlinks resolved. This also catches a
+			// symlink that points to itself, since that will exceed maxSymlinksResolved.
+			if symLinksResolved >= maxSymlinksResolved {
+				return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
+			}
+			isSymlink, resolvedPath, err = hasSymlink(resolvedPath)
+			if err != nil {
+				return "", errors.Wrapf(err, "error checking symlink for %q", resolvedPath)
+			}
+			symLinksResolved++
+		}
+		// Assign resolvedPath to symPath. The next iteration of the loop will append
+		// the next part of the original path and continue resolving.
+		symPath = resolvedPath
+		symLinksResolved++
+	}
+	return symPath, nil
+}
+
+// hasSymlink returns true and the target if path is a symlink;
+// otherwise it returns false and path unchanged.
+func hasSymlink(path string) (bool, string, error) {
+	info, err := os.Lstat(path)
+	if os.IsNotExist(err) {
+		if err = os.MkdirAll(path, 0755); err != nil {
+			return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+		}
+		info, err = os.Lstat(path)
+		if err != nil {
+			return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+		}
+	}
+	// Return false and the unmodified path, as path is not a symlink.
+	if info.Mode()&os.ModeSymlink != os.ModeSymlink {
+		return false, path, nil
+	}
+
+	// Read the symlink to get what it points to.
+	targetDir, err := os.Readlink(path)
+	if err != nil {
+		return false, "", errors.Wrapf(err, "error reading link %q", path)
+	}
+	// If the symlink points to a relative path, prepend the path resolved so far.
+	if !filepath.IsAbs(targetDir) {
+		targetDir = filepath.Join(path, targetDir)
+	}
+	// Run filepath.Clean to remove the ".."
from relative paths + return true, filepath.Clean(targetDir), nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go new file mode 100644 index 000000000..35dc5438a --- /dev/null +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -0,0 +1,113 @@ +package imagebuildah + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/containers/buildah" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func cloneToDirectory(url, dir string) error { + if !strings.HasPrefix(url, "git://") { + url = "git://" + url + } + logrus.Debugf("cloning %q to %q", url, dir) + cmd := exec.Command("git", "clone", url, dir) + return cmd.Run() +} + +func downloadToDirectory(url, dir string) error { + logrus.Debugf("extracting %q to %q", url, dir) + resp, err := http.Get(url) + if err != nil { + return errors.Wrapf(err, "error getting %q", url) + } + defer resp.Body.Close() + if resp.ContentLength == 0 { + return errors.Errorf("no contents in %q", url) + } + if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil { + resp1, err := http.Get(url) + if err != nil { + return errors.Wrapf(err, "error getting %q", url) + } + defer resp1.Body.Close() + body, err := ioutil.ReadAll(resp1.Body) + if err != nil { + return errors.Wrapf(err, "Failed to read %q", url) + } + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile + if err := ioutil.WriteFile(dockerfile, body, 0600); err != nil { + return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile) + } + } + return nil +} + +// TempDirForURL checks if the passed-in string looks like a URL. If it is, +// TempDirForURL creates a temporary directory, arranges for its contents to be +// the contents of that URL, and returns the temporary directory's path, along +// with the name of a subdirectory which should be used as the build context +// (which may be empty or "."). Removal of the temporary directory is the +// responsibility of the caller. If the string doesn't look like a URL, +// TempDirForURL returns empty strings and a nil error code. 
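[Editor's note: a usage sketch for TempDirForURL, which is defined next. The repository URL is only an example; the github.com shorthand resolves to the master tarball as described above, so this needs network access:

	package main

	import (
		"fmt"
		"os"

		"github.com/containers/buildah/imagebuildah"
	)

	func main() {
		name, subdir, err := imagebuildah.TempDirForURL("", "buildah", "github.com/containers/buildah")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		if name == "" {
			fmt.Println("not a URL; treat the argument as a local context directory")
			return
		}
		defer os.RemoveAll(name) // removal is the caller's responsibility
		fmt.Println("build context:", name, "subdir:", subdir)
	}
]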
+func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) { + if !strings.HasPrefix(url, "http://") && + !strings.HasPrefix(url, "https://") && + !strings.HasPrefix(url, "git://") && + !strings.HasPrefix(url, "github.com/") { + return "", "", nil + } + name, err = ioutil.TempDir(dir, prefix) + if err != nil { + return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url) + } + if strings.HasPrefix(url, "git://") { + err = cloneToDirectory(url, name) + if err != nil { + if err2 := os.Remove(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", "", err + } + return name, "", nil + } + if strings.HasPrefix(url, "github.com/") { + ghurl := url + url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl) + logrus.Debugf("resolving url %q to %q", ghurl, url) + subdir = path.Base(ghurl) + "-master" + } + if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { + err = downloadToDirectory(url, name) + if err != nil { + if err2 := os.Remove(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", subdir, err + } + return name, subdir, nil + } + logrus.Debugf("don't know how to retrieve %q", url) + if err2 := os.Remove(name); err2 != nil { + logrus.Debugf("error removing temporary directory %q: %v", name, err2) + } + return "", "", errors.Errorf("unreachable code reached") +} + +// InitReexec is a wrapper for buildah.InitReexec(). It should be called at +// the start of main(), and if it returns true, main() should return +// immediately. +func InitReexec() bool { + return buildah.InitReexec() +} diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go new file mode 100644 index 000000000..f5f156be2 --- /dev/null +++ b/vendor/github.com/containers/buildah/import.go @@ -0,0 +1,131 @@ +package buildah + +import ( + "context" + + "github.com/containers/buildah/docker" + "github.com/containers/buildah/util" + is "github.com/containers/image/storage" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) { + if imageID == "" { + return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage") + } + + uidmap, gidmap := convertStorageIDMaps(storage.DefaultStoreOptions.UIDMap, storage.DefaultStoreOptions.GIDMap) + + ref, err := is.Transport.ParseStoreReference(store, imageID) + if err != nil { + return nil, errors.Wrapf(err, "no such image %q", imageID) + } + src, err2 := ref.NewImage(ctx, systemContext) + if err2 != nil { + return nil, errors.Wrapf(err2, "error instantiating image") + } + defer src.Close() + + imageName := "" + if img, err3 := store.Image(imageID); err3 == nil { + if len(img.Names) > 0 { + imageName = img.Names[0] + } + if img.TopLayer != "" { + layer, err4 := store.Layer(img.TopLayer) + if err4 != nil { + return nil, errors.Wrapf(err4, "error reading information about image's top layer") + } + uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap) + } + } + + defaultNamespaceOptions, err := DefaultNamespaceOptions() + if err != nil { + return nil, err + } + + builder := &Builder{ + store: store, + Type: containerType, + FromImage: imageName, + FromImageID: imageID, 
+		Container:        containerName,
+		ContainerID:      containerID,
+		ImageAnnotations: map[string]string{},
+		ImageCreatedBy:   "",
+		NamespaceOptions: defaultNamespaceOptions,
+		IDMappingOptions: IDMappingOptions{
+			HostUIDMapping: len(uidmap) == 0,
+			HostGIDMapping: len(gidmap) == 0,
+			UIDMap:         uidmap,
+			GIDMap:         gidmap,
+		},
+	}
+
+	if err := builder.initConfig(ctx, src); err != nil {
+		return nil, errors.Wrapf(err, "error preparing image configuration")
+	}
+
+	return builder, nil
+}
+
+func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
+	if options.Container == "" {
+		return nil, errors.Errorf("container name must be specified")
+	}
+
+	c, err := store.Container(options.Container)
+	if err != nil {
+		return nil, err
+	}
+
+	systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
+
+	builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	if builder.FromImageID != "" {
+		if d, err2 := digest.Parse(builder.FromImageID); err2 == nil {
+			builder.Docker.Parent = docker.ID(d)
+		} else {
+			builder.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), builder.FromImageID))
+		}
+	}
+	if builder.FromImage != "" {
+		builder.Docker.ContainerConfig.Image = builder.FromImage
+	}
+	builder.IDMappingOptions.UIDMap, builder.IDMappingOptions.GIDMap = convertStorageIDMaps(c.UIDMap, c.GIDMap)
+
+	err = builder.Save()
+	if err != nil {
+		return nil, errors.Wrapf(err, "error saving builder state")
+	}
+
+	return builder, nil
+}
+
+func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
+	if options.Image == "" {
+		return nil, errors.Errorf("image name must be specified")
+	}
+
+	systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+
+	_, img, err := util.FindImage(store, "", systemContext, options.Image)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
+	}
+
+	builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
+	if err != nil {
+		return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image)
+	}
+
+	return builder, nil
+}
diff --git a/vendor/github.com/containers/buildah/mount.go b/vendor/github.com/containers/buildah/mount.go
new file mode 100644
index 000000000..4f1ae3c6e
--- /dev/null
+++ b/vendor/github.com/containers/buildah/mount.go
@@ -0,0 +1,17 @@
+package buildah
+
+// Mount mounts a container's root filesystem in a location which can be
+// accessed from the host, and returns the location.
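[Editor's note: a usage sketch for the Mount method defined next. It assumes a containers/storage store is configured and that a working container named "working-container" already exists; OpenBuilder is the same helper this patch uses elsewhere to reattach to an existing builder:

	package main

	import (
		"fmt"
		"os"

		"github.com/containers/buildah"
		"github.com/containers/storage"
	)

	func main() {
		store, err := storage.GetStore(storage.DefaultStoreOptions)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		defer store.Shutdown(false)

		builder, err := buildah.OpenBuilder(store, "working-container")
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		// Mount the container's rootfs with the builder's own mount label.
		mountPoint, err := builder.Mount(builder.MountLabel)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		fmt.Println("rootfs mounted at:", mountPoint)
	}
]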
+func (b *Builder) Mount(label string) (string, error) {
+	mountpoint, err := b.store.Mount(b.ContainerID, label)
+	if err != nil {
+		return "", err
+	}
+	b.MountPoint = mountpoint
+
+	err = b.Save()
+	if err != nil {
+		return "", err
+	}
+	return mountpoint, nil
+}
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
new file mode 100644
index 000000000..b0b655da9
--- /dev/null
+++ b/vendor/github.com/containers/buildah/new.go
@@ -0,0 +1,370 @@
+package buildah
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/buildah/util"
+	"github.com/containers/image/pkg/sysregistries"
+	is "github.com/containers/image/storage"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/transports/alltransports"
+	"github.com/containers/image/types"
+	"github.com/containers/storage"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/opencontainers/selinux/go-selinux"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/openshift/imagebuilder"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// BaseImageFakeName is the "name" of a source image which we interpret
+	// as "no image".
+	BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
+
+	// DefaultTransport is a prefix that we apply to an image name if we
+	// can't find one in the local Store, in order to generate a source
+	// reference for the image that we can then copy to the local Store.
+	DefaultTransport = "docker://"
+
+	// minimumTruncatedIDLength is the minimum length of an identifier that
+	// we'll accept as possibly being a truncated image ID.
+	minimumTruncatedIDLength = 3
+)
+
+func reserveSELinuxLabels(store storage.Store, id string) error {
+	if selinux.GetEnabled() {
+		containers, err := store.Containers()
+		if err != nil {
+			return err
+		}
+
+		for _, c := range containers {
+			if id == c.ID {
+				continue
+			} else {
+				b, err := OpenBuilder(store, c.ID)
+				if err != nil {
+					if os.IsNotExist(err) {
+						// Ignore not-exist errors, since these containers
+						// were probably created by another tool.
+						// TODO: read the other containers' json data to
+						// reserve their SELinux labels as well.
+						continue
+					}
+					return err
+				}
+				// Prevent different containers from using the same MCS label.
+				if err := label.ReserveLabel(b.ProcessLabel); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func pullAndFindImage(ctx context.Context, store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
+	pullOptions := PullOptions{
+		ReportWriter:  options.ReportWriter,
+		Store:         store,
+		SystemContext: options.SystemContext,
+		Transport:     options.Transport,
+	}
+	ref, err := pullImage(ctx, store, imageName, pullOptions, sc)
+	if err != nil {
+		logrus.Debugf("error pulling image %q: %v", imageName, err)
+		return nil, nil, err
+	}
+	img, err := is.Transport.GetStoreImage(store, ref)
+	if err != nil {
+		logrus.Debugf("error reading pulled image %q: %v", imageName, err)
+		return nil, nil, err
+	}
+	return img, ref, nil
+}
+
+func getImageName(name string, img *storage.Image) string {
+	imageName := name
+	if len(img.Names) > 0 {
+		imageName = img.Names[0]
+		// When the image used by the container is a tagged image,
+		// the container name might be set to the original image instead of
+		// the image given on the "from" command line.
+		// This loop is meant to fix that.
+ for _, n := range img.Names { + if strings.Contains(n, name) { + imageName = n + break + } + } + } + return imageName +} + +func imageNamePrefix(imageName string) string { + prefix := imageName + s := strings.Split(imageName, "/") + if len(s) > 0 { + prefix = s[len(s)-1] + } + s = strings.Split(prefix, ":") + if len(s) > 0 { + prefix = s[0] + } + s = strings.Split(prefix, "@") + if len(s) > 0 { + prefix = s[0] + } + return prefix +} + +func newContainerIDMappingOptions(idmapOptions *IDMappingOptions) storage.IDMappingOptions { + var options storage.IDMappingOptions + if idmapOptions != nil { + options.HostUIDMapping = idmapOptions.HostUIDMapping + options.HostGIDMapping = idmapOptions.HostGIDMapping + uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap) + if len(uidmap) > 0 && len(gidmap) > 0 { + options.UIDMap = uidmap + options.GIDMap = gidmap + } else { + options.HostUIDMapping = true + options.HostGIDMapping = true + } + } + return options +} + +func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, *storage.Image, error) { + var ref types.ImageReference + var img *storage.Image + images, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store) + if err != nil { + return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", options.FromImage) + } + var pullErrors *multierror.Error + for _, image := range images { + var err error + if len(image) >= minimumTruncatedIDLength { + if img, err = store.Image(image); err == nil && img != nil && strings.HasPrefix(img.ID, image) { + if ref, err = is.Transport.ParseStoreReference(store, img.ID); err != nil { + return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID) + } + break + } + } + + if options.PullPolicy == PullAlways { + pulledImg, pulledReference, err := pullAndFindImage(ctx, store, image, options, systemContext) + if err != nil { + pullErrors = multierror.Append(pullErrors, err) + logrus.Debugf("unable to pull and read image %q: %v", image, err) + continue + } + ref = pulledReference + img = pulledImg + break + } + + srcRef, err := alltransports.ParseImageName(image) + if err != nil { + if options.Transport == "" { + pullErrors = multierror.Append(pullErrors, err) + logrus.Debugf("error parsing image name %q: %v", image, err) + continue + } + transport := options.Transport + if transport != DefaultTransport { + transport = transport + ":" + } + srcRef2, err := alltransports.ParseImageName(transport + image) + if err != nil { + pullErrors = multierror.Append(pullErrors, err) + logrus.Debugf("error parsing image name %q: %v", image, err) + continue + } + srcRef = srcRef2 + } + + destImage, err := localImageNameForReference(ctx, store, srcRef, options.FromImage) + if err != nil { + return nil, nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef)) + } + if destImage == "" { + return nil, nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef)) + } + + ref, err = is.Transport.ParseStoreReference(store, destImage) + if err != nil { + return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", destImage) + } + img, err = is.Transport.GetStoreImage(store, ref) + if err != nil { + if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing { + pullErrors = multierror.Append(pullErrors, err) + logrus.Debugf("no such image %q: %v", 
transports.ImageName(ref), err)
+				continue
+			}
+			pulledImg, pulledReference, err := pullAndFindImage(ctx, store, image, options, systemContext)
+			if err != nil {
+				pullErrors = multierror.Append(pullErrors, err)
+				logrus.Debugf("unable to pull and read image %q: %v", image, err)
+				continue
+			}
+			ref = pulledReference
+			img = pulledImg
+		}
+		break
+	}
+
+	if img == nil && pullErrors != nil {
+		return nil, nil, pullErrors
+	}
+
+	return ref, img, nil
+}
+
+func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
+	var ref types.ImageReference
+	var img *storage.Image
+	var err error
+
+	if options.FromImage == BaseImageFakeName {
+		options.FromImage = ""
+	}
+	if options.Transport == "" {
+		options.Transport = DefaultTransport
+	}
+
+	systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+
+	if options.FromImage != "scratch" {
+		ref, img, err = resolveImage(ctx, systemContext, store, options)
+		if err != nil {
+			return nil, err
+		}
+		if options.FromImage != "" && (ref == nil || img == nil) {
+			// If options.FromImage is set but we ended up
+			// with nil in ref or in img, then there was an error that
+			// we should return.
+			return nil, errors.Wrapf(storage.ErrImageUnknown, "image %q not found in %s registries", options.FromImage, sysregistries.RegistriesConfPath(systemContext))
+		}
+	}
+	image := options.FromImage
+	imageID := ""
+	topLayer := ""
+	if img != nil {
+		image = getImageName(imageNamePrefix(image), img)
+		imageID = img.ID
+		topLayer = img.TopLayer
+	}
+	var src types.ImageCloser
+	if ref != nil {
+		src, err = ref.NewImage(ctx, systemContext)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref))
+		}
+		defer src.Close()
+	}
+
+	name := "working-container"
+	if options.Container != "" {
+		name = options.Container
+	} else {
+		if image != "" {
+			name = imageNamePrefix(image) + "-" + name
+		}
+	}
+
+	coptions := storage.ContainerOptions{}
+	coptions.IDMappingOptions = newContainerIDMappingOptions(options.IDMappingOptions)
+
+	container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions)
+	suffix := 1
+	for err != nil && errors.Cause(err) == storage.ErrDuplicateName && options.Container == "" {
+		suffix++
+		tmpName := fmt.Sprintf("%s-%d", name, suffix)
+		if container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions); err == nil {
+			name = tmpName
+		}
+	}
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating container")
+	}
+
+	defer func() {
+		if err != nil {
+			if err2 := store.DeleteContainer(container.ID); err2 != nil {
+				logrus.Errorf("error deleting container %q: %v", container.ID, err2)
+			}
+		}
+	}()
+
+	if err = reserveSELinuxLabels(store, container.ID); err != nil {
+		return nil, err
+	}
+	processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts)
+	if err != nil {
+		return nil, err
+	}
+	uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap)
+
+	defaultNamespaceOptions, err := DefaultNamespaceOptions()
+	if err != nil {
+		return nil, err
+	}
+
+	namespaceOptions := defaultNamespaceOptions
+	namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+	builder := &Builder{
+		store:                 store,
+		Type:                  containerType,
+		FromImage:             image,
+		FromImageID:           imageID,
+		Container:             name,
+		ContainerID:           container.ID,
+		ImageAnnotations:      map[string]string{},
+		ImageCreatedBy:        "",
+		ProcessLabel:          processLabel,
+		MountLabel:            mountLabel,
+		DefaultMountsFilePath: options.DefaultMountsFilePath,
+		Isolation:             options.Isolation,
+		NamespaceOptions:      namespaceOptions,
+		ConfigureNetwork:      options.ConfigureNetwork,
+		CNIPluginPath:         options.CNIPluginPath,
+		CNIConfigDir:          options.CNIConfigDir,
+		IDMappingOptions: IDMappingOptions{
+			HostUIDMapping: len(uidmap) == 0,
+			HostGIDMapping: len(gidmap) == 0,
+			UIDMap:         uidmap,
+			GIDMap:         gidmap,
+		},
+		AddCapabilities:  copyStringSlice(options.AddCapabilities),
+		DropCapabilities: copyStringSlice(options.DropCapabilities),
+		CommonBuildOpts:  options.CommonBuildOpts,
+		TopLayer:         topLayer,
+		Args:             options.Args,
+		Format:           options.Format,
+	}
+
+	if options.Mount {
+		_, err = builder.Mount(mountLabel)
+		if err != nil {
+			return nil, errors.Wrapf(err, "error mounting build container")
+		}
+	}
+
+	if err := builder.initConfig(ctx, src); err != nil {
+		return nil, errors.Wrapf(err, "error preparing image configuration")
+	}
+	err = builder.Save()
+	if err != nil {
+		return nil, errors.Wrapf(err, "error saving builder state")
+	}
+
+	return builder, nil
+}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
new file mode 100644
index 000000000..94b92e1eb
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -0,0 +1,295 @@
+package cli
+
+// The cli package contains urfave/cli-related structs that help make up
+// the command line for buildah commands. It resides here so other projects
+// that vendor in this code can use them too.
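[Editor's note: a minimal sketch of how a vendoring project might reuse these flag sets with urfave/cli v1. The application and command names are hypothetical; the append copies into a fresh slice so the shared package-level flag slices are not mutated:

	package main

	import (
		"os"

		budcli "github.com/containers/buildah/pkg/cli"
		"github.com/urfave/cli"
	)

	func main() {
		app := cli.NewApp()
		app.Name = "mybuilder" // hypothetical vendoring project
		app.Commands = []cli.Command{{
			Name:  "bud",
			Usage: "build an image from a Dockerfile",
			Flags: append(append([]cli.Flag{}, budcli.BudFlags...), budcli.LayerFlags...),
			Action: func(c *cli.Context) error {
				// A real handler would build imagebuildah.BuildOptions
				// from c and call imagebuildah.BuildDockerfiles.
				return nil
			},
		}}
		if err := app.Run(os.Args); err != nil {
			os.Exit(1)
		}
	}
]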
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containers/buildah"
+	"github.com/containers/buildah/util"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/urfave/cli"
+)
+
+var (
+	usernsFlags = []cli.Flag{
+		cli.StringFlag{
+			Name:  "userns",
+			Usage: "'container', `path` of user namespace to join, or 'host'",
+		},
+		cli.StringSliceFlag{
+			Name:  "userns-uid-map",
+			Usage: "`containerID:hostID:length` UID mapping to use in user namespace",
+		},
+		cli.StringSliceFlag{
+			Name:  "userns-gid-map",
+			Usage: "`containerID:hostID:length` GID mapping to use in user namespace",
+		},
+		cli.StringFlag{
+			Name:  "userns-uid-map-user",
+			Usage: "`name` of entries from /etc/subuid to use to set user namespace UID mapping",
+		},
+		cli.StringFlag{
+			Name:  "userns-gid-map-group",
+			Usage: "`name` of entries from /etc/subgid to use to set user namespace GID mapping",
+		},
+	}
+
+	NamespaceFlags = []cli.Flag{
+		cli.StringFlag{
+			Name:  string(specs.IPCNamespace),
+			Usage: "'container', `path` of IPC namespace to join, or 'host'",
+		},
+		cli.StringFlag{
+			Name:  string(specs.NetworkNamespace) + ", net",
+			Usage: "'container', `path` of network namespace to join, or 'host'",
+		},
+		cli.StringFlag{
+			Name:  "cni-config-dir",
+			Usage: "`directory` of CNI configuration files",
+			Value: util.DefaultCNIConfigDir,
+		},
+		cli.StringFlag{
+			Name:  "cni-plugin-path",
+			Usage: "`path` of CNI network plugins",
+			Value: util.DefaultCNIPluginPath,
+		},
+		cli.StringFlag{
+			Name:  string(specs.PIDNamespace),
+			Usage: "'container', `path` of PID namespace to join, or 'host'",
+		},
+		cli.StringFlag{
+			Name:  string(specs.UTSNamespace),
+			Usage: "'container', `path` of UTS namespace to join, or 'host'",
+		},
+	}
+
+	LayerFlags = []cli.Flag{
+		cli.BoolFlag{
+			Name:  "layers",
+			Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()),
+		},
+	}
+
+	BudFlags = []cli.Flag{
+		cli.StringSliceFlag{
+			Name:  "annotation",
+			Usage: "Set metadata for an image (default [])",
+		},
+		cli.StringFlag{
+			Name:  "authfile",
+			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
+		},
+		cli.StringSliceFlag{
+			Name:  "build-arg",
+			Usage: "`argument=value` to supply to the builder",
+		},
+		cli.StringFlag{
+			Name:  "cache-from",
+			Usage: "Images to utilise as potential cache sources. The build process does not currently support caching, so this is a NOOP.",
+		},
+		cli.StringFlag{
+			Name:  "cert-dir",
+			Value: "",
+			Usage: "use certificates at the specified path to access the registry",
+		},
+		cli.BoolFlag{
+			Name:  "compress",
+			Usage: "This is a legacy option, which has no effect on the image",
+		},
+		cli.StringFlag{
+			Name:  "creds",
+			Value: "",
+			Usage: "use `[username[:password]]` for accessing the registry",
+		},
+		cli.BoolFlag{
+			Name:  "disable-content-trust",
+			Usage: "This is a Docker-specific option and is a NOOP",
+		},
+		cli.StringSliceFlag{
+			Name:  "file, f",
+			Usage: "`pathname or URL` of a Dockerfile",
+		},
+		cli.BoolFlag{
+			Name:  "force-rm",
+			Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.",
+		},
+		cli.StringFlag{
+			Name:  "format",
+			Usage: "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.",
+			Value: DefaultFormat(),
+		},
+		cli.StringFlag{
+			Name:  "iidfile",
+			Usage: "`file` to write the image ID to",
+		},
+		cli.StringFlag{
+			Name:  "isolation",
+			Usage: "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.",
+			Value: DefaultIsolation(),
+		},
+		cli.StringSliceFlag{
+			Name:  "label",
+			Usage: "Set metadata for an image (default [])",
+		},
+		cli.BoolFlag{
+			Name:  "no-cache",
+			Usage: "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.",
+		},
+		cli.StringFlag{
+			Name:  "logfile",
+			Usage: "log to `file` instead of stdout/stderr",
+		},
+		cli.IntFlag{
+			Name:  "loglevel",
+			Usage: "adjust logging level (range from -2 to 3)",
+		},
+		cli.BoolTFlag{
+			Name:  "pull",
+			Usage: "pull the image if not present",
+		},
+		cli.BoolFlag{
+			Name:  "pull-always",
+			Usage: "pull the image, even if a version is present",
+		},
+		cli.BoolFlag{
+			Name:  "quiet, q",
+			Usage: "refrain from announcing build instructions and image read/write progress",
+		},
+		cli.BoolTFlag{
+			Name:  "rm",
+			Usage: "Remove intermediate containers after a successful build (default true)",
+		},
+		cli.StringFlag{
+			Name:  "runtime",
+			Usage: "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.",
+			Value: util.Runtime(),
+		},
+		cli.StringSliceFlag{
+			Name:  "runtime-flag",
+			Usage: "add global flags for the container runtime",
+		},
+		cli.StringFlag{
+			Name:  "signature-policy",
+			Usage: "`pathname` of signature policy file (not usually used)",
+		},
+		cli.BoolFlag{
+			Name:  "squash",
+			Usage: "Squash newly built layers into a single new layer. The build process does not currently support caching, so this is a NOOP.",
+		},
+		cli.StringSliceFlag{
+			Name:  "tag, t",
+			Usage: "tagged `name` to apply to the built image",
+		},
+		cli.BoolTFlag{
+			Name:  "tls-verify",
+			Usage: "require HTTPS and verify certificates when accessing the registry",
+		},
+	}
+
+	FromAndBudFlags = append(append([]cli.Flag{
+		cli.StringSliceFlag{
+			Name:  "add-host",
+			Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])",
+		},
+		cli.StringSliceFlag{
+			Name:  "cap-add",
+			Usage: "add the specified capability when running (default [])",
+		},
+		cli.StringSliceFlag{
+			Name:  "cap-drop",
+			Usage: "drop the specified capability when running (default [])",
+		},
+		cli.StringFlag{
+			Name:  "cgroup-parent",
+			Usage: "optional parent cgroup for the container",
+		},
+		cli.Uint64Flag{
+			Name:  "cpu-period",
+			Usage: "limit the CPU CFS (Completely Fair Scheduler) period",
+		},
+		cli.Int64Flag{
+			Name:  "cpu-quota",
+			Usage: "limit the CPU CFS (Completely Fair Scheduler) quota",
+		},
+		cli.Uint64Flag{
+			Name:  "cpu-shares, c",
+			Usage: "CPU shares (relative weight)",
+		},
+		cli.StringFlag{
+			Name:  "cpuset-cpus",
+			Usage: "CPUs in which to allow execution (0-3, 0,1)",
+		},
+		cli.StringFlag{
+			Name:  "cpuset-mems",
+			Usage: "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
+		},
+		cli.StringFlag{
+			Name:  "memory, m",
+			Usage: "memory limit (format: <number>[<unit>], where unit = b, k, m or g)",
+		},
+		cli.StringFlag{
+			Name:  "memory-swap",
+			Usage: "swap limit equal to memory plus swap: '-1' to enable unlimited swap",
+		},
+		cli.StringSliceFlag{
+			Name:  "security-opt",
+			Usage: "security options (default [])",
+		},
+		cli.StringFlag{
+			Name:  "shm-size",
+			Usage: "size of '/dev/shm'. The format is `<number><unit>`.",
+			Value: "65536k",
+		},
+		cli.StringSliceFlag{
+			Name:  "ulimit",
+			Usage: "ulimit options (default [])",
+		},
+		cli.StringSliceFlag{
+			Name:  "volume, v",
+			Usage: "bind mount a volume into the container (default [])",
+		},
+	}, usernsFlags...), NamespaceFlags...)
+)
+
+// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true";
+// otherwise it returns false.
+func UseLayers() bool {
+	layers := os.Getenv("BUILDAH_LAYERS")
+	if strings.ToLower(layers) == "true" || layers == "1" {
+		return true
+	}
+	return false
+}
+
+// DefaultFormat returns the default image format.
+func DefaultFormat() string {
+	format := os.Getenv("BUILDAH_FORMAT")
+	if format != "" {
+		return format
+	}
+	return buildah.OCI
+}
+
+// DefaultIsolation returns the default isolation type.
+func DefaultIsolation() string {
+	isolation := os.Getenv("BUILDAH_ISOLATION")
+	if isolation != "" {
+		return isolation
+	}
+	return buildah.OCI
+}
+
+func VerifyFlagsArgsOrder(args []string) error {
+	for _, arg := range args {
+		if strings.HasPrefix(arg, "-") {
+			return errors.Errorf("No options (%s) can be specified after the image or container name", arg)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
new file mode 100644
index 000000000..d206508b4
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -0,0 +1,572 @@
+package parse
+
+// This package contains functions that parse and validate user input;
+// it is shared amongst buildah subcommands and is also useful to
+// projects vendoring buildah.
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/containers/buildah"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/urfave/cli"
+	"golang.org/x/crypto/ssh/terminal"
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// SeccompDefaultPath defines the default seccomp path.
+	SeccompDefaultPath = "/usr/share/containers/seccomp.json"
+	// SeccompOverridePath, if it exists, overrides the default seccomp path.
+	SeccompOverridePath = "/etc/crio/seccomp.json"
+)
+
+// CommonBuildOptions parses the build options from the bud cli.
+func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) {
+	var (
+		memoryLimit int64
+		memorySwap  int64
+		err         error
+	)
+	rlim := unix.Rlimit{Cur: 1048576, Max: 1048576}
+	defaultLimits := []string{}
+	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil {
+		defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max))
+	}
+	if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
+		defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
+	}
+	if c.String("memory") != "" {
+		memoryLimit, err = units.RAMInBytes(c.String("memory"))
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid value for memory")
+		}
+	}
+	if c.String("memory-swap") != "" {
+		memorySwap, err = units.RAMInBytes(c.String("memory-swap"))
+		if err != nil {
+			return nil, errors.Wrapf(err, "invalid value for memory-swap")
+		}
+	}
+	if len(c.StringSlice("add-host")) > 0 {
+		for _, host := range c.StringSlice("add-host") {
+			if err := validateExtraHost(host); err != nil {
+				return nil, errors.Wrapf(err, "invalid value for add-host")
+			}
+		}
+	}
+	if _, err := units.FromHumanSize(c.String("shm-size")); err != nil {
+		return nil, errors.Wrapf(err, "invalid --shm-size")
+	}
+	if err := ParseVolumes(c.StringSlice("volume")); err != nil {
+		return nil, err
+	}
+
+	commonOpts := &buildah.CommonBuildOptions{
+		AddHost:      c.StringSlice("add-host"),
+		CgroupParent: c.String("cgroup-parent"),
+		CPUPeriod:    c.Uint64("cpu-period"),
+		CPUQuota:     c.Int64("cpu-quota"),
+		CPUSetCPUs:   c.String("cpuset-cpus"),
+		CPUSetMems:   c.String("cpuset-mems"),
+		CPUShares:    c.Uint64("cpu-shares"),
+		Memory:       memoryLimit,
+		MemorySwap:   memorySwap,
+		ShmSize:      c.String("shm-size"),
+		Ulimit:       append(defaultLimits, c.StringSlice("ulimit")...),
+		Volumes:      c.StringSlice("volume"),
+	}
+	if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil {
+		return nil, err
+	}
+	return commonOpts, nil
+}
+
+func parseSecurityOpts(securityOpts []string, commonOpts *buildah.CommonBuildOptions) error {
+	for _, opt := range securityOpts {
+		if opt == "no-new-privileges" {
+			return errors.Errorf("no-new-privileges is not supported")
+		}
+		con := strings.SplitN(opt, "=", 2)
+		if len(con) != 2 {
+			return errors.Errorf("Invalid --security-opt name=value pair: %q", opt)
+		}
+
+		switch con[0] {
+		case "label":
+			commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1])
+		case "apparmor":
+			commonOpts.ApparmorProfile = con[1]
+		case "seccomp":
+			commonOpts.SeccompProfilePath = con[1]
+		default:
+			return errors.Errorf("Invalid --security-opt name: %q", opt)
+		}
+
+	}
+
+	if commonOpts.SeccompProfilePath == "" {
+		if _, err := os.Stat(SeccompOverridePath); err == nil {
+			commonOpts.SeccompProfilePath = SeccompOverridePath
+		} else {
+			if !os.IsNotExist(err) {
+				return errors.Wrapf(err, "can't check if %q exists", SeccompOverridePath)
+			}
+			if _, err := os.Stat(SeccompDefaultPath); err != nil {
+				if !os.IsNotExist(err) {
+					return errors.Wrapf(err, "can't check if %q exists", SeccompDefaultPath)
+				}
+			} else {
+				commonOpts.SeccompProfilePath = SeccompDefaultPath
+			}
+		}
+	}
+	return nil
+}
+
+// ParseVolumes validates the host and container paths passed in to the --volume flag.
+func ParseVolumes(volumes []string) error {
+	if len(volumes) == 0 {
+		return nil
+	}
+	for _, volume := range volumes {
+		arr := strings.SplitN(volume, ":", 3)
+		if len(arr) < 2 {
+			return errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
+		}
+		if err := validateVolumeHostDir(arr[0]); err != nil {
+			return err
+		}
+		if err := validateVolumeCtrDir(arr[1]); err != nil {
+			return err
+		}
+		if len(arr) > 2 {
+			if err := validateVolumeOpts(arr[2]); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func validateVolumeHostDir(hostDir string) error {
+	if !filepath.IsAbs(hostDir) {
+		return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
+	}
+	if _, err := os.Stat(hostDir); err != nil {
+		return errors.Wrapf(err, "error checking path %q", hostDir)
+	}
+	return nil
+}
+
+func validateVolumeCtrDir(ctrDir string) error {
+	if !filepath.IsAbs(ctrDir) {
+		return errors.Errorf("invalid container path, must be an absolute path %q", ctrDir)
+	}
+	return nil
+}
+
+func validateVolumeOpts(option string) error {
+	var foundRootPropagation, foundRWRO, foundLabelChange int
+	options := strings.Split(option, ",")
+	for _, opt := range options {
+		switch opt {
+		case "rw", "ro":
+			if foundRWRO > 0 {
+				return errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", option)
+			}
+			foundRWRO++
+		case "z", "Z":
+			if foundLabelChange > 0 {
+				return errors.Errorf("invalid options %q, can only specify 1 'z' or 'Z' option", option)
+			}
+			foundLabelChange++
+		case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable":
+			if foundRootPropagation > 0 {
+				return errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", option)
+			}
+			foundRootPropagation++
+		default:
+			return errors.Errorf("invalid option type %q", option)
+		}
+	}
+	return nil
+}
+
+// validateExtraHost validates that the specified string is a valid extra host.
+// An extra host is in the form name:ip, where the ip has to be a valid IP
+// address (IPv4 or IPv6). Used by the add-host flag.
+func validateExtraHost(val string) error {
+	// allow for IPv6 addresses in extra hosts by only splitting on first ":"
+	arr := strings.SplitN(val, ":", 2)
+	if len(arr) != 2 || len(arr[0]) == 0 {
+		return fmt.Errorf("bad format for add-host: %q", val)
+	}
+	if _, err := validateIPAddress(arr[1]); err != nil {
+		return fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+	}
+	return nil
+}
+
+// validateIPAddress validates an IP address.
+// Also used for the dns, ip, and ip6 flags.
+func validateIPAddress(val string) (string, error) {
+	var ip = net.ParseIP(strings.TrimSpace(val))
+	if ip != nil {
+		return ip.String(), nil
+	}
+	return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateFlags searches for StringFlags or StringSlice flags that never had
+// a value set. This commonly occurs when the CLI mistakenly takes the next
+// option and uses it as a value.
+func ValidateFlags(c *cli.Context, flags []cli.Flag) error {
+	re, err := regexp.Compile("^-.+")
+	if err != nil {
+		return errors.Wrap(err, "compiling regex failed")
+	}
+
+	// The --cmd flag can have a following command i.e. --cmd="--help".
+	// Let's skip this check just for the --cmd flag.
+	for _, flag := range flags {
+		switch reflect.TypeOf(flag).String() {
+		case "cli.StringSliceFlag":
+			{
+				f := flag.(cli.StringSliceFlag)
+				name := strings.Split(f.Name, ",")
+				if f.Name == "cmd" {
+					continue
+				}
+				val := c.StringSlice(name[0])
+				for _, v := range val {
+					if ok := re.MatchString(v); ok {
+						return errors.Errorf("option --%s requires a value", name[0])
+					}
+				}
+			}
+		case "cli.StringFlag":
+			{
+				f := flag.(cli.StringFlag)
+				name := strings.Split(f.Name, ",")
+				if f.Name == "cmd" {
+					continue
+				}
+				val := c.String(name[0])
+				if ok := re.MatchString(val); ok {
+					return errors.Errorf("option --%s requires a value", name[0])
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// SystemContextFromOptions returns a SystemContext populated with values
+// per the input parameters provided by the caller, for use in authentication.
+func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) { + ctx := &types.SystemContext{ + DockerCertPath: c.String("cert-dir"), + } + if c.IsSet("tls-verify") { + ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify") + } + if c.IsSet("creds") { + var err error + ctx.DockerAuthConfig, err = getDockerAuth(c.String("creds")) + if err != nil { + return nil, err + } + } + if c.IsSet("signature-policy") { + ctx.SignaturePolicyPath = c.String("signature-policy") + } + if c.IsSet("authfile") { + ctx.AuthFilePath = c.String("authfile") + } + if c.GlobalIsSet("registries-conf") { + ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf") + } + if c.GlobalIsSet("registries-conf-dir") { + ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir") + } + ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version) + return ctx, nil +} + +func parseCreds(creds string) (string, string) { + if creds == "" { + return "", "" + } + up := strings.SplitN(creds, ":", 2) + if len(up) == 1 { + return up[0], "" + } + if up[0] == "" { + return "", up[1] + } + return up[0], up[1] +} + +func getDockerAuth(creds string) (*types.DockerAuthConfig, error) { + username, password := parseCreds(creds) + if username == "" { + fmt.Print("Username: ") + fmt.Scanln(&username) + } + if password == "" { + fmt.Print("Password: ") + termPassword, err := terminal.ReadPassword(0) + if err != nil { + return nil, errors.Wrapf(err, "could not read password from terminal") + } + password = string(termPassword) + } + + return &types.DockerAuthConfig{ + Username: username, + Password: password, + }, nil +} + +// IDMappingOptions parses the build options related to user namespaces and ID mapping. +func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, idmapOptions *buildah.IDMappingOptions, err error) { + user := c.String("userns-uid-map-user") + group := c.String("userns-gid-map-group") + // If only the user or group was specified, use the same value for the + // other, since we need both in order to initialize the maps using the + // names. + if user == "" && group != "" { + user = group + } + if group == "" && user != "" { + group = user + } + // Either start with empty maps or the name-based maps. + mappings := idtools.NewIDMappingsFromMaps(nil, nil) + if user != "" && group != "" { + submappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, err + } + mappings = submappings + } + // We'll parse the UID and GID mapping options the same way. + buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) { + outmap := make([]specs.LinuxIDMapping, 0, len(basemap)) + // Start with the name-based map entries. + for _, m := range basemap { + outmap = append(outmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + // Parse the flag's value as one or more triples (if it's even + // been set), and append them. 
+ var spec []string + if c.GlobalIsSet(option) { + spec = c.GlobalStringSlice(option) + } + if c.IsSet(option) { + spec = c.StringSlice(option) + } + idmap, err := parseIDMap(spec) + if err != nil { + return nil, err + } + for _, m := range idmap { + outmap = append(outmap, specs.LinuxIDMapping{ + ContainerID: m[0], + HostID: m[1], + Size: m[2], + }) + } + return outmap, nil + } + uidmap, err := buildIDMap(mappings.UIDs(), "userns-uid-map") + if err != nil { + return nil, nil, err + } + gidmap, err := buildIDMap(mappings.GIDs(), "userns-gid-map") + if err != nil { + return nil, nil, err + } + // If we only have one map or the other populated at this point, then + // use the same mapping for both, since we know that no user or group + // name was specified, but a specific mapping was for one or the other. + if len(uidmap) == 0 && len(gidmap) != 0 { + uidmap = gidmap + } + if len(gidmap) == 0 && len(uidmap) != 0 { + gidmap = uidmap + } + // By default, having mappings configured means we use a user + // namespace. Otherwise, we don't. + usernsOption := buildah.NamespaceOption{ + Name: string(specs.UserNamespace), + Host: len(uidmap) == 0 && len(gidmap) == 0, + } + // If the user specifically requested that we either use or don't use + // user namespaces, override that default. + if c.IsSet("userns") { + how := c.String("userns") + switch how { + case "", "container": + usernsOption.Host = false + case "host": + usernsOption.Host = true + default: + if _, err := os.Stat(how); err != nil { + return nil, nil, errors.Wrapf(err, "error checking for %s namespace at %q", string(specs.UserNamespace), how) + } + logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how) + usernsOption.Path = how + } + } + usernsOptions = buildah.NamespaceOptions{usernsOption} + if !c.IsSet("net") { + usernsOptions = append(usernsOptions, buildah.NamespaceOption{ + Name: string(specs.NetworkNamespace), + Host: usernsOption.Host, + }) + } + // If the user requested that we use the host namespace, but also that + // we use mappings, that's not going to work. + if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { + return nil, nil, errors.Errorf("can not specify ID mappings while using host's user namespace") + } + return usernsOptions, &buildah.IDMappingOptions{ + HostUIDMapping: usernsOption.Host, + HostGIDMapping: usernsOption.Host, + UIDMap: uidmap, + GIDMap: gidmap, + }, nil +} + +func parseIDMap(spec []string) (m [][3]uint32, err error) { + for _, s := range spec { + args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) }) + if len(args)%3 != 0 { + return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s) + } + for len(args) >= 3 { + cid, err := strconv.ParseUint(args[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing container ID %q from mapping %q as a number: %v", args[0], s, err) + } + hostid, err := strconv.ParseUint(args[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing host ID %q from mapping %q as a number: %v", args[1], s, err) + } + size, err := strconv.ParseUint(args[2], 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing %q from mapping %q as a number: %v", args[2], s, err) + } + m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)}) + args = args[3:] + } + } + return m, nil +} + +// NamespaceOptions parses the build options for all namespaces except for user namespace. 
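[Editor's note: parseIDMap above accepts one or more containerID:hostID:size triples, treating any run of non-digits as a separator. A self-contained restatement of that splitting logic, with a hypothetical input spec:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
		"unicode"
	)

	func main() {
		// Two triples: map container IDs 0-65535 to host IDs 1000-66535,
		// then container IDs 65536-66535 to host IDs 70000-70999.
		spec := "0:1000:65536,65536:70000:1000"
		args := strings.FieldsFunc(spec, func(r rune) bool { return !unicode.IsDigit(r) })
		for len(args) >= 3 {
			// Errors are ignored here only because the sketch feeds known-good input.
			cid, _ := strconv.ParseUint(args[0], 10, 32)
			hid, _ := strconv.ParseUint(args[1], 10, 32)
			size, _ := strconv.ParseUint(args[2], 10, 32)
			fmt.Printf("container %d -> host %d (length %d)\n", cid, hid, size)
			args = args[3:]
		}
	}
]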
+func NamespaceOptions(c *cli.Context) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) {
+	options := make(buildah.NamespaceOptions, 0, 7)
+	policy := buildah.NetworkDefault
+	for _, what := range []string{string(specs.IPCNamespace), "net", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
+		if c.IsSet(what) {
+			how := c.String(what)
+			switch what {
+			case "net", "network":
+				what = string(specs.NetworkNamespace)
+			}
+			switch how {
+			case "", "container":
+				logrus.Debugf("setting %q namespace to %q", what, "")
+				options.AddOrReplace(buildah.NamespaceOption{
+					Name: what,
+				})
+			case "host":
+				logrus.Debugf("setting %q namespace to host", what)
+				options.AddOrReplace(buildah.NamespaceOption{
+					Name: what,
+					Host: true,
+				})
+			default:
+				if what == string(specs.NetworkNamespace) {
+					if how == "none" {
+						options.AddOrReplace(buildah.NamespaceOption{
+							Name: what,
+						})
+						policy = buildah.NetworkDisabled
+						logrus.Debugf("setting network to disabled")
+						break
+					}
+					if !filepath.IsAbs(how) {
+						options.AddOrReplace(buildah.NamespaceOption{
+							Name: what,
+							Path: how,
+						})
+						policy = buildah.NetworkEnabled
+						logrus.Debugf("setting network configuration to %q", how)
+						break
+					}
+				}
+				if _, err := os.Stat(how); err != nil {
+					return nil, buildah.NetworkDefault, errors.Wrapf(err, "error checking for %s namespace at %q", what, how)
+				}
+				logrus.Debugf("setting %q namespace to %q", what, how)
+				options.AddOrReplace(buildah.NamespaceOption{
+					Name: what,
+					Path: how,
+				})
+			}
+		}
+	}
+	return options, policy, nil
+}
+
+func defaultIsolation() (buildah.Isolation, error) {
+	isolation, isSet := os.LookupEnv("BUILDAH_ISOLATION")
+	if isSet {
+		if strings.HasPrefix(strings.ToLower(isolation), "oci") {
+			return buildah.IsolationOCI, nil
+		} else if strings.HasPrefix(strings.ToLower(isolation), "rootless") {
+			return buildah.IsolationOCIRootless, nil
+		} else if strings.HasPrefix(strings.ToLower(isolation), "chroot") {
+			return buildah.IsolationChroot, nil
+		}
+		return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation)
+	}
+	return buildah.IsolationDefault, nil
+}
+
+// IsolationOption parses the --isolation flag.
+func IsolationOption(c *cli.Context) (buildah.Isolation, error) { + if c.String("isolation") != "" { + if strings.HasPrefix(strings.ToLower(c.String("isolation")), "oci") { + return buildah.IsolationOCI, nil + } else if strings.HasPrefix(strings.ToLower(c.String("isolation")), "rootless") { + return buildah.IsolationOCIRootless, nil + } else if strings.HasPrefix(strings.ToLower(c.String("isolation")), "chroot") { + return buildah.IsolationChroot, nil + } else { + return buildah.IsolationDefault, errors.Errorf("unrecognized isolation type %q", c.String("isolation")) + } + } + return defaultIsolation() +} diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go new file mode 100644 index 000000000..c2fc6637f --- /dev/null +++ b/vendor/github.com/containers/buildah/pull.go @@ -0,0 +1,228 @@ +package buildah + +import ( + "context" + "io" + "strings" + + "github.com/containers/buildah/util" + cp "github.com/containers/image/copy" + "github.com/containers/image/docker/reference" + tarfile "github.com/containers/image/docker/tarfile" + ociarchive "github.com/containers/image/oci/archive" + "github.com/containers/image/pkg/sysregistries" + "github.com/containers/image/signature" + is "github.com/containers/image/storage" + "github.com/containers/image/transports" + "github.com/containers/image/transports/alltransports" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// PullOptions can be used to alter how an image is copied in from somewhere. +type PullOptions struct { + // SignaturePolicyPath specifies an override location for the signature + // policy which should be used for verifying the new image as it is + // being written. Except in specific circumstances, no value should be + // specified, indicating that the shared, system-wide default policy + // should be used. + SignaturePolicyPath string + // ReportWriter is an io.Writer which will be used to log the writing + // of the new image. + ReportWriter io.Writer + // Store is the local storage store which holds the source image. + Store storage.Store + // github.com/containers/image/types SystemContext to hold credentials + // and other authentication/authorization information. + SystemContext *types.SystemContext + // Transport is a value which is prepended to the image's name, if the + // image name alone can not be resolved to a reference to a source + // image. No separator is implicitly added. 
+ Transport string +} + +func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) { + if srcRef == nil { + return "", errors.Errorf("reference to image is empty") + } + split := strings.SplitN(spec, ":", 2) + file := split[len(split)-1] + var name string + switch srcRef.Transport().Name() { + case util.DockerArchive: + tarSource, err := tarfile.NewSourceFromFile(file) + if err != nil { + return "", err + } + manifest, err := tarSource.LoadTarManifest() + if err != nil { + return "", errors.Errorf("error retrieving manifest.json: %v", err) + } + // to pull the first image stored in the tar file + if len(manifest) == 0 { + // use the hex of the digest if no manifest is found + name, err = getImageDigest(ctx, srcRef, nil) + if err != nil { + return "", err + } + } else { + if len(manifest[0].RepoTags) > 0 { + name = manifest[0].RepoTags[0] + } else { + // If the input image has no repotags, we need to feed it a dest anyways + name, err = getImageDigest(ctx, srcRef, nil) + if err != nil { + return "", err + } + } + } + case util.OCIArchive: + // retrieve the manifest from index.json to access the image name + manifest, err := ociarchive.LoadManifestDescriptor(srcRef) + if err != nil { + return "", errors.Wrapf(err, "error loading manifest for %q", srcRef) + } + // if index.json has no reference name, compute the image digest instead + if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" { + name, err = getImageDigest(ctx, srcRef, nil) + if err != nil { + return "", err + } + } else { + name = manifest.Annotations["org.opencontainers.image.ref.name"] + } + case util.DirTransport: + // supports pull from a directory + name = split[1] + // remove leading "/" + if name[:1] == "/" { + name = name[1:] + } + default: + ref := srcRef.DockerReference() + if ref == nil { + name = srcRef.StringWithinTransport() + _, err := is.Transport.ParseStoreReference(store, name) + if err == nil { + return name, nil + } + if strings.LastIndex(name, "/") != -1 { + name = name[strings.LastIndex(name, "/")+1:] + _, err = is.Transport.ParseStoreReference(store, name) + if err == nil { + return name, nil + } + } + return "", errors.Errorf("reference to image %q is not a named reference", transports.ImageName(srcRef)) + } + + if named, ok := ref.(reference.Named); ok { + name = named.Name() + if namedTagged, ok := ref.(reference.NamedTagged); ok { + name = name + ":" + namedTagged.Tag() + } + if canonical, ok := ref.(reference.Canonical); ok { + name = name + "@" + canonical.Digest().String() + } + } + } + + if _, err := is.Transport.ParseStoreReference(store, name); err != nil { + return "", errors.Wrapf(err, "error parsing computed local image name %q", name) + } + return name, nil +} + +// Pull copies the contents of the image from somewhere else. 
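[Editor's note: a usage sketch for the Pull helper defined next. It assumes a configured containers/storage store and network access; storage.DefaultStoreOptions is the same default used elsewhere in this patch:

	package main

	import (
		"context"
		"fmt"
		"os"

		"github.com/containers/buildah"
		"github.com/containers/storage"
	)

	func main() {
		store, err := storage.GetStore(storage.DefaultStoreOptions)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		defer store.Shutdown(false)

		// Pull the image into local storage and report progress on stderr.
		ref, err := buildah.Pull(context.TODO(), "docker://alpine:latest", buildah.PullOptions{
			Store:        store,
			ReportWriter: os.Stderr,
		})
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		fmt.Println("pulled:", ref.StringWithinTransport())
	}
]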
+func Pull(ctx context.Context, imageName string, options PullOptions) (types.ImageReference, error) {
+ systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ return pullImage(ctx, options.Store, imageName, options, systemContext)
+}
+
+func pullImage(ctx context.Context, store storage.Store, imageName string, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
+ spec := imageName
+ srcRef, err := alltransports.ParseImageName(spec)
+ if err != nil {
+ if options.Transport == "" {
+ return nil, errors.Wrapf(err, "error parsing image name %q", spec)
+ }
+ transport := options.Transport
+ if transport != DefaultTransport {
+ transport = transport + ":"
+ }
+ spec = transport + spec
+ srcRef2, err2 := alltransports.ParseImageName(spec)
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, "error parsing image name %q", spec)
+ }
+ srcRef = srcRef2
+ }
+
+ destName, err := localImageNameForReference(ctx, store, srcRef, spec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
+ }
+ if destName == "" {
+ return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
+ }
+
+ destRef, err := is.Transport.ParseStoreReference(store, destName)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing image name %q", destName)
+ }
+
+ policy, err := signature.DefaultPolicy(sc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error obtaining default signature policy")
+ }
+
+ policyContext, err := signature.NewPolicyContext(policy)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new signature policy context")
+ }
+
+ defer func() {
+ if err2 := policyContext.Destroy(); err2 != nil {
+ logrus.Debugf("error destroying signature policy context: %v", err2)
+ }
+ }()
+
+ logrus.Debugf("copying %q to %q", spec, destName)
+ pullError := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, sc, nil, ""))
+ if pullError == nil {
+ return destRef, nil
+ }
+
+ // The pull failed, so let's be nicer to the user and try to figure out why.
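+ // (A "short name" such as "alpine" can only be resolved by consulting the
+ // search registries from registries.conf, so if the name includes no
+ // registry and no search registries are configured, that is the most
+ // likely cause of the failure.)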
+ registryPath := sysregistries.RegistriesConfPath(sc)
+ searchRegistries, err := getRegistries(sc)
+ if err != nil {
+ return nil, err
+ }
+ hasRegistryInName, err := hasRegistry(imageName)
+ if err != nil {
+ return nil, err
+ }
+ if !hasRegistryInName && len(searchRegistries) == 0 {
+ return nil, errors.Errorf("image name provided is a short name and no search registries are defined in %s: %s", registryPath, pullError)
+ }
+ return nil, pullError
+}
+
+// getImageDigest creates an image object and uses the hex value of the digest
+// as the image ID for parsing the store reference.
+func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.SystemContext) (string, error) {
+ newImg, err := src.NewImage(ctx, sc)
+ if err != nil {
+ return "", err
+ }
+ defer newImg.Close()
+
+ digest := newImg.ConfigInfo().Digest
+ if err = digest.Validate(); err != nil {
+ return "", errors.Wrapf(err, "error getting config info")
+ }
+ return "@" + digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
new file mode 100644
index 000000000..3d9b909ae
--- /dev/null
+++ b/vendor/github.com/containers/buildah/run.go
@@ -0,0 +1,1995 @@
+package buildah
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/containernetworking/cni/libcni"
+ "github.com/containers/buildah/bind"
+ "github.com/containers/buildah/chroot"
+ "github.com/containers/buildah/util"
+ "github.com/containers/libpod/pkg/secrets"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/reexec"
+ units "github.com/docker/go-units"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh/terminal"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // DefaultWorkingDir is used if none was specified.
+ DefaultWorkingDir = "/"
+ // runUsingRuntimeCommand is a command we use as a key for reexec
+ runUsingRuntimeCommand = Package + "-oci-runtime"
+)
+
+// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal.
+type TerminalPolicy int
+
+const (
+ // DefaultTerminal indicates that this Run invocation should be
+ // connected to a pseudoterminal if we're connected to a terminal.
+ DefaultTerminal TerminalPolicy = iota
+ // WithoutTerminal indicates that this Run invocation should NOT be
+ // connected to a pseudoterminal.
+ WithoutTerminal
+ // WithTerminal indicates that this Run invocation should be connected
+ // to a pseudoterminal.
+ WithTerminal
+)
+
+// String converts a TerminalPolicy into a string.
+func (t TerminalPolicy) String() string {
+ switch t {
+ case DefaultTerminal:
+ return "DefaultTerminal"
+ case WithoutTerminal:
+ return "WithoutTerminal"
+ case WithTerminal:
+ return "WithTerminal"
+ }
+ return fmt.Sprintf("unrecognized terminal setting %d", t)
+}
+
+// NamespaceOption controls how we set up a namespace when launching processes.
+type NamespaceOption struct {
+ // Name specifies the type of namespace, typically matching one of the
+ // ...Namespace constants defined in
+ // github.com/opencontainers/runtime-spec/specs-go.
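+ // Those constants stringify to values such as "cgroup", "ipc", "mount",
+ // "network", "pid", "user", and "uts".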
+ Name string
+ // Host is used to force our processes to use the host's namespace of
+ // this type.
+ Host bool
+ // Path is the path of the namespace to attach our process to, if Host
+ // is not set. If Host is not set and Path is also empty, a new
+ // namespace will be created for the process that we're starting.
+ // If Name is specs.NetworkNamespace and Path doesn't look like an
+ // absolute path, it is treated as a comma-separated list of CNI
+ // configuration names which will be selected from among all of the CNI
+ // network configurations which we find.
+ Path string
+}
+
+// NamespaceOptions provides some helper methods for a slice of NamespaceOption
+// structs.
+type NamespaceOptions []NamespaceOption
+
+// IDMappingOptions controls how we set up UID/GID mapping when we set up a
+// user namespace.
+type IDMappingOptions struct {
+ HostUIDMapping bool
+ HostGIDMapping bool
+ UIDMap []specs.LinuxIDMapping
+ GIDMap []specs.LinuxIDMapping
+}
+
+// Isolation provides a way to specify whether we're supposed to use a proper
+// OCI runtime, or some other method for running commands.
+type Isolation int
+
+const (
+ // IsolationDefault is whatever we think will work best.
+ IsolationDefault Isolation = iota
+ // IsolationOCI is a proper OCI runtime.
+ IsolationOCI
+ // IsolationChroot is a more chroot-like environment: less isolation,
+ // but with fewer requirements.
+ IsolationChroot
+ // IsolationOCIRootless is a proper OCI runtime in rootless mode.
+ IsolationOCIRootless
+)
+
+// String converts an Isolation into a string.
+func (i Isolation) String() string {
+ switch i {
+ case IsolationDefault:
+ return "IsolationDefault"
+ case IsolationOCI:
+ return "IsolationOCI"
+ case IsolationChroot:
+ return "IsolationChroot"
+ case IsolationOCIRootless:
+ return "IsolationOCIRootless"
+ }
+ return fmt.Sprintf("unrecognized isolation type %d", i)
+}
+
+// RunOptions can be used to alter how a command is run in the container.
+type RunOptions struct {
+ // Hostname is the hostname we set for the running container.
+ Hostname string
+ // Isolation is either IsolationDefault, IsolationOCI, IsolationChroot, or IsolationOCIRootless.
+ Isolation Isolation
+ // Runtime is the name of the runtime to run. It should accept the
+ // same arguments that runc does, and produce similar output.
+ Runtime string
+ // Args adds global arguments for the runtime.
+ Args []string
+ // Mounts are additional mount points which we want to provide.
+ Mounts []specs.Mount
+ // Env is additional environment variables to set.
+ Env []string
+ // User is the user as whom to run the command.
+ User string
+ // WorkingDir is an override for the working directory.
+ WorkingDir string
+ // Shell is the default shell to run in a container.
+ Shell string
+ // Cmd is an override for the configured default command.
+ Cmd []string
+ // Entrypoint is an override for the configured entry point.
+ Entrypoint []string
+ // NamespaceOptions controls how we set up the namespaces for the process.
+ NamespaceOptions NamespaceOptions
+ // ConfigureNetwork controls whether or not network interfaces and
+ // routing are configured for a new network namespace (i.e., when not
+ // joining another's namespace and not just using the host's
+ // namespace), effectively deciding whether or not the process has a
+ // usable network.
+ ConfigureNetwork NetworkConfigurationPolicy
+ // CNIPluginPath is the location of CNI plugin helpers, if they should be
+ // run from a location other than the default location.
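+ // (Run falls back to util.DefaultCNIPluginPath when this is unset; on
+ // many distributions the plugins themselves live under /opt/cni/bin,
+ // though that path is distribution-dependent.)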
+ CNIPluginPath string + // CNIConfigDir is the location of CNI configuration files, if the files in + // the default configuration directory shouldn't be used. + CNIConfigDir string + // Terminal provides a way to specify whether or not the command should + // be run with a pseudoterminal. By default (DefaultTerminal), a + // terminal is used if os.Stdout is connected to a terminal, but that + // decision can be overridden by specifying either WithTerminal or + // WithoutTerminal. + Terminal TerminalPolicy + // TerminalSize provides a way to set the number of rows and columns in + // a pseudo-terminal, if we create one, and Stdin/Stdout/Stderr aren't + // connected to a terminal. + TerminalSize *specs.Box + // The stdin/stdout/stderr descriptors to use. If set to nil, the + // corresponding files in the "os" package are used as defaults. + Stdin io.Reader `json:"-"` + Stdout io.Writer `json:"-"` + Stderr io.Writer `json:"-"` + // Quiet tells the run to turn off output to stdout. + Quiet bool + // AddCapabilities is a list of capabilities to add to the default set. + AddCapabilities []string + // DropCapabilities is a list of capabilities to remove from the default set, + // after processing the AddCapabilities set. If a capability appears in both + // lists, it will be dropped. + DropCapabilities []string +} + +// DefaultNamespaceOptions returns the default namespace settings from the +// runtime-tools generator library. +func DefaultNamespaceOptions() (NamespaceOptions, error) { + options := NamespaceOptions{ + {Name: string(specs.CgroupNamespace), Host: true}, + {Name: string(specs.IPCNamespace), Host: true}, + {Name: string(specs.MountNamespace), Host: true}, + {Name: string(specs.NetworkNamespace), Host: true}, + {Name: string(specs.PIDNamespace), Host: true}, + {Name: string(specs.UserNamespace), Host: true}, + {Name: string(specs.UTSNamespace), Host: true}, + } + g, err := generate.New("linux") + if err != nil { + return options, err + } + spec := g.Config + if spec.Linux != nil { + for _, ns := range spec.Linux.Namespaces { + options.AddOrReplace(NamespaceOption{ + Name: string(ns.Type), + Path: ns.Path, + }) + } + } + return options, nil +} + +// Find the configuration for the namespace of the given type. If there are +// duplicates, find the _last_ one of the type, since we assume it was appended +// more recently. +func (n *NamespaceOptions) Find(namespace string) *NamespaceOption { + for i := range *n { + j := len(*n) - 1 - i + if (*n)[j].Name == namespace { + return &((*n)[j]) + } + } + return nil +} + +// AddOrReplace either adds or replaces the configuration for a given namespace. 
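+// For example, AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+// forces later runs to share the host's network namespace.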
+func (n *NamespaceOptions) AddOrReplace(options ...NamespaceOption) { +nextOption: + for _, option := range options { + for i := range *n { + j := len(*n) - 1 - i + if (*n)[j].Name == option.Name { + (*n)[j] = option + continue nextOption + } + } + *n = append(*n, option) + } +} + +func addRlimits(ulimit []string, g *generate.Generator) error { + var ( + ul *units.Ulimit + err error + ) + + for _, u := range ulimit { + if ul, err = units.ParseUlimit(u); err != nil { + return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) + } + + g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) + } + return nil +} + +func addHosts(hosts []string, w io.Writer) error { + buf := bufio.NewWriter(w) + for _, host := range hosts { + values := strings.SplitN(host, ":", 2) + if len(values) != 2 { + return errors.Errorf("unable to parse host entry %q: incorrect format", host) + } + if values[0] == "" { + return errors.Errorf("hostname in host entry %q is empty", host) + } + if values[1] == "" { + return errors.Errorf("IP address in host entry %q is empty", host) + } + fmt.Fprintf(buf, "%s\t%s\n", values[1], values[0]) + } + return buf.Flush() +} + +func addHostsToFile(hosts []string, filename string) error { + if len(hosts) == 0 { + return nil + } + file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, os.ModeAppend) + if err != nil { + return err + } + defer file.Close() + return addHosts(hosts, file) +} + +func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error { + // Resources - CPU + if commonOpts.CPUPeriod != 0 { + g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) + } + if commonOpts.CPUQuota != 0 { + g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota) + } + if commonOpts.CPUShares != 0 { + g.SetLinuxResourcesCPUShares(commonOpts.CPUShares) + } + if commonOpts.CPUSetCPUs != "" { + g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs) + } + if commonOpts.CPUSetMems != "" { + g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) + } + + // Resources - Memory + if commonOpts.Memory != 0 { + g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) + } + if commonOpts.MemorySwap != 0 { + g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) + } + + // cgroup membership + if commonOpts.CgroupParent != "" { + g.SetLinuxCgroupsPath(commonOpts.CgroupParent) + } + + // Other process resource limits + if err := addRlimits(commonOpts.Ulimit, g); err != nil { + return err + } + + logrus.Debugf("Resources: %#v", commonOpts) + return nil +} + +func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions NamespaceOptions) error { + // Start building a new list of mounts. + var mounts []specs.Mount + haveMount := func(destination string) bool { + for _, mount := range mounts { + if mount.Destination == destination { + // Already have something to mount there. + return true + } + } + return false + } + + ipc := namespaceOptions.Find(string(specs.IPCNamespace)) + hostIPC := ipc == nil || ipc.Host + net := namespaceOptions.Find(string(specs.NetworkNamespace)) + hostNetwork := net == nil || net.Host + user := namespaceOptions.Find(string(specs.UserNamespace)) + hostUser := user == nil || user.Host + + // Copy mounts from the generated list. 
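+ // (Entries for /dev/shm, /dev/mqueue, and /sys may be rewritten below as
+ // bind mounts from the host when we share the host's IPC or network
+ // namespace but not its user namespace.)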
+ mountCgroups := true + specMounts := []specs.Mount{} + for _, specMount := range spec.Mounts { + // Override some of the mounts from the generated list if we're doing different things with namespaces. + if specMount.Destination == "/dev/shm" { + specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777", "size=" + shmSize} + if hostIPC && !hostUser { + if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { + continue + } + specMount = specs.Mount{ + Source: "/dev/shm", + Type: "bind", + Destination: "/dev/shm", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + } + } + if specMount.Destination == "/dev/mqueue" { + if hostIPC && !hostUser { + if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { + continue + } + specMount = specs.Mount{ + Source: "/dev/mqueue", + Type: "bind", + Destination: "/dev/mqueue", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, + } + } + } + if specMount.Destination == "/sys" { + if hostNetwork && !hostUser { + mountCgroups = false + if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { + continue + } + specMount = specs.Mount{ + Source: "/sys", + Type: "bind", + Destination: "/sys", + Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, + } + } + } + specMounts = append(specMounts, specMount) + } + + // Add a mount for the cgroups filesystem, unless we're already + // recursively bind mounting all of /sys, in which case we shouldn't + // bother with it. + sysfsMount := []specs.Mount{} + if mountCgroups { + sysfsMount = []specs.Mount{{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, + }} + } + + // Get the list of files we need to bind into the container. + bindFileMounts, err := runSetupBoundFiles(bundlePath, bindFiles) + if err != nil { + return err + } + + // After this point we need to know the per-container persistent storage directory. + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID) + } + + // Figure out which UID and GID to tell the secrets package to use + // for files that it creates. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + + // Get the list of secrets mounts. + secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID)) + + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. + copyWithTar := b.copyWithTar(nil, nil) + builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes) + if err != nil { + return err + } + + // Get the list of explicitly-specified volume mounts. + volumes, err := runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts) + if err != nil { + return err + } + + // Add them all, in the preferred order, except where they conflict with something that was previously added. + for _, mount := range append(append(append(append(append(volumes, builtins...), secretMounts...), bindFileMounts...), specMounts...), sysfsMount...) { + if haveMount(mount.Destination) { + // Already mounting something there, no need to bother with this one. + continue + } + // Add the mount. 
+ mounts = append(mounts, mount) + } + + // Set the list in the spec. + spec.Mounts = mounts + return nil +} + +func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount, err error) { + for dest, src := range bindFiles { + options := []string{"rbind"} + if strings.HasPrefix(src, bundlePath) { + options = append(options, bind.NoBindOption) + } + mounts = append(mounts, specs.Mount{ + Source: src, + Destination: dest, + Type: "bind", + Options: options, + }) + } + return mounts, nil +} + +func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string) ([]specs.Mount, error) { + var mounts []specs.Mount + // Add temporary copies of the contents of volume locations at the + // volume locations, unless we already have something there. + for _, volume := range builtinVolumes { + subdir := digest.Canonical.FromString(volume).Hex() + volumePath := filepath.Join(containerDir, "buildah-volumes", subdir) + // If we need to, initialize the volume path's initial contents. + if _, err := os.Stat(volumePath); os.IsNotExist(err) { + if err = os.MkdirAll(volumePath, 0755); err != nil { + return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume) + } + if err = label.Relabel(volumePath, mountLabel, false); err != nil { + return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume) + } + srcPath := filepath.Join(mountPoint, volume) + if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) { + return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) + } + + } + // Add the bind mount. + mounts = append(mounts, specs.Mount{ + Source: volumePath, + Destination: volume, + Type: "bind", + Options: []string{"bind"}, + }) + } + return mounts, nil +} + +func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount) ([]specs.Mount, error) { + var mounts []specs.Mount + + parseMount := func(host, container string, options []string) (specs.Mount, error) { + var foundrw, foundro, foundz, foundZ bool + var rootProp string + for _, opt := range options { + switch opt { + case "rw": + foundrw = true + case "ro": + foundro = true + case "z": + foundz = true + case "Z": + foundZ = true + case "private", "rprivate", "slave", "rslave", "shared", "rshared": + rootProp = opt + } + } + if !foundrw && !foundro { + options = append(options, "rw") + } + if foundz { + if err := label.Relabel(host, mountLabel, true); err != nil { + return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) + } + } + if foundZ { + if err := label.Relabel(host, mountLabel, false); err != nil { + return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) + } + } + if rootProp == "" { + options = append(options, "private") + } + return specs.Mount{ + Destination: container, + Type: "bind", + Source: host, + Options: options, + }, nil + } + // Bind mount volumes specified for this particular Run() invocation + for _, i := range optionMounts { + mount, err := parseMount(i.Source, i.Destination, append(i.Options, "rbind")) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + // Bind mount volumes given by the user when the container was created + for _, i := range volumeMounts { + var options []string + spliti := strings.Split(i, ":") + if len(spliti) > 2 { + options = strings.Split(spliti[2], ",") + } 
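+ // (Each user-supplied volume spec has the form host-dir:container-dir[:options],
+ // e.g. "/var/cache:/cache:ro"; "rbind" is always added below so the bind
+ // mount applies recursively.)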
+ options = append(options, "rbind") + mount, err := parseMount(spliti[0], spliti[1], options) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +// addNetworkConfig copies files from host and sets them up to bind mount into container +func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair) (string, error) { + copyFileWithTar := b.copyFileWithTar(chownOpts, nil) + + cfile := filepath.Join(rdir, filepath.Base(hostPath)) + + if err := copyFileWithTar(hostPath, cfile); err != nil { + return "", errors.Wrapf(err, "error copying %q for container %q", cfile, b.ContainerID) + } + + if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) + } + + return cfile, nil +} + +func setupMaskedPaths(g *generate.Generator) { + for _, mp := range []string{ + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + } { + g.AddLinuxMaskedPaths(mp) + } +} + +func setupReadOnlyPaths(g *generate.Generator) { + for _, rp := range []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + } { + g.AddLinuxReadonlyPaths(rp) + } +} + +func setupCapAdd(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.AddProcessCapabilityBounding(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the bounding capability set", cap) + } + if err := g.AddProcessCapabilityEffective(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the effective capability set", cap) + } + if err := g.AddProcessCapabilityInheritable(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap) + } + if err := g.AddProcessCapabilityPermitted(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) + } + if err := g.AddProcessCapabilityAmbient(cap); err != nil { + return errors.Wrapf(err, "error adding %q to the ambient capability set", cap) + } + } + return nil +} + +func setupCapDrop(g *generate.Generator, caps ...string) error { + for _, cap := range caps { + if err := g.DropProcessCapabilityBounding(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the bounding capability set", cap) + } + if err := g.DropProcessCapabilityEffective(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the effective capability set", cap) + } + if err := g.DropProcessCapabilityInheritable(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap) + } + if err := g.DropProcessCapabilityPermitted(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) + } + if err := g.DropProcessCapabilityAmbient(cap); err != nil { + return errors.Wrapf(err, "error removing %q from the ambient capability set", cap) + } + } + return nil +} + +func setupCapabilities(g *generate.Generator, firstAdds, firstDrops, secondAdds, secondDrops []string) error { + g.ClearProcessCapabilities() + if err := setupCapAdd(g, util.DefaultCapabilities...); err != nil { + return err + } + if err := setupCapAdd(g, firstAdds...); err != nil { + return err + } + if err := setupCapDrop(g, firstDrops...); err != nil { + return err + } + if err := setupCapAdd(g, secondAdds...); 
err != nil { + return err + } + if err := setupCapDrop(g, secondDrops...); err != nil { + return err + } + return nil +} + +func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { + switch terminalPolicy { + case DefaultTerminal: + onTerminal := terminal.IsTerminal(unix.Stdin) && terminal.IsTerminal(unix.Stdout) && terminal.IsTerminal(unix.Stderr) + if onTerminal { + logrus.Debugf("stdio is a terminal, defaulting to using a terminal") + } else { + logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") + } + g.SetProcessTerminal(onTerminal) + case WithTerminal: + g.SetProcessTerminal(true) + case WithoutTerminal: + g.SetProcessTerminal(false) + } + if terminalSize != nil { + g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) + } +} + +func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, idmapOptions IDMappingOptions, policy NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { + // Set namespace options in the container configuration. + configureUserns := false + specifiedNetwork := false + for _, namespaceOption := range namespaceOptions { + switch namespaceOption.Name { + case string(specs.UserNamespace): + configureUserns = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUserns = true + } + case string(specs.NetworkNamespace): + specifiedNetwork = true + configureNetwork = false + if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { + if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { + configureNetworks = strings.Split(namespaceOption.Path, ",") + namespaceOption.Path = "" + } + configureNetwork = (policy != NetworkDisabled) + } + case string(specs.UTSNamespace): + configureUTS = false + if !namespaceOption.Host && namespaceOption.Path == "" { + configureUTS = true + } + } + if namespaceOption.Host { + if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { + return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name) + } + } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { + if namespaceOption.Path == "" { + return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name) + } + return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path) + } + } + + // If we've got mappings, we're going to have to create a user namespace. 
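+ // (For instance, a UIDMap entry with ContainerID 0, HostID 100000, and
+ // Size 65536 maps container root onto host UID 100000 and the 65535 IDs
+ // that follow it.)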
+ if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
+ if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
+ return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace))
+ }
+ hostUidmap, hostGidmap, err := util.GetHostIDMappings("")
+ if err != nil {
+ return false, nil, false, err
+ }
+ for _, m := range idmapOptions.UIDMap {
+ g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size)
+ }
+ if len(idmapOptions.UIDMap) == 0 {
+ for _, m := range hostUidmap {
+ g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size)
+ }
+ }
+ for _, m := range idmapOptions.GIDMap {
+ g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size)
+ }
+ if len(idmapOptions.GIDMap) == 0 {
+ for _, m := range hostGidmap {
+ g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size)
+ }
+ }
+ if !specifiedNetwork {
+ if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
+ return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace))
+ }
+ configureNetwork = (policy != NetworkDisabled)
+ }
+ } else {
+ if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
+ return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace))
+ }
+ if !specifiedNetwork {
+ if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
+ return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace))
+ }
+ }
+ }
+ if configureNetwork {
+ for name, val := range util.DefaultNetworkSysctl {
+ g.AddLinuxSysctl(name, val)
+ }
+ }
+ return configureNetwork, configureNetworks, configureUTS, nil
+}
+
+// Search for a command that isn't given as an absolute path using the $PATH
+// under the rootfs. We can't resolve absolute symbolic links without
+// chroot()ing, which we may not be able to do, so just accept a link as a
+// valid resolution.
+func runLookupPath(g *generate.Generator, command []string) []string {
+ // Look for the configured $PATH.
+ spec := g.Spec()
+ envPath := ""
+ for i := range spec.Process.Env {
+ if strings.HasPrefix(spec.Process.Env[i], "PATH=") {
+ envPath = spec.Process.Env[i]
+ }
+ }
+ // If there is no configured $PATH, supply one.
+ if envPath == "" {
+ defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin"
+ envPath = "PATH=" + defaultPath
+ g.AddProcessEnv("PATH", defaultPath)
+ }
+ // No command, nothing to do.
+ if len(command) == 0 {
+ return command
+ }
+ // Command is already an absolute path, use it as-is.
+ if filepath.IsAbs(command[0]) {
+ return command
+ }
+ // For each element in the PATH,
+ for _, pathEntry := range filepath.SplitList(envPath[5:]) {
+ // if it's the empty string, it's ".", which is the Cwd,
+ if pathEntry == "" {
+ pathEntry = spec.Process.Cwd
+ }
+ // build the absolute path which it might be,
+ candidate := filepath.Join(pathEntry, command[0])
+ // check if it's there,
+ if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil {
+ // and if it's not a directory, and either a symlink or executable,
+ if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) {
+ // use that.
+ return append([]string{candidate}, command[1:]...)
+ }
+ }
+ }
+ return command
+}
+
+func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) error {
+ // Set the user UID/GID/supplemental group list/capabilities lists.
+ user, err := b.user(mountPoint, options.User)
+ if err != nil {
+ return err
+ }
+ if err := setupCapabilities(g, b.AddCapabilities, b.DropCapabilities, options.AddCapabilities, options.DropCapabilities); err != nil {
+ return err
+ }
+ g.SetProcessUID(user.UID)
+ g.SetProcessGID(user.GID)
+ for _, gid := range user.AdditionalGids {
+ g.AddProcessAdditionalGid(gid)
+ }
+
+ // If we're not running as root, drop all capabilities except for the
+ // Bounding set.
+ if user.UID != 0 {
+ bounding := g.Config.Process.Capabilities.Bounding
+ g.ClearProcessCapabilities()
+ g.Config.Process.Capabilities.Bounding = bounding
+ }
+
+ return nil
+}
+
+func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) {
+ g.ClearProcessEnv()
+ for _, envSpec := range append(b.Env(), options.Env...) {
+ env := strings.SplitN(envSpec, "=", 2)
+ if len(env) > 1 {
+ g.AddProcessEnv(env[0], env[1])
+ }
+ }
+
+ for src, dest := range b.Args {
+ g.AddProcessEnv(src, dest)
+ }
+}
+
+func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) {
+ defaultNamespaceOptions, err := DefaultNamespaceOptions()
+ if err != nil {
+ return false, nil, err
+ }
+
+ namespaceOptions := defaultNamespaceOptions
+ namespaceOptions.AddOrReplace(b.NamespaceOptions...)
+ namespaceOptions.AddOrReplace(options.NamespaceOptions...)
+
+ networkPolicy := options.ConfigureNetwork
+ if networkPolicy == NetworkDefault {
+ networkPolicy = b.ConfigureNetwork
+ }
+
+ configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(g, namespaceOptions, b.IDMappingOptions, networkPolicy)
+ if err != nil {
+ return false, nil, err
+ }
+
+ if configureUTS {
+ if options.Hostname != "" {
+ g.SetHostname(options.Hostname)
+ } else if b.Hostname() != "" {
+ g.SetHostname(b.Hostname())
+ }
+ } else {
+ g.SetHostname("")
+ }
+ return configureNetwork, configureNetworks, nil
+}
+
+// Run runs the specified command in the container's root filesystem.
+func (b *Builder) Run(command []string, options RunOptions) error {
+ p, err := ioutil.TempDir("", Package)
+ if err != nil {
+ return err
+ }
+ // On some hosts, such as Atomic Host (AH), /tmp is a symlink and we
+ // need an absolute path.
+ path, err := filepath.EvalSymlinks(p)
+ if err != nil {
+ return err
+ }
+ logrus.Debugf("using %q to hold bundle data", path)
+ defer func() {
+ if err2 := os.RemoveAll(path); err2 != nil {
+ logrus.Errorf("error removing %q: %v", path, err2)
+ }
+ }()
+
+ gp, err := generate.New("linux")
+ if err != nil {
+ return err
+ }
+ g := &gp
+
+ isolation := options.Isolation
+ if isolation == IsolationDefault {
+ isolation = b.Isolation
+ if isolation == IsolationDefault {
+ isolation = IsolationOCI
+ }
+ }
+ if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil {
+ return err
+ }
+
+ b.configureEnvironment(g, options)
+
+ if b.CommonBuildOpts == nil {
+ return errors.Errorf("invalid format on container: you must recreate the container")
+ }
+
+ if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
+ return err
+ }
+
+ if options.WorkingDir != "" {
+ g.SetProcessCwd(options.WorkingDir)
+ } else if b.WorkDir() != "" {
+ g.SetProcessCwd(b.WorkDir())
+ }
+ setupSelinux(g, b.ProcessLabel, b.MountLabel)
+ mountPoint, err := b.Mount(b.MountLabel)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := b.Unmount(); err != nil {
+ logrus.Errorf("error unmounting container: %v", err)
+ }
+ }()
+ g.SetRootPath(mountPoint)
+ if len(command) > 0 {
+ command = runLookupPath(g, command)
+ g.SetProcessArgs(command)
+ } else {
+ g.SetProcessArgs(nil)
+ }
+
+ setupMaskedPaths(g)
+ setupReadOnlyPaths(g)
+
+ setupTerminal(g, options.Terminal, options.TerminalSize)
+
+ configureNetwork, configureNetworks, err := b.configureNamespaces(g, options)
+ if err != nil {
+ return err
+ }
+
+ if err := b.configureUIDGID(g, mountPoint, options); err != nil {
+ return err
+ }
+
+ g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile)
+
+ // Now grab the spec from the generator. Set the generator to nil so that future contributors
+ // will quickly be able to tell that they're supposed to be modifying the spec directly from here.
+ spec := g.Spec()
+ g = nil
+
+ // Set the working directory, creating it if we must.
+ if spec.Process.Cwd == "" {
+ spec.Process.Cwd = DefaultWorkingDir
+ }
+ logrus.Debugf("ensuring working directory %q exists", filepath.Join(mountPoint, spec.Process.Cwd))
+ if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil {
+ return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
+ }
+
+ // Set the seccomp configuration using the specified profile name. Some syscalls are
+ // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
+ // so we sorted out the capabilities lists first.
+ if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil {
+ return err
+ }
+
+ // Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
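+ // (For example, under a 0:100000:65536 UID mapping, files owned by
+ // container root would appear on the host as owned by UID 100000.)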
+ rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair) + if err != nil { + return err + } + resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair) + if err != nil { + return err + } + + if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil { + return err + } + + bindFiles := map[string]string{ + "/etc/hosts": hostFile, + "/etc/resolv.conf": resolvFile, + } + err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, append(b.NamespaceOptions, options.NamespaceOptions...)) + if err != nil { + return errors.Wrapf(err, "error resolving mountpoints for container") + } + + if options.CNIConfigDir == "" { + options.CNIConfigDir = b.CNIConfigDir + if b.CNIConfigDir == "" { + options.CNIConfigDir = util.DefaultCNIConfigDir + } + } + if options.CNIPluginPath == "" { + options.CNIPluginPath = b.CNIPluginPath + if b.CNIPluginPath == "" { + options.CNIPluginPath = util.DefaultCNIPluginPath + } + } + + switch isolation { + case IsolationOCI: + // The default is --rootless=auto, which makes troubleshooting a bit harder. + // rootlessFlag := []string{"--rootless=false"} + // for _, arg := range options.Args { + // if strings.HasPrefix(arg, "--rootless") { + // rootlessFlag = nil + // } + // } + // options.Args = append(options.Args, rootlessFlag...) + err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, nil, spec, mountPoint, path, Package+"-"+filepath.Base(path)) + case IsolationChroot: + err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr) + case IsolationOCIRootless: + if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil { + return err + } + rootlessFlag := []string{"--rootless=true"} + for _, arg := range options.Args { + if strings.HasPrefix(arg, "--rootless") { + rootlessFlag = nil + } + } + options.Args = append(options.Args, rootlessFlag...) 
+ err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path)) + default: + err = errors.Errorf("don't know how to run this command") + } + return err +} + +func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error { + switch isolation { + case IsolationOCIRootless: + if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of an IPC namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)}) + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host { + logrus.Debugf("Disabling network namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true}) + if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of a PID namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.PIDNamespace), Host: false}) + if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host { + logrus.Debugf("Forcing use of a user namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UserNamespace)}) + if ns := options.NamespaceOptions.Find(string(specs.UTSNamespace)); ns != nil && !ns.Host { + logrus.Debugf("Disabling UTS namespace.") + } + options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UTSNamespace), Host: true}) + case IsolationOCI: + pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) + userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) + if (pidns == nil || pidns.Host) && (userns != nil && !userns.Host) { + return fmt.Errorf("not allowed to mix host PID namespace with container user namespace") + } + } + return nil +} + +func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootGID uint32) error { + spec.Hostname = "" + spec.Process.User.AdditionalGids = nil + spec.Linux.Resources = nil + + emptyDir := filepath.Join(bundleDir, "empty") + if err := os.Mkdir(emptyDir, 0); err != nil { + return errors.Wrapf(err, "error creating %q", emptyDir) + } + + // Replace /sys with a read-only bind mount. + mounts := []specs.Mount{ + { + Source: "/dev", + Destination: "/dev", + Type: "tmpfs", + Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, + }, + { + Source: "mqueue", + Destination: "/dev/mqueue", + Type: "mqueue", + Options: []string{"private", "nodev", "noexec", "nosuid"}, + }, + { + Source: "pts", + Destination: "/dev/pts", + Type: "devpts", + Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, + }, + { + Source: "shm", + Destination: "/dev/shm", + Type: "tmpfs", + Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=65536k"}, + }, + { + Source: "/proc", + Destination: "/proc", + Type: "proc", + Options: []string{"private", "nodev", "noexec", "nosuid"}, + }, + { + Source: "/sys", + Destination: "/sys", + Type: "bind", + Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, + }, + } + // Cover up /sys/fs/cgroup and /sys/fs/selinux, if they exist in our source for /sys. 
+ if _, err := os.Stat("/sys/fs/cgroup"); err == nil {
+ spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup")
+ }
+ if _, err := os.Stat("/sys/fs/selinux"); err == nil {
+ spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux")
+ }
+ // Keep anything that isn't under /dev, /proc, or /sys.
+ for i := range spec.Mounts {
+ if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") ||
+ spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") ||
+ spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") {
+ continue
+ }
+ mounts = append(mounts, spec.Mounts[i])
+ }
+ spec.Mounts = mounts
+ return nil
+}
+
+type runUsingRuntimeSubprocOptions struct {
+ Options RunOptions
+ Spec *specs.Spec
+ RootPath string
+ BundlePath string
+ ConfigureNetwork bool
+ ConfigureNetworks []string
+ MoreCreateArgs []string
+ ContainerName string
+}
+
+func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+ var confwg sync.WaitGroup
+ config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
+ Options: options,
+ Spec: spec,
+ RootPath: rootPath,
+ BundlePath: bundlePath,
+ ConfigureNetwork: configureNetwork,
+ ConfigureNetworks: configureNetworks,
+ MoreCreateArgs: moreCreateArgs,
+ ContainerName: containerName,
+ })
+ if conferr != nil {
+ return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
+ }
+ cmd := reexec.Command(runUsingRuntimeCommand)
+ cmd.Dir = bundlePath
+ cmd.Stdin = options.Stdin
+ if cmd.Stdin == nil {
+ cmd.Stdin = os.Stdin
+ }
+ cmd.Stdout = options.Stdout
+ if cmd.Stdout == nil {
+ cmd.Stdout = os.Stdout
+ }
+ cmd.Stderr = options.Stderr
+ if cmd.Stderr == nil {
+ cmd.Stderr = os.Stderr
+ }
+ cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel()))
+ preader, pwriter, err := os.Pipe()
+ if err != nil {
+ return errors.Wrapf(err, "error creating configuration pipe")
+ }
+ confwg.Add(1)
+ go func() {
+ _, conferr = io.Copy(pwriter, bytes.NewReader(config))
+ confwg.Done()
+ }()
+ cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
+ defer preader.Close()
+ defer pwriter.Close()
+ err = cmd.Run()
+ confwg.Wait()
+ if err == nil {
+ return conferr
+ }
+ return err
+}
+
+func init() {
+ reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
+}
+
+func runUsingRuntimeMain() {
+ var options runUsingRuntimeSubprocOptions
+ // Set logging.
+ if level := os.Getenv("LOGLEVEL"); level != "" {
+ if ll, err := strconv.Atoi(level); err == nil {
+ logrus.SetLevel(logrus.Level(ll))
+ }
+ }
+ // Unpack our configuration.
+ confPipe := os.NewFile(3, "confpipe")
+ if confPipe == nil {
+ fmt.Fprintf(os.Stderr, "error reading options pipe\n")
+ os.Exit(1)
+ }
+ defer confPipe.Close()
+ if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
+ fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
+ os.Exit(1)
+ }
+ // Set ourselves up to read the container's exit status. We do this in a
+ // child process so that we won't mess with the setting in a caller of the
+ // library; setChildProcess is a stub that resolves to OS-specific calls.
+ if err := setChildProcess(); err != nil {
+ os.Exit(1)
+ }
+ // Run the container, start to finish.
+ status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName) + if err != nil { + fmt.Fprintf(os.Stderr, "error running container: %v\n", err) + os.Exit(1) + } + // Pass the container's exit status back to the caller by exiting with the same status. + if status.Exited() { + os.Exit(status.ExitStatus()) + } else if status.Signaled() { + fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) + os.Exit(1) + } + os.Exit(1) +} + +func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { + // Lock the caller to a single OS-level thread. + runtime.LockOSThread() + + // Set up bind mounts for things that a namespaced user might not be able to get to directly. + unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) + if unmountAll != nil { + defer func() { + if err := unmountAll(); err != nil { + logrus.Error(err) + } + }() + } + if err != nil { + return 1, err + } + + // Write the runtime configuration. + specbytes, err := json.Marshal(spec) + if err != nil { + return 1, err + } + if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { + return 1, errors.Wrapf(err, "error storing runtime configuration") + } + + logrus.Debugf("config = %v", string(specbytes)) + + // Decide which runtime to use. + runtime := options.Runtime + if runtime == "" { + runtime = util.Runtime() + } + + // Default to just passing down our stdio. + getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, os.Stdout, os.Stderr + } + + // Figure out how we're doing stdio handling, and create pipes and sockets. + var stdio sync.WaitGroup + var consoleListener *net.UnixListener + var errorFds, closeBeforeReadingErrorFds []int + stdioPipe := make([][]int, 3) + copyConsole := false + copyPipes := false + finishCopy := make([]int, 2) + if err = unix.Pipe(finishCopy); err != nil { + return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") + } + finishedCopy := make(chan struct{}) + if spec.Process != nil { + if spec.Process.Terminal { + copyConsole = true + // Create a listening socket for accepting the container's terminal's PTY master. + socketPath := filepath.Join(bundlePath, "console.sock") + consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) + if err != nil { + return 1, errors.Wrapf(err, "error creating socket to receive terminal descriptor") + } + // Add console socket arguments. + moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) + } else { + copyPipes = true + // Figure out who should own the pipes. + uid, gid, err := util.GetHostRootIDs(spec) + if err != nil { + return 1, err + } + // Create stdio pipes. + if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { + return 1, err + } + errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} + closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} + // Set stdio to our pipes. 
+ getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") + stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") + stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") + return stdin, stdout, stderr + } + } + } else { + if options.Quiet { + // Discard stdout. + getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { + return os.Stdin, nil, os.Stderr + } + } + } + + // Build the commands that we'll execute. + pidFile := filepath.Join(bundlePath, "pid") + args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) + create := exec.Command(runtime, args...) + create.Dir = bundlePath + stdin, stdout, stderr := getCreateStdio() + create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr + if create.SysProcAttr == nil { + create.SysProcAttr = &syscall.SysProcAttr{} + } + + args = append(options.Args, "start", containerName) + start := exec.Command(runtime, args...) + start.Dir = bundlePath + start.Stderr = os.Stderr + + args = append(options.Args, "kill", containerName) + kill := exec.Command(runtime, args...) + kill.Dir = bundlePath + kill.Stderr = os.Stderr + + args = append(options.Args, "delete", containerName) + del := exec.Command(runtime, args...) + del.Dir = bundlePath + del.Stderr = os.Stderr + + // Actually create the container. + logrus.Debugf("Running %q", create.Args) + err = create.Run() + if err != nil { + return 1, errors.Wrapf(err, "error creating container for %v: %s", spec.Process.Args, runCollectOutput(errorFds, closeBeforeReadingErrorFds)) + } + defer func() { + err2 := del.Run() + if err2 != nil { + if err == nil { + err = errors.Wrapf(err2, "error deleting container") + } else { + logrus.Infof("error deleting container: %v", err2) + } + } + }() + + // Make sure we read the container's exit status when it exits. + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return 1, errors.Wrapf(err, "error reading pid from %q", pidFile) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) + } + var reaping sync.WaitGroup + reaping.Add(1) + go func() { + defer reaping.Done() + var err error + _, err = unix.Wait4(pid, &wstatus, 0, nil) + if err != nil { + wstatus = 0 + logrus.Errorf("error waiting for container child process %d: %v\n", pid, err) + } + }() + + if configureNetwork { + teardown, err := runConfigureNetwork(options, configureNetworks, pid, containerName, spec.Process.Args) + if teardown != nil { + defer teardown() + } + if err != nil { + return 1, err + } + } + + if copyPipes { + // We don't need the ends of the pipes that belong to the container. + stdin.Close() + if stdout != nil { + stdout.Close() + } + stderr.Close() + } + + // Handle stdio for the container in the background. + stdio.Add(1) + go runCopyStdio(&stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) + + // Start the container. 
+ logrus.Debugf("Running %q", start.Args)
+ err = start.Run()
+ if err != nil {
+ return 1, errors.Wrapf(err, "error starting container")
+ }
+ stopped := false
+ defer func() {
+ if !stopped {
+ err2 := kill.Run()
+ if err2 != nil {
+ if err == nil {
+ err = errors.Wrapf(err2, "error stopping container")
+ } else {
+ logrus.Infof("error stopping container: %v", err2)
+ }
+ }
+ }
+ }()
+
+ // Wait for the container to exit.
+ for {
+ now := time.Now()
+ var state specs.State
+ args = append(options.Args, "state", containerName)
+ stat := exec.Command(runtime, args...)
+ stat.Dir = bundlePath
+ stat.Stderr = os.Stderr
+ stateOutput, stateErr := stat.Output()
+ if stateErr != nil {
+ return 1, errors.Wrapf(stateErr, "error reading container state")
+ }
+ if err = json.Unmarshal(stateOutput, &state); err != nil {
+ return 1, errors.Wrapf(err, "error parsing container state %q", string(stateOutput))
+ }
+ switch state.Status {
+ case "running":
+ case "stopped":
+ stopped = true
+ default:
+ return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status)
+ }
+ if stopped {
+ break
+ }
+ select {
+ case <-finishedCopy:
+ stopped = true
+ case <-time.After(time.Until(now.Add(100 * time.Millisecond))):
+ continue
+ }
+ if stopped {
+ break
+ }
+ }
+
+ // Close the writing end of the stop-handling-stdio notification pipe.
+ unix.Close(finishCopy[1])
+ // Wait for the stdio copy goroutine to flush.
+ stdio.Wait()
+ // Wait until we finish reading the exit status.
+ reaping.Wait()
+
+ return wstatus, nil
+}
+
+func runCollectOutput(fds, closeBeforeReadingFds []int) string {
+ for _, fd := range closeBeforeReadingFds {
+ unix.Close(fd)
+ }
+ var b bytes.Buffer
+ buf := make([]byte, 8192)
+ for _, fd := range fds {
+ nread, err := unix.Read(fd, buf)
+ if err != nil {
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ switch errno {
+ default:
+ logrus.Errorf("error reading from pipe %d: %v", fd, err)
+ case syscall.EINTR, syscall.EAGAIN:
+ }
+ } else {
+ logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+ }
+ continue
+ }
+ for nread > 0 {
+ r := buf[:nread]
+ if nwritten, err := b.Write(r); err != nil || nwritten != len(r) {
+ if nwritten != len(r) {
+ logrus.Errorf("error buffering data from pipe %d: %v", fd, err)
+ break
+ }
+ }
+ nread, err = unix.Read(fd, buf)
+ if err != nil {
+ if errno, isErrno := err.(syscall.Errno); isErrno {
+ switch errno {
+ default:
+ logrus.Errorf("error reading from pipe %d: %v", fd, err)
+ case syscall.EINTR, syscall.EAGAIN:
+ }
+ } else {
+ logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err)
+ }
+ break
+ }
+ }
+ }
+ return b.String()
+}
+
+func runConfigureNetwork(options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
+ var netconf, undo []*libcni.NetworkConfigList
+ // Scan for CNI configuration files.
+ confdir := options.CNIConfigDir
+ files, err := libcni.ConfFiles(confdir, []string{".conf"})
+ if err != nil {
+ return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir)
+ }
+ lists, err := libcni.ConfFiles(confdir, []string{".conflist"})
+ if err != nil {
+ return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir)
+ }
+ logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...))
+ // Read the CNI configuration files.
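+ // (A *.conf file holds a single network configuration, which gets
+ // wrapped into a one-element list below; a *.conflist file already
+ // describes an ordered chain of plugins.)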
+ for _, file := range files { + nc, err := libcni.ConfFromFile(file) + if err != nil { + return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command) + } + if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) { + if nc.Network.Name == "" { + logrus.Debugf("configuration in %q has no name, skipping it", file) + } else { + logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name) + } + continue + } + cl, err := libcni.ConfListFromConf(nc) + if err != nil { + return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command) + } + logrus.Debugf("using network configuration from %q", file) + netconf = append(netconf, cl) + } + for _, list := range lists { + cl, err := libcni.ConfListFromFile(list) + if err != nil { + return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command) + } + if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) { + if cl.Name == "" { + logrus.Debugf("configuration list in %q has no name, skipping it", list) + } else { + logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name) + } + continue + } + logrus.Debugf("using network configuration list from %q", list) + netconf = append(netconf, cl) + } + // Make sure we can access the container's network namespace, + // even after it exits, to successfully tear down the + // interfaces. Ensure this by opening a handle to the network + // namespace, and using our copy to both configure and + // deconfigure it. + netns := fmt.Sprintf("/proc/%d/ns/net", pid) + netFD, err := unix.Open(netns, unix.O_RDONLY, 0) + if err != nil { + return nil, errors.Wrapf(err, "error opening network namespace for %v", command) + } + mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) + // Build our search path for the plugins. + pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator)) + cni := libcni.CNIConfig{Path: pluginPaths} + // Configure the interfaces. + rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) + teardown = func() { + for _, nc := range undo { + if err = cni.DelNetworkList(nc, rtconf[nc]); err != nil { + logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) + } + } + unix.Close(netFD) + } + for i, nc := range netconf { + // Build the runtime config for use with this network configuration. + rtconf[nc] = &libcni.RuntimeConf{ + ContainerID: containerName, + NetNS: mynetns, + IfName: fmt.Sprintf("if%d", i), + Args: [][2]string{}, + CapabilityArgs: map[string]interface{}{}, + } + // Bring it up. + _, err := cni.AddNetworkList(nc, rtconf[nc]) + if err != nil { + return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) + } + // Add it to the list of networks to take down when the container process exits. + undo = append([]*libcni.NetworkConfigList{nc}, undo...) 
+ } + return teardown, nil +} + +func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { + defer func() { + unix.Close(finishCopy[0]) + if copyPipes { + unix.Close(stdioPipe[unix.Stdin][1]) + unix.Close(stdioPipe[unix.Stdout][0]) + unix.Close(stdioPipe[unix.Stderr][0]) + } + stdio.Done() + finishedCopy <- struct{}{} + }() + // Map describing where data on an incoming descriptor should go. + relayMap := make(map[int]int) + // Map describing incoming and outgoing descriptors. + readDesc := make(map[int]string) + writeDesc := make(map[int]string) + // Buffers. + relayBuffer := make(map[int]*bytes.Buffer) + // Set up the terminal descriptor or pipes for polling. + if copyConsole { + // Accept a connection over our listening socket. + fd, err := runAcceptTerminal(consoleListener, spec.Process.ConsoleSize) + if err != nil { + logrus.Errorf("%v", err) + return + } + terminalFD := fd + // Input from our stdin, output from the terminal descriptor. + relayMap[unix.Stdin] = terminalFD + readDesc[unix.Stdin] = "stdin" + relayBuffer[terminalFD] = new(bytes.Buffer) + writeDesc[terminalFD] = "container terminal input" + relayMap[terminalFD] = unix.Stdout + readDesc[terminalFD] = "container terminal output" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "output" + // Set our terminal's mode to raw, to pass handling of special + // terminal input to the terminal in the container. + if terminal.IsTerminal(unix.Stdin) { + if state, err := terminal.MakeRaw(unix.Stdin); err != nil { + logrus.Warnf("error setting terminal state: %v", err) + } else { + defer func() { + if err = terminal.Restore(unix.Stdin, state); err != nil { + logrus.Errorf("unable to restore terminal state: %v", err) + } + }() + } + } + } + if copyPipes { + // Input from our stdin, output from the stdout and stderr pipes. + relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] + readDesc[unix.Stdin] = "stdin" + relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) + writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" + relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout + readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" + relayBuffer[unix.Stdout] = new(bytes.Buffer) + writeDesc[unix.Stdout] = "stdout" + relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr + readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" + relayBuffer[unix.Stderr] = new(bytes.Buffer) + writeDesc[unix.Stderr] = "stderr" + } + // Set our reading descriptors to non-blocking. + for rfd, wfd := range relayMap { + if err := unix.SetNonblock(rfd, true); err != nil { + logrus.Errorf("error setting %s to nonblocking: %v", readDesc[rfd], err) + return + } + if err := unix.SetNonblock(wfd, false); err != nil { + logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err) + } + } + // A helper that returns false if err is an error that would cause us + // to give up. + logIfNotRetryable := func(err error, what string) (retry bool) { + if err == nil { + return true + } + if errno, isErrno := err.(syscall.Errno); isErrno { + switch errno { + case syscall.EINTR, syscall.EAGAIN: + return true + } + } + logrus.Error(what) + return false + } + // Pass data back and forth. + pollTimeout := -1 + for len(relayMap) > 0 { + // Start building the list of descriptors to poll. + pollFds := make([]unix.PollFd, 0, len(relayMap)+1) + // Poll for a notification that we should stop handling stdio. 
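+		// (The finish-copy notification pipe is always entry 0 in pollFds; the
+		// check at the bottom of this loop uses it to decide when to return.)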
+ pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) + // Poll on our reading descriptors. + for rfd := range relayMap { + pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) + } + buf := make([]byte, 8192) + // Wait for new data from any input descriptor, or a notification that we're done. + _, err := unix.Poll(pollFds, pollTimeout) + if !logIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { + return + } + removes := make(map[int]struct{}) + for _, pollFd := range pollFds { + // If this descriptor's just been closed from the other end, mark it for + // removal from the set that we're checking for. + if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + removes[int(pollFd.Fd)] = struct{}{} + } + // If the descriptor was closed elsewhere, remove it from our list. + if pollFd.Revents&unix.POLLNVAL != 0 { + logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) + removes[int(pollFd.Fd)] = struct{}{} + } + // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. + if pollFd.Revents&unix.POLLIN == 0 { + // If we're using pipes and it's our stdin and it's closed, close the writing + // end of the corresponding pipe. + if copyPipes && int(pollFd.Fd) == unix.Stdin && pollFd.Revents&unix.POLLHUP != 0 { + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + continue + } + // Read whatever there is to be read. + readFD := int(pollFd.Fd) + writeFD, needToRelay := relayMap[readFD] + if needToRelay { + n, err := unix.Read(readFD, buf) + if !logIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { + return + } + // If it's zero-length on our stdin and we're + // using pipes, it's an EOF, so close the stdin + // pipe's writing end. + if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin { + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + if n > 0 { + // Buffer the data in case we get blocked on where they need to go. + nwritten, err := relayBuffer[writeFD].Write(buf[:n]) + if err != nil { + logrus.Debugf("buffer: %v", err) + continue + } + if nwritten != n { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) + continue + } + // If this is the last of the data we'll be able to read from this + // descriptor, read all that there is to read. + for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { + nr, err := unix.Read(readFD, buf) + logIfNotRetryable(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) + if nr <= 0 { + break + } + nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) + if err != nil { + logrus.Debugf("buffer: %v", err) + break + } + if nwritten != nr { + logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) + break + } + } + } + } + } + // Try to drain the output buffers. Set the default timeout + // for the next poll() to 100ms if we still have data to write. + pollTimeout = -1 + for writeFD := range relayBuffer { + if relayBuffer[writeFD].Len() > 0 { + n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) + if !logIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { + return + } + if n > 0 { + relayBuffer[writeFD].Next(n) + } + } + if relayBuffer[writeFD].Len() > 0 { + pollTimeout = 100 + } + } + // Remove any descriptors which we don't need to poll any more from the poll descriptor list. 
+ for remove := range removes { + delete(relayMap, remove) + } + // If the we-can-return pipe had anything for us, we're done. + for _, pollFd := range pollFds { + if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 { + // The pipe is closed, indicating that we can stop now. + return + } + } + } +} + +func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) { + defer consoleListener.Close() + c, err := consoleListener.AcceptUnix() + if err != nil { + return -1, errors.Wrapf(err, "error accepting socket descriptor connection") + } + defer c.Close() + // Expect a control message over our new connection. + b := make([]byte, 8192) + oob := make([]byte, 8192) + n, oobn, _, _, err := c.ReadMsgUnix(b, oob) + if err != nil { + return -1, errors.Wrapf(err, "error reading socket descriptor") + } + if n > 0 { + logrus.Debugf("socket descriptor is for %q", string(b[:n])) + } + if oobn > len(oob) { + return -1, errors.Errorf("too much out-of-band data (%d bytes)", oobn) + } + // Parse the control message. + scm, err := unix.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing out-of-band data as a socket control message") + } + logrus.Debugf("control messages: %v", scm) + // Expect to get a descriptor. + terminalFD := -1 + for i := range scm { + fds, err := unix.ParseUnixRights(&scm[i]) + if err != nil { + return -1, errors.Wrapf(err, "error parsing unix rights control message") + } + logrus.Debugf("fds: %v", fds) + if len(fds) == 0 { + continue + } + terminalFD = fds[0] + break + } + if terminalFD == -1 { + return -1, errors.Errorf("unable to read terminal descriptor") + } + // Set the pseudoterminal's size to the configured size, or our own. + winsize := &unix.Winsize{} + if terminalSize != nil { + // Use configured sizes. + winsize.Row = uint16(terminalSize.Height) + winsize.Col = uint16(terminalSize.Width) + } else { + if terminal.IsTerminal(unix.Stdin) { + // Use the size of our terminal. + if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil { + logrus.Warnf("error reading size of controlling terminal: %v", err) + winsize.Row = 0 + winsize.Col = 0 + } + } + } + if winsize.Row != 0 && winsize.Col != 0 { + if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil { + logrus.Warnf("error setting size of container pseudoterminal: %v", err) + } + // FIXME - if we're connected to a terminal, we should + // be passing the updated terminal size down when we + // receive a SIGWINCH. + } + return terminalFD, nil +} + +// Create pipes to use for relaying stdio.
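+// The returned slice is indexed by unix.Stdin/unix.Stdout/unix.Stderr; in each
+// pair, element [0] is the read end and element [1] is the write end, and the
+// ends the container uses directly (the read end of stdin, the write ends of
+// stdout and stderr) are chown()ed to the supplied uid and gid.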
+func runMakeStdioPipe(uid, gid int) ([][]int, error) { + stdioPipe := make([][]int, 3) + for i := range stdioPipe { + stdioPipe[i] = make([]int, 2) + if err := unix.Pipe(stdioPipe[i]); err != nil { + return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i) + } + } + if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor") + } + if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor") + } + if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { + return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor") + } + return stdioPipe, nil +} diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go new file mode 100644 index 000000000..a7519a092 --- /dev/null +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package buildah + +import ( + "fmt" + "golang.org/x/sys/unix" + "os" +) + +func setChildProcess() error { + if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil { + fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err) + return err + } + return nil +} diff --git a/vendor/github.com/containers/buildah/run_unsupport.go b/vendor/github.com/containers/buildah/run_unsupport.go new file mode 100644 index 000000000..4824a0c4e --- /dev/null +++ b/vendor/github.com/containers/buildah/run_unsupport.go @@ -0,0 +1,11 @@ +// +build !linux + +package buildah + +import ( + "github.com/pkg/errors" +) + +func setChildProcess() error { + return errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go new file mode 100644 index 000000000..a435b5f71 --- /dev/null +++ b/vendor/github.com/containers/buildah/seccomp.go @@ -0,0 +1,35 @@ +// +build seccomp,linux + +package buildah + +import ( + "io/ioutil" + + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + seccomp "github.com/seccomp/containers-golang" +) + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + switch seccompProfilePath { + case "unconfined": + spec.Linux.Seccomp = nil + case "": + seccompConfig, err := seccomp.GetDefaultProfile(spec) + if err != nil { + return errors.Wrapf(err, "loading default seccomp profile failed") + } + spec.Linux.Seccomp = seccompConfig + default: + seccompProfile, err := ioutil.ReadFile(seccompProfilePath) + if err != nil { + return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + } + seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) + if err != nil { + return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + } + spec.Linux.Seccomp = seccompConfig + } + return nil +} diff --git a/vendor/github.com/containers/buildah/seccomp_unsupported.go b/vendor/github.com/containers/buildah/seccomp_unsupported.go new file mode 100644 index 000000000..cba8390c5 --- /dev/null +++ b/vendor/github.com/containers/buildah/seccomp_unsupported.go @@ -0,0 +1,15 @@ +// +build !seccomp !linux + +package buildah + +import ( + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + if spec.Linux != nil { + // runtime-tools may have supplied us with a 
default filter + spec.Linux.Seccomp = nil + } + return nil +} diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go new file mode 100644 index 000000000..2b850cf9f --- /dev/null +++ b/vendor/github.com/containers/buildah/selinux.go @@ -0,0 +1,12 @@ +// +build selinux,linux + +package buildah + +import ( + "github.com/opencontainers/runtime-tools/generate" +) + +func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { + g.SetProcessSelinuxLabel(processLabel) + g.SetLinuxMountLabel(mountLabel) +} diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go new file mode 100644 index 000000000..0aa7c46e4 --- /dev/null +++ b/vendor/github.com/containers/buildah/selinux_unsupported.go @@ -0,0 +1,10 @@ +// +build !selinux !linux + +package buildah + +import ( + "github.com/opencontainers/runtime-tools/generate" +) + +func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { +} diff --git a/vendor/github.com/containers/buildah/unmount.go b/vendor/github.com/containers/buildah/unmount.go new file mode 100644 index 000000000..cdb511170 --- /dev/null +++ b/vendor/github.com/containers/buildah/unmount.go @@ -0,0 +1,11 @@ +package buildah + +// Unmount unmounts a build container. +func (b *Builder) Unmount() error { + _, err := b.store.Unmount(b.ContainerID, false) + if err == nil { + b.MountPoint = "" + err = b.Save() + } + return err +} diff --git a/vendor/github.com/containers/buildah/unshare/unshare.c b/vendor/github.com/containers/buildah/unshare/unshare.c new file mode 100644 index 000000000..83864359b --- /dev/null +++ b/vendor/github.com/containers/buildah/unshare/unshare.c @@ -0,0 +1,110 @@ +#define _GNU_SOURCE +#include <sys/ioctl.h> +#include <sys/stat.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <fcntl.h> +#include <grp.h> +#include <sched.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <termios.h> +#include <unistd.h> + +static int _buildah_unshare_parse_envint(const char *envname) { + char *p, *q; + long l; + + p = getenv(envname); + if (p == NULL) { + return -1; + } + q = NULL; + l = strtol(p, &q, 10); + if ((q == NULL) || (*q != '\0')) { + fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); + _exit(1); + } + unsetenv(envname); + return l; +} + +void _buildah_unshare(void) +{ + int flags, pidfd, continuefd, n, pgrp, sid, ctty, allow_setgroups; + char buf[2048]; + + flags = _buildah_unshare_parse_envint("_Buildah-unshare"); + if (flags == -1) { + return; + } + if ((flags & CLONE_NEWUSER) != 0) { + if (unshare(CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); + _exit(1); + } + } + pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + if (write(pidfd, buf, strlen(buf)) != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _buildah_unshare_parse_envint("_Buildah-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _buildah_unshare_parse_envint("_Buildah-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _buildah_unshare_parse_envint("_Buildah-setpgrp"); + if (pgrp == 1) { + if (setpgrp() == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _buildah_unshare_parse_envint("_Buildah-ctty");
+ if (ctty != -1) { + if (ioctl(ctty, TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } + allow_setgroups = _buildah_unshare_parse_envint("_Buildah-allow-setgroups"); + if ((flags & CLONE_NEWUSER) != 0) { + if (allow_setgroups == 1) { + if (setgroups(0, NULL) != 0) { + fprintf(stderr, "Error during setgroups(0, NULL): %m\n"); + _exit(1); + } + } + if (setresgid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresgid(0): %m\n"); + _exit(1); + } + if (setresuid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresuid(0): %m\n"); + _exit(1); + } + } + if ((flags & ~CLONE_NEWUSER) != 0) { + if (unshare(flags & ~CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(...): %m\n"); + _exit(1); + } + } + return; +} diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go new file mode 100644 index 000000000..d89dfc053 --- /dev/null +++ b/vendor/github.com/containers/buildah/unshare/unshare.go @@ -0,0 +1,273 @@ +// +build linux + +package unshare + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "syscall" + + "github.com/containers/buildah/util" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and +// handles setting ID maps and other related settings by triggering +// initialization code in the child. +type Cmd struct { + *exec.Cmd + UnshareFlags int + UseNewuidmap bool + UidMappings []specs.LinuxIDMapping + UseNewgidmap bool + GidMappings []specs.LinuxIDMapping + GidMappingsEnableSetgroups bool + Setsid bool + Setpgrp bool + Ctty *os.File + OOMScoreAdj *int + Hook func(pid int) error +} + +// Command creates a new Cmd which can be customized. +func Command(args ...string) *Cmd { + cmd := reexec.Command(args...) + return &Cmd{ + Cmd: cmd, + } +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set an environment variable to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags)) + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pid pipe") + } + c.Env = append(c.Env, fmt.Sprintf("_Buildah-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return errors.Wrapf(err, "error creating continue pipe") + } + c.Env = append(c.Env, fmt.Sprintf("_Buildah-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. + if c.Setsid { + c.Env = append(c.Env, "_Buildah-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Buildah-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Buildah-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + if c.GidMappingsEnableSetgroups { + c.Env = append(c.Env, "_Buildah-allow-setgroups=1") + } else { + c.Env = append(c.Env, "_Buildah-allow-setgroups=0") + } + + // Make sure we clean up our pipes.
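+	// (Descriptors that are successfully handed off or closed early are set
+	// to nil as we go, so this deferred cleanup only closes what is still
+	// outstanding when Start() returns.)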
+ defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. + continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + pidString := "" + b := new(bytes.Buffer) + io.Copy(b, pidRead) + pidString = b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return errors.Wrapf(err, "error parsing PID %q", pidString) + } + pidString = fmt.Sprintf("%d", pid) + + // If we created a new user namespace, set any specified mappings. + if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 { + // Always set "setgroups". + setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) + return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + } + defer setgroups.Close() + if c.GidMappingsEnableSetgroups { + if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) + return errors.Wrapf(err, "error writing \"allow\" to /proc/%s/setgroups", pidString) + } + } else { + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) + return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString) + } + } + + if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { + uidmap, gidmap, err := util.GetHostIDMappings("") + if err != nil { + fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) + return errors.Wrapf(err, "error reading ID mappings in parent") + } + if len(c.UidMappings) == 0 { + c.UidMappings = uidmap + for i := range c.UidMappings { + c.UidMappings[i].HostID = c.UidMappings[i].ContainerID + } + } + if len(c.GidMappings) == 0 { + c.GidMappings = gidmap + for i := range c.GidMappings { + c.GidMappings[i].HostID = c.GidMappings[i].ContainerID + } + } + } + + if len(c.GidMappings) > 0 { + // Build the GID map, since writing to the proc file has to be done all at once. + g := new(bytes.Buffer) + for _, m := range c.GidMappings { + fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + // Set the GID map. + if c.UseNewgidmap { + cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
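+			// The buffer is reset and reused below to capture newgidmap's
+			// stdout and stderr so they can be included in any error message.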
+ g.Reset() + cmd.Stdout = g + cmd.Stderr = g + err := cmd.Run() + if err != nil { + fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String()) + return errors.Wrapf(err, "error running newgidmap: %s", g.String()) + } + } else { + gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err) + return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString) + } + defer gidmap.Close() + if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil { + fmt.Fprintf(continueWrite, "error writing /proc/%s/gid_map: %v", pidString, err) + return errors.Wrapf(err, "error writing /proc/%s/gid_map", pidString) + } + } + } + + if len(c.UidMappings) > 0 { + // Build the UID map, since writing to the proc file has to be done all at once. + u := new(bytes.Buffer) + for _, m := range c.UidMappings { + fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + // Set the UID map. + if c.UseNewuidmap { + cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + u.Reset() + cmd.Stdout = u + cmd.Stderr = u + err := cmd.Run() + if err != nil { + fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String()) + return errors.Wrapf(err, "error running newuidmap: %s", u.String()) + } + } else { + uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) + return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString) + } + defer uidmap.Close() + if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { + fmt.Fprintf(continueWrite, "error writing /proc/%s/uid_map: %v", pidString, err) + return errors.Wrapf(err, "error writing /proc/%s/uid_map", pidString) + } + } + } + } + + if c.OOMScoreAdj != nil { + oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) + return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString) + } + defer oomScoreAdj.Close() + if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { + fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", *c.OOMScoreAdj, err) + return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", *c.OOMScoreAdj, pidString) + } + } + // Run any additional setup that we want to do before the child starts running proper.
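+	// The continue pipe is still open at this point, so a failing hook can
+	// report its error to the child before we return.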
+ if c.Hook != nil { + if err = c.Hook(pid); err != nil { + fmt.Fprintf(continueWrite, "hook error: %v", err) + return err + } + } + + return nil +} + +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + return c.Wait() +} + +func (c *Cmd) CombinedOutput() ([]byte, error) { + return nil, errors.New("unshare: CombinedOutput() not implemented") +} + +func (c *Cmd) Output() ([]byte, error) { + return nil, errors.New("unshare: Output() not implemented") +} diff --git a/vendor/github.com/containers/buildah/unshare/unshare_cgo.go b/vendor/github.com/containers/buildah/unshare/unshare_cgo.go new file mode 100644 index 000000000..26a0b2c20 --- /dev/null +++ b/vendor/github.com/containers/buildah/unshare/unshare_cgo.go @@ -0,0 +1,10 @@ +// +build linux,cgo,!gccgo + +package unshare + +// #cgo CFLAGS: -Wall +// extern void _buildah_unshare(void); +// void __attribute__((constructor)) init(void) { +// _buildah_unshare(); +// } +import "C" diff --git a/vendor/github.com/containers/buildah/unshare/unshare_gccgo.go b/vendor/github.com/containers/buildah/unshare/unshare_gccgo.go new file mode 100644 index 000000000..c4811782a --- /dev/null +++ b/vendor/github.com/containers/buildah/unshare/unshare_gccgo.go @@ -0,0 +1,25 @@ +// +build linux,cgo,gccgo + +package unshare + +// #cgo CFLAGS: -Wall -Wextra +// extern void _buildah_unshare(void); +// void __attribute__((constructor)) init(void) { +// _buildah_unshare(); +// } +import "C" + +// This next bit is straight out of libcontainer. + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 + C.init() + } +} diff --git a/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go b/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go new file mode 100644 index 000000000..feeceae66 --- /dev/null +++ b/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go @@ -0,0 +1 @@ +package unshare diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go new file mode 100644 index 000000000..ef9be87fb --- /dev/null +++ b/vendor/github.com/containers/buildah/util.go @@ -0,0 +1,196 @@ +package buildah + +import ( + "archive/tar" + "io" + "os" + "sync" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/pkg/sysregistriesv2" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitReexec is a wrapper for reexec.Init(). It should be called at +// the start of main(), and if it returns true, main() should return +// immediately. 
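+//
+// A typical caller looks like this (hypothetical sketch, for illustration
+// only):
+//
+//	func main() {
+//		if buildah.InitReexec() {
+//			return
+//		}
+//		// ... normal startup ...
+//	}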
+func InitReexec() bool { + return reexec.Init() +} + +func copyStringStringMap(m map[string]string) map[string]string { + n := map[string]string{} + for k, v := range m { + n[k] = v + } + return n +} + +func copyStringSlice(s []string) []string { + t := make([]string, len(s)) + copy(t, s) + return t +} + +func convertStorageIDMaps(UIDMap, GIDMap []idtools.IDMap) ([]rspec.LinuxIDMapping, []rspec.LinuxIDMapping) { + uidmap := make([]rspec.LinuxIDMapping, 0, len(UIDMap)) + gidmap := make([]rspec.LinuxIDMapping, 0, len(GIDMap)) + for _, m := range UIDMap { + uidmap = append(uidmap, rspec.LinuxIDMapping{ + HostID: uint32(m.HostID), + ContainerID: uint32(m.ContainerID), + Size: uint32(m.Size), + }) + } + for _, m := range GIDMap { + gidmap = append(gidmap, rspec.LinuxIDMapping{ + HostID: uint32(m.HostID), + ContainerID: uint32(m.ContainerID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap +} + +func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMap, []idtools.IDMap) { + uidmap := make([]idtools.IDMap, 0, len(UIDMap)) + gidmap := make([]idtools.IDMap, 0, len(GIDMap)) + for _, m := range UIDMap { + uidmap = append(uidmap, idtools.IDMap{ + HostID: int(m.HostID), + ContainerID: int(m.ContainerID), + Size: int(m.Size), + }) + } + for _, m := range GIDMap { + gidmap = append(gidmap, idtools.IDMap{ + HostID: int(m.HostID), + ContainerID: int(m.ContainerID), + Size: int(m.Size), + }) + } + return uidmap, gidmap +} + +// copyFileWithTar returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { + convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) + archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return err + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options) + hashWorker.Wait() + if err == nil { + err = hashError + } + return err + } + } + return archiver.CopyFileWithTar +} + +// copyWithTar returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { + convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) + archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + 
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// untarPath returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { + convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) + archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// tarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) { + convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) + return func(path string) (io.ReadCloser, error) { + return archive.TarWithOptions(path, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// getRegistries obtains the list of search registries defined in the global registries file. +func getRegistries(sc *types.SystemContext) ([]string, error) { + var searchRegistries []string + registries, err := sysregistriesv2.GetRegistries(sc) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse the registries.conf file") + } + for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(registries) { + if !registry.Blocked { + searchRegistries = append(searchRegistries, registry.URL) + } + } + return searchRegistries, nil +} + +// hasRegistry returns a bool/err response if the image has a registry in its +// name +func hasRegistry(imageName string) (bool, error) { + imgRef, err := reference.Parse(imageName) + if err != nil { + return false, err + } + registry := reference.Domain(imgRef.(reference.Named)) + if registry != "" { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go new file mode 100644 index 000000000..dc5f4b6c8 --- /dev/null +++ b/vendor/github.com/containers/buildah/util/types.go @@ -0,0 +1,35 @@ +package util + +const ( + // DefaultRuntime is the default command to use to run the container. + DefaultRuntime = "runc" + // DefaultCNIPluginPath is the default location of CNI plugin helpers. + DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin" + // DefaultCNIConfigDir is the default location of CNI configuration files. + DefaultCNIConfigDir = "/etc/cni/net.d" +) + +var ( + // DefaultCapabilities is the list of capabilities which we grant by + // default to containers which are running under UID 0. 
+ DefaultCapabilities = []string{ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT", + } + // DefaultNetworkSysctl is the list of Kernel parameters which we + // grant by default to containers which are running under UID 0. + DefaultNetworkSysctl = map[string]string{ + "net.ipv4.ping_group_range": "0 0", + } +) diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go new file mode 100644 index 000000000..93323232d --- /dev/null +++ b/vendor/github.com/containers/buildah/util/util.go @@ -0,0 +1,494 @@ +package util + +import ( + "bufio" + "fmt" + "io" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/image/directory" + dockerarchive "github.com/containers/image/docker/archive" + "github.com/containers/image/docker/reference" + ociarchive "github.com/containers/image/oci/archive" + "github.com/containers/image/pkg/sysregistriesv2" + "github.com/containers/image/signature" + is "github.com/containers/image/storage" + "github.com/containers/image/tarball" + "github.com/containers/image/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + minimumTruncatedIDLength = 3 +) + +var ( + // RegistryDefaultPathPrefix contains a per-registry listing of default prefixes + // to prepend to image names that only contain a single path component. + RegistryDefaultPathPrefix = map[string]string{ + "index.docker.io": "library", + "docker.io": "library", + } + // Transports contains the possible transports used for images + Transports = map[string]string{ + dockerarchive.Transport.Name(): "", + ociarchive.Transport.Name(): "", + directory.Transport.Name(): "", + tarball.Transport.Name(): "", + } + // DockerArchive is the transport we prepend to an image name + // when saving to docker-archive + DockerArchive = dockerarchive.Transport.Name() + // OCIArchive is the transport we prepend to an image name + // when saving to oci-archive + OCIArchive = ociarchive.Transport.Name() + // DirTransport is the transport for pushing and pulling + // images to and from a directory + DirTransport = directory.Transport.Name() + // TarballTransport is the transport for importing a tar archive + // and creating a filesystem image + TarballTransport = tarball.Transport.Name() +) + +// ResolveName checks if name is a valid image name, and if that name doesn't +// include a domain portion, returns a list of the names which it might +// correspond to in the set of configured registries. +func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, error) { + if name == "" { + return nil, nil + } + + // Maybe it's a truncated image ID. Don't prepend a registry name, then. + if len(name) >= minimumTruncatedIDLength { + if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need only expand the ID. + return []string{img.ID}, nil + } + } + + // If the image includes a transport's name as a prefix, use it as-is. 
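+	// For example, "docker-archive:/path/to/image.tar" resolves to
+	// "/path/to/image.tar".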
+ split := strings.SplitN(name, ":", 2) + if len(split) == 2 { + if _, ok := Transports[split[0]]; ok { + return []string{split[1]}, nil + } + } + + // If the image name already included a domain component, we're done. + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image name %q", name) + } + if named.String() == name { + // Parsing produced the same result, so there was a domain name in there to begin with. + return []string{name}, nil + } + if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" { + // If this domain can cause us to insert something in the middle, check if that happened. + repoPath := reference.Path(named) + domain := reference.Domain(named) + tag := "" + if tagged, ok := named.(reference.Tagged); ok { + tag = ":" + tagged.Tag() + } + digest := "" + if digested, ok := named.(reference.Digested); ok { + digest = "@" + digested.Digest().String() + } + defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/" + if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name { + // Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with. + return []string{name}, nil + } + } + + // Figure out the list of registries. + var registries []string + allRegistries, err := sysregistriesv2.GetRegistries(sc) + if err != nil { + logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) + registries = []string{} + } + for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(allRegistries) { + if !registry.Blocked { + registries = append(registries, registry.URL) + } + } + + // Create all of the combinations. Some registries need an additional component added, so + // use our lookaside map to keep track of them. If there are no configured registries, we'll + // return a name using "localhost" as the registry name. + candidates := []string{} + initRegistries := []string{"localhost"} + if firstRegistry != "" && firstRegistry != "localhost" { + initRegistries = append([]string{firstRegistry}, initRegistries...) + } + for _, registry := range append(initRegistries, registries...) { + if registry == "" { + continue + } + middle := "" + if prefix, ok := RegistryDefaultPathPrefix[registry]; ok && strings.IndexRune(name, '/') == -1 { + middle = prefix + } + candidate := path.Join(registry, middle, name) + candidates = append(candidates, candidate) + } + return candidates, nil +} + +// ExpandNames takes unqualified names, parses them as image names, and returns +// the fully expanded result, including a tag. Names which don't include a registry +// name will be marked for the most-preferred registry (i.e., the first one in our +// configuration). 
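+//
+// For example (hypothetical), with no registries configured,
+// ExpandNames([]string{"myimage"}, "", sc, store) expands to
+// []string{"localhost/myimage:latest"}.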
+func ExpandNames(names []string, firstRegistry string, systemContext *types.SystemContext, store storage.Store) ([]string, error) { + expanded := make([]string, 0, len(names)) + for _, n := range names { + var name reference.Named + nameList, err := ResolveName(n, firstRegistry, systemContext, store) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", n) + } + if len(nameList) == 0 { + named, err := reference.ParseNormalizedNamed(n) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", n) + } + name = named + } else { + named, err := reference.ParseNormalizedNamed(nameList[0]) + if err != nil { + return nil, errors.Wrapf(err, "error parsing name %q", nameList[0]) + } + name = named + } + name = reference.TagNameOnly(name) + tag := "" + digest := "" + if tagged, ok := name.(reference.NamedTagged); ok { + tag = ":" + tagged.Tag() + } + if digested, ok := name.(reference.Digested); ok { + digest = "@" + digested.Digest().String() + } + expanded = append(expanded, name.Name()+tag+digest) + } + return expanded, nil +} + +// FindImage locates the locally-stored image which corresponds to a given name. +func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) { + var ref types.ImageReference + var img *storage.Image + var err error + names, err := ResolveName(image, firstRegistry, systemContext, store) + if err != nil { + return nil, nil, errors.Wrapf(err, "error parsing name %q", image) + } + for _, name := range names { + ref, err = is.Transport.ParseStoreReference(store, name) + if err != nil { + logrus.Debugf("error parsing reference to image %q: %v", name, err) + continue + } + img, err = is.Transport.GetStoreImage(store, ref) + if err != nil { + img2, err2 := store.Image(name) + if err2 != nil { + logrus.Debugf("error locating image %q: %v", name, err2) + continue + } + img = img2 + } + break + } + if ref == nil || img == nil { + return nil, nil, errors.Wrapf(err, "error locating image with name %q", image) + } + return ref, img, nil +} + +// AddImageNames adds the specified names to the specified image. +func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error { + names, err := ExpandNames(addNames, firstRegistry, systemContext, store) + if err != nil { + return err + } + err = store.SetNames(image.ID, append(image.Names, names...)) + if err != nil { + return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID) + } + return nil +} + +// GetFailureCause checks the type of the error "err" and returns a new +// error message that reflects the reason of the failure. +// In case err type is not a familiar one the error "defaultError" is returned. +func GetFailureCause(err, defaultError error) error { + switch nErr := errors.Cause(err).(type) { + case errcode.Errors: + return err + case errcode.Error, *url.Error: + return nErr + default: + return defaultError + } +} + +// WriteError writes `lastError` into `w` if not nil and return the next error `err` +func WriteError(w io.Writer, err error, lastError error) error { + if lastError != nil { + fmt.Fprintln(w, lastError) + } + return err +} + +// Runtime is the default command to use to run the container. 
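+// Setting $BUILDAH_RUNTIME overrides the built-in default (DefaultRuntime,
+// i.e. "runc"); for example, BUILDAH_RUNTIME=/usr/local/sbin/runc.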
+func Runtime() string { + runtime := os.Getenv("BUILDAH_RUNTIME") + if runtime != "" { + return runtime + } + return DefaultRuntime +} + +// StringInSlice returns a boolean indicating if the exact value s is present +// in the slice slice. +func StringInSlice(s string, slice []string) bool { + for _, v := range slice { + if v == s { + return true + } + } + return false +} + +// GetHostIDs uses ID mappings to compute the host-level IDs that will +// correspond to a UID/GID pair in the container. +func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) { + uidMapped := true + for _, m := range uidmap { + uidMapped = false + if uid >= m.ContainerID && uid < m.ContainerID+m.Size { + uid = (uid - m.ContainerID) + m.HostID + uidMapped = true + break + } + } + if !uidMapped { + return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map UID %d", uid) + } + gidMapped := true + for _, m := range gidmap { + gidMapped = false + if gid >= m.ContainerID && gid < m.ContainerID+m.Size { + gid = (gid - m.ContainerID) + m.HostID + gidMapped = true + break + } + } + if !gidMapped { + return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map GID %d", gid) + } + return uid, gid, nil +} + +// GetHostRootIDs uses ID mappings in spec to compute the host-level IDs that will +// correspond to UID/GID 0/0 in the container. +func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) { + if spec.Linux == nil { + return 0, 0, nil + } + return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0) +} + +// getHostIDMappings reads mappings from the named node under /proc. +func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { + var mappings []specs.LinuxIDMapping + f, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error reading ID mappings from %q", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 3 { + return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + } + cid, err := strconv.ParseUint(fields[0], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path) + } + hid, err := strconv.ParseUint(fields[1], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path) + } + size, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path) + } + mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) + } + return mappings, nil +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + if pid == "" { + pid = "self" + } + uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) + if err != nil { + return nil, nil, err + } + gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) + if err != nil { + return nil, nil, err + } + return uidmap, gidmap, nil +} + +// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. 
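+// Entries in those files have the form "name:start:count", for example
+// "containers:100000:65536".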
+func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + mappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group) + } + var uidmap, gidmap []specs.LinuxIDMapping + for _, m := range mappings.UIDs() { + uidmap = append(uidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + for _, m := range mappings.GIDs() { + gidmap = append(gidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap, nil +} + +// ParseIDMappings parses mapping triples. +func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { + nonDigitsToWhitespace := func(r rune) rune { + if strings.IndexRune("0123456789", r) == -1 { + return ' ' + } else { + return r + } + } + parseTriple := func(spec []string) (container, host, size uint32, err error) { + cid, err := strconv.ParseUint(spec[0], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + } + hid, err := strconv.ParseUint(spec[1], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + } + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + } + return uint32(cid), uint32(hid), uint32(sz), nil + } + parseIDMap := func(mapSpec []string, mapSetting string) (idmap []idtools.IDMap, err error) { + for _, idMapSpec := range mapSpec { + idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec)) + if len(idSpec)%3 != 0 { + return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) + } + for i := range idSpec { + if i%3 != 0 { + continue + } + cid, hid, size, err := parseTriple(idSpec[i : i+3]) + if err != nil { + return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting) + } + mapping := idtools.IDMap{ + ContainerID: int(cid), + HostID: int(hid), + Size: int(size), + } + idmap = append(idmap, mapping) + } + } + return idmap, nil + } + uid, err := parseIDMap(uidmap, "userns-uid-map") + if err != nil { + return nil, nil, err + } + gid, err := parseIDMap(gidmap, "userns-gid-map") + if err != nil { + return nil, nil, err + } + return uid, gid, nil +} + +// UnsharedRootPath returns a location under ($XDG_DATA_HOME/containers/storage, +// or $HOME/.local/share/containers/storage, or +// (the user's home directory)/.local/share/containers/storage, or an error. +func UnsharedRootPath(homedir string) (string, error) { + // If $XDG_DATA_HOME is defined... + if envDataHome, haveDataHome := os.LookupEnv("XDG_DATA_HOME"); haveDataHome { + return filepath.Join(envDataHome, "containers", "storage"), nil + } + // If $XDG_DATA_HOME is not defined, but $HOME is defined... + if envHomedir, haveHomedir := os.LookupEnv("HOME"); haveHomedir { + // Default to the user's $HOME/.local/share/containers/storage subdirectory. + return filepath.Join(envHomedir, ".local", "share", "containers", "storage"), nil + } + // If we know where our home directory is... + if homedir != "" { + // Default to the user's homedir/.local/share/containers/storage subdirectory. 
+ return filepath.Join(homedir, ".local", "share", "containers", "storage"), nil + } + return "", errors.New("unable to determine a --root location: neither $XDG_DATA_HOME nor $HOME is set") +} + +// UnsharedRunrootPath returns $XDG_RUNTIME_DIR/run, /var/run/user/(the user's UID)/run, or an error. +func UnsharedRunrootPath(uid string) (string, error) { + // If $XDG_RUNTIME_DIR is defined... + if envRuntimeDir, haveRuntimeDir := os.LookupEnv("XDG_RUNTIME_DIR"); haveRuntimeDir { + return filepath.Join(envRuntimeDir, "run"), nil + } + // If $XDG_RUNTIME_DIR is not defined, but we know our UID... + if uid != "" { + return filepath.Join("/var/run/user", uid, "run"), nil + } + return "", errors.New("unable to determine a --runroot location: $XDG_RUNTIME_DIR is not set, and we don't know our UID") +} + +// GetPolicyContext sets up, initializes and returns a new context for the specified policy +func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) { + policy, err := signature.DefaultPolicy(ctx) + if err != nil { + return nil, err + } + + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return nil, err + } + return policyContext, nil +} diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf new file mode 100644 index 000000000..0112a2d91 --- /dev/null +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -0,0 +1,63 @@ +github.com/Azure/go-ansiterm master +github.com/blang/semver master +github.com/BurntSushi/toml master +github.com/containerd/continuity master +github.com/containernetworking/cni v0.7.0-alpha1 +github.com/seccomp/containers-golang master +github.com/containers/image d8b5cf2b804a48489e5203d51254ef576794049d +github.com/containers/storage 243c4cd616afdf06b4a975f18c4db083d26b1641 +github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 +github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 +github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 +github.com/docker/engine-api master +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d +github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 +github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 +github.com/fsouza/go-dockerclient master +github.com/ghodss/yaml master +github.com/gogo/protobuf master +github.com/golang/glog master +github.com/gorilla/context master +github.com/gorilla/mux master +github.com/hashicorp/errwrap master +github.com/hashicorp/go-cleanhttp master +github.com/hashicorp/go-multierror master +github.com/imdario/mergo master +github.com/mattn/go-runewidth master +github.com/mattn/go-shellwords master +github.com/Microsoft/go-winio master +github.com/Microsoft/hcsshim master +github.com/mistifyio/go-zfs master +github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c +github.com/mtrmac/gpgme master +github.com/Nvveen/Gotty master +github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc +github.com/opencontainers/image-spec v1.0.0 +github.com/opencontainers/runc master +github.com/opencontainers/runtime-spec v1.0.0 +github.com/opencontainers/runtime-tools master +github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a +github.com/openshift/imagebuilder master +github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 +github.com/pborman/uuid master +github.com/pkg/errors master +github.com/pquerna/ffjson 
d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac +github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75 +github.com/sirupsen/logrus master +github.com/syndtr/gocapability master +github.com/tchap/go-patricia master +github.com/ulikunitz/xz v0.5.4 +github.com/urfave/cli 934abfb2f102315b5794e15ebc7949e4ca253920 +github.com/vbatts/tar-split v0.10.2 +github.com/xeipuuv/gojsonpointer master +github.com/xeipuuv/gojsonreference master +github.com/xeipuuv/gojsonschema master +golang.org/x/crypto master +golang.org/x/net master +golang.org/x/sys master +golang.org/x/text master +gopkg.in/cheggaaa/pb.v1 v1.0.13 +gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b +k8s.io/apimachinery master +k8s.io/client-go master +k8s.io/kubernetes master diff --git a/vendor/github.com/projectatomic/buildah/LICENSE b/vendor/github.com/projectatomic/buildah/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/projectatomic/buildah/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/projectatomic/buildah/README.md b/vendor/github.com/projectatomic/buildah/README.md deleted file mode 100644 index 0d3d19ee3..000000000 --- a/vendor/github.com/projectatomic/buildah/README.md +++ /dev/null @@ -1,120 +0,0 @@ -![buildah logo](https://cdn.rawgit.com/projectatomic/buildah/master/logos/buildah-logo_large.png) - -# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images - -[![Go Report Card](https://goreportcard.com/badge/github.com/projectatomic/buildah)](https://goreportcard.com/report/github.com/projectatomic/buildah) -[![Travis](https://travis-ci.org/projectatomic/buildah.svg?branch=master)](https://travis-ci.org/projectatomic/buildah) - -The Buildah package provides a command line tool that can be used to -* create a working container, either from scratch or using an image as a starting point -* create an image, either from a working container or via the instructions in a Dockerfile -* images can be built in either the OCI image format or the traditional upstream docker image format -* mount a working container's root filesystem for manipulation -* unmount a working container's root filesystem -* use the updated contents of a container's root filesystem as a filesystem layer to create a new image -* delete a working container or an image -* rename a local container - -## Buildah Information for Developers - -**[Buildah Demos](demos)** - -**[Changelog](CHANGELOG.md)** - -**[Contributing](CONTRIBUTING.md)** - -**[Development Plan](developmentplan.md)** - -**[Installation notes](install.md)** - -**[Troubleshooting Guide](troubleshooting.md)** - -**[Tutorials](docs/tutorials)** - -## Buildah and Podman relationship - -Buildah and Podman are two complementary Open-source projects that are available on -most Linux platforms and both projects reside at [GitHub.com](https://github.com) -with Buildah [here](https://github.com/projectatomic/buildah) and -Podman [here](https://github.com/containers/libpod). Both Buildah and Podman are -command line tools that work on OCI images and containers. The two projects -differentiate in their specialization. - -Buildah specializes in building OCI images. Buildah's commands replicate all -of the commands that are found in a Dockerfile. 
Buildah’s goal is also to
-provide a lower-level, coreutils-style interface for building images, allowing people to build
-containers without requiring a Dockerfile. The intent with Buildah is to allow other
-scripting languages to build container images without requiring a daemon.
-
-Podman specializes in the commands and functions that help you maintain and modify
-OCI images, such as pulling and tagging. It also allows you to create, run, and maintain
-containers created from those images.
-
-A major difference between Podman and Buildah is their concept of a container. Podman
-allows users to create "traditional containers", which are intended to be long lived.
-Buildah containers, by contrast, exist only so that content can be added back to the
-container image. An easy way to think of it is that the `buildah run` command emulates
-the RUN instruction in a Dockerfile, while the `podman run` command emulates the
-`docker run` command. Because of this and their underlying storage differences, you
-cannot see Podman containers from within Buildah, or vice versa.
-
-In short, Buildah is an efficient way to create OCI images, while Podman allows you
-to manage and maintain those images and containers in a production environment using
-familiar container CLI commands. For more details, see the
-[Container Tools Guide](https://github.com/projectatomic/buildah/tree/master/docs/containertools).
-
-## Example
-
-From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
-
-```bash
-$ cat > lighttpd.sh <<"EOF"
-#!/bin/bash -x
-
-ctr1=`buildah from ${1:-fedora}`
-
-## Get all updates and install our minimal httpd server
-buildah run $ctr1 -- dnf update -y
-buildah run $ctr1 -- dnf install -y lighttpd
-
-## Include some buildtime annotations
-buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
-
-## Run our server and expose the port
-buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" $ctr1
-buildah config --port 80 $ctr1
-
-## Commit this container to an image name
-buildah commit $ctr1 ${2:-$USER/lighttpd}
-EOF
-
-$ chmod +x lighttpd.sh
-$ sudo ./lighttpd.sh
-```
-
-## Commands
-| Command                                              | Description                                                                                            |
-| ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
-| [buildah-add(1)](/docs/buildah-add.md)               | Add the contents of a file, URL, or a directory to the container.                                      |
-| [buildah-bud(1)](/docs/buildah-bud.md)               | Build an image using instructions from Dockerfiles.                                                    |
-| [buildah-commit(1)](/docs/buildah-commit.md)         | Create an image from a working container.                                                              |
-| [buildah-config(1)](/docs/buildah-config.md)         | Update image configuration settings.                                                                   |
-| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images.                                                     |
-| [buildah-copy(1)](/docs/buildah-copy.md)             | Copies the contents of a file, URL, or directory into a container's working directory.                 |
-| [buildah-from(1)](/docs/buildah-from.md)             | Creates a new working container, either from scratch or using a specified image as a starting point.   |
-| [buildah-images(1)](/docs/buildah-images.md)         | List images in local storage.                                                                          |
-| [buildah-inspect(1)](/docs/buildah-inspect.md)       | Inspects the configuration of a container or image.                                                    |
-| [buildah-mount(1)](/docs/buildah-mount.md)           | Mount the working container's root filesystem.                                                         |
-| [buildah-push(1)](/docs/buildah-push.md)             | Push an image from local storage to elsewhere.
| -| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. | -| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. | -| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. | -| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. | -| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. | -| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. | -| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. | -| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information | - -**Future goals include:** -* more CI tests -* additional CLI commands (?) diff --git a/vendor/github.com/projectatomic/buildah/add.go b/vendor/github.com/projectatomic/buildah/add.go deleted file mode 100644 index 27c07c323..000000000 --- a/vendor/github.com/projectatomic/buildah/add.go +++ /dev/null @@ -1,242 +0,0 @@ -package buildah - -import ( - "io" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/containers/libpod/pkg/chrootuser" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" -) - -// AddAndCopyOptions holds options for add and copy commands. -type AddAndCopyOptions struct { - // Chown is a spec for the user who should be given ownership over the - // newly-added content, potentially overriding permissions which would - // otherwise match those of local files and directories being copied. - Chown string - // All of the data being copied will pass through Hasher, if set. - // If the sources are URLs or files, their contents will be passed to - // Hasher. - // If the sources include directory trees, Hasher will be passed - // tar-format archives of the directory trees. - Hasher io.Writer -} - -// addURL copies the contents of the source URL to the destination. This is -// its own function so that deferred closes happen after we're done pulling -// down each item of potentially many. 
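
The heart of addURL, which follows, is a stream-while-hashing pattern that is easy to show in isolation. A minimal sketch, assuming a reachable URL and a writable destination path (both invented here); unlike the real helper it skips ownership, permissions, content-length checks, and Last-Modified handling:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"os"
)

// fetch streams the body of srcurl into destination, hashing the bytes as
// they pass through, in the same spirit as addURL.
func fetch(destination, srcurl string) (string, error) {
	resp, err := http.Get(srcurl)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	f, err := os.Create(destination)
	if err != nil {
		return "", err
	}
	defer f.Close()

	hasher := sha256.New()
	// TeeReader hands every byte read from the body to the hasher.
	if _, err := io.Copy(f, io.TeeReader(resp.Body, hasher)); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", hasher.Sum(nil)), nil
}

func main() {
	// Hypothetical URL and destination, for illustration only.
	sum, err := fetch("/tmp/index.html", "https://example.com/")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("sha256:", sum)
}
```

TeeReader is what lets the real code feed the same bytes to both the destination file and an optional digest without buffering the body twice.
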
-func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer) error { - logrus.Debugf("saving %q to %q", srcurl, destination) - resp, err := http.Get(srcurl) - if err != nil { - return errors.Wrapf(err, "error getting %q", srcurl) - } - defer resp.Body.Close() - f, err := os.Create(destination) - if err != nil { - return errors.Wrapf(err, "error creating %q", destination) - } - if err = f.Chown(owner.UID, owner.GID); err != nil { - return errors.Wrapf(err, "error setting owner of %q", destination) - } - if last := resp.Header.Get("Last-Modified"); last != "" { - if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil { - logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2) - } else { - defer func() { - if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil { - logrus.Debugf("error setting mtime to Last-Modified time %q: %v", last, err3) - } - }() - } - } - defer f.Close() - bodyReader := io.Reader(resp.Body) - if hasher != nil { - bodyReader = io.TeeReader(bodyReader, hasher) - } - n, err := io.Copy(f, bodyReader) - if err != nil { - return errors.Wrapf(err, "error reading contents for %q", destination) - } - if resp.ContentLength >= 0 && n != resp.ContentLength { - return errors.Errorf("error reading contents for %q: wrong length (%d != %d)", destination, n, resp.ContentLength) - } - if err := f.Chmod(0600); err != nil { - return errors.Wrapf(err, "error setting permissions on %q", destination) - } - return nil -} - -// Add copies the contents of the specified sources into the container's root -// filesystem, optionally extracting contents of local files that look like -// non-empty archives. -func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error { - mountPoint, err := b.Mount(b.MountLabel) - if err != nil { - return err - } - defer func() { - if err2 := b.Unmount(); err2 != nil { - logrus.Errorf("error unmounting container: %v", err2) - } - }() - // Find out which user (and group) the destination should belong to. - user, err := b.user(mountPoint, options.Chown) - if err != nil { - return err - } - containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)} - hostUID, hostGID, err := util.GetHostIDs(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap, user.UID, user.GID) - if err != nil { - return err - } - hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)} - dest := mountPoint - if destination != "" && filepath.IsAbs(destination) { - dest = filepath.Join(dest, destination) - } else { - if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil { - return err - } - dest = filepath.Join(dest, b.WorkDir(), destination) - } - // If the destination was explicitly marked as a directory by ending it - // with a '/', create it so that we can be sure that it's a directory, - // and any files we're copying will be placed in the directory. - if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator { - if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { - return err - } - } - // Make sure the destination's parent directory is usable. - if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() { - return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest)) - } - // Now look at the destination itself. 
- destfi, err := os.Stat(dest) - if err != nil { - if !os.IsNotExist(err) { - return errors.Wrapf(err, "couldn't determine what %q is", dest) - } - destfi = nil - } - if len(source) > 1 && (destfi == nil || !destfi.IsDir()) { - return errors.Errorf("destination %q is not a directory", dest) - } - copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher) - copyWithTar := b.copyWithTar(&containerOwner, options.Hasher) - untarPath := b.untarPath(nil, options.Hasher) - for _, src := range source { - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - // We assume that source is a file, and we're copying - // it to the destination. If the destination is - // already a directory, create a file inside of it. - // Otherwise, the destination is the file to which - // we'll save the contents. - url, err := url.Parse(src) - if err != nil { - return errors.Wrapf(err, "error parsing URL %q", src) - } - d := dest - if destfi != nil && destfi.IsDir() { - d = filepath.Join(dest, path.Base(url.Path)) - } - if err := addURL(d, src, hostOwner, options.Hasher); err != nil { - return err - } - continue - } - - glob, err := filepath.Glob(src) - if err != nil { - return errors.Wrapf(err, "invalid glob %q", src) - } - if len(glob) == 0 { - return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src) - } - for _, gsrc := range glob { - esrc, err := filepath.EvalSymlinks(gsrc) - if err != nil { - return errors.Wrapf(err, "error evaluating symlinks %q", gsrc) - } - srcfi, err := os.Stat(esrc) - if err != nil { - return errors.Wrapf(err, "error reading %q", esrc) - } - if srcfi.IsDir() { - // The source is a directory, so copy the contents of - // the source directory into the target directory. Try - // to create it first, so that if there's a problem, - // we'll discover why that won't work. - if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { - return err - } - logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*") - if err := copyWithTar(esrc, dest); err != nil { - return errors.Wrapf(err, "error copying %q to %q", esrc, dest) - } - continue - } - if !extract || !archive.IsArchivePath(esrc) { - // This source is a file, and either it's not an - // archive, or we don't care whether or not it's an - // archive. - d := dest - if destfi != nil && destfi.IsDir() { - d = filepath.Join(dest, filepath.Base(gsrc)) - } - // Copy the file, preserving attributes. - logrus.Debugf("copying %q to %q", esrc, d) - if err := copyFileWithTar(esrc, d); err != nil { - return errors.Wrapf(err, "error copying %q to %q", esrc, d) - } - continue - } - // We're extracting an archive into the destination directory. - logrus.Debugf("extracting contents of %q into %q", esrc, dest) - if err := untarPath(esrc, dest); err != nil { - return errors.Wrapf(err, "error extracting %q into %q", esrc, dest) - } - } - } - return nil -} - -// user returns the user (and group) information which the destination should belong to. 
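
The loop that just ended is, at its core, a dispatch on what each source turns out to be. A condensed sketch of that decision tree, with hypothetical stand-ins for buildah's tar-based copy helpers (the real ones preserve ownership and extended attributes by round-tripping through tar, and also resolve symlinks first):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// Hypothetical stand-ins for buildah's copyWithTar/copyFileWithTar/untarPath
// closures; the real helpers stream through tar to preserve attributes.
func downloadTo(dest, url string) error     { fmt.Println("download", url, "->", dest); return nil }
func copyTree(dest, src string) error       { fmt.Println("copy tree", src, "->", dest); return nil }
func copyFile(dest, src string) error       { fmt.Println("copy file", src, "->", dest); return nil }
func extractArchive(dest, src string) error { fmt.Println("extract", src, "->", dest); return nil }

// looksLikeArchive is a crude stand-in for archive.IsArchivePath.
func looksLikeArchive(path string) bool { return strings.HasSuffix(path, ".tar.gz") }

// addSource mirrors the dispatch in Add: URLs are downloaded, directories
// are copied recursively, archives are extracted when requested, and
// anything else is copied as a plain file.
func addSource(dest, src string, extract bool) error {
	if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
		return downloadTo(dest, src)
	}
	matches, err := filepath.Glob(src)
	if err != nil {
		return err
	}
	for _, m := range matches {
		info, err := os.Stat(m)
		switch {
		case err != nil:
			return err
		case info.IsDir():
			if err := copyTree(dest, m); err != nil {
				return err
			}
		case extract && looksLikeArchive(m):
			if err := extractArchive(dest, m); err != nil {
				return err
			}
		default:
			if err := copyFile(filepath.Join(dest, filepath.Base(m)), m); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	if err := addSource("/tmp/dest", "/etc/host*", false); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
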
-func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) { - if userspec == "" { - userspec = b.User() - } - - uid, gid, err := chrootuser.GetUser(mountPoint, userspec) - u := specs.User{ - UID: uid, - GID: gid, - Username: userspec, - } - if !strings.Contains(userspec, ":") { - groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) - if err2 != nil { - if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { - err = err2 - } - } else { - u.AdditionalGids = groups - } - - } - return u, err -} diff --git a/vendor/github.com/projectatomic/buildah/bind/mount.go b/vendor/github.com/projectatomic/buildah/bind/mount.go deleted file mode 100644 index 695bde554..000000000 --- a/vendor/github.com/projectatomic/buildah/bind/mount.go +++ /dev/null @@ -1,295 +0,0 @@ -// +build linux - -package bind - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/mount" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// SetupIntermediateMountNamespace creates a new mount namespace and bind -// mounts all bind-mount sources into a subdirectory of bundlePath that can -// only be reached by the root user of the container's user namespace, except -// for Mounts which include the NoBindOption option in their options list. The -// NoBindOption will then merely be removed. -func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) { - defer stripNoBindOption(spec) - - // We expect a root directory to be defined. - if spec.Root == nil { - return nil, errors.Errorf("configuration has no root filesystem?") - } - rootPath := spec.Root.Path - - // Create a new mount namespace in which to do the things we're doing. - if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { - return nil, errors.Wrapf(err, "error creating new mount namespace for %v", spec.Process.Args) - } - - // Make all of our mounts private to our namespace. - if err := mount.MakeRPrivate("/"); err != nil { - return nil, errors.Wrapf(err, "error making mounts private to mount namespace for %v", spec.Process.Args) - } - - // Make sure the bundle directory is searchable. We created it with - // TempDir(), so it should have started with permissions set to 0700. - info, err := os.Stat(bundlePath) - if err != nil { - return nil, errors.Wrapf(err, "error checking permissions on %q", bundlePath) - } - if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil { - return nil, errors.Wrapf(err, "error loosening permissions on %q", bundlePath) - } - - // Figure out who needs to be able to reach these bind mounts in order - // for the container to be started. - rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return nil, err - } - - // Hand back a callback that the caller can use to clean up everything - // we're doing here. - unmount := []string{} - unmountAll = func() (err error) { - for _, mountpoint := range unmount { - // Unmount it and anything under it. 
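
Earlier in this function, two calls did the heavy lifting: unsharing the mount namespace and making "/" recursively private, so that the staging mounts never leak back to the host. A minimal sketch of just that setup, assuming Linux and root privileges:

```go
// +build linux

package main

import (
	"fmt"
	"os"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// Unshare affects the calling thread, so pin it first.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Give this thread its own copy of the mount table...
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		fmt.Fprintln(os.Stderr, "unshare:", err)
		os.Exit(1)
	}
	// ...and stop mount events from propagating back to the host, which
	// is what mount.MakeRPrivate("/") accomplishes in the code above.
	if err := unix.Mount("", "/", "", unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
		fmt.Fprintln(os.Stderr, "remount private:", err)
		os.Exit(1)
	}
	// From here on, bind mounts created by this process are invisible
	// outside the namespace and vanish when the process exits.
	fmt.Println("now in a private mount namespace")
}
```
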
- if err2 := UnmountMountpoints(mountpoint, nil); err2 != nil { - logrus.Warnf("pkg/bind: error unmounting %q: %v", mountpoint, err2) - if err == nil { - err = err2 - } - } - if err2 := unix.Unmount(mountpoint, unix.MNT_DETACH); err2 != nil { - if errno, ok := err2.(syscall.Errno); !ok || errno != syscall.EINVAL { - logrus.Warnf("pkg/bind: error detaching %q: %v", mountpoint, err2) - if err == nil { - err = err2 - } - } - } - // Remove just the mountpoint. - retry := 10 - remove := unix.Unlink - err2 := remove(mountpoint) - for err2 != nil && retry > 0 { - if errno, ok := err2.(syscall.Errno); ok { - switch errno { - default: - retry = 0 - continue - case syscall.EISDIR: - remove = unix.Rmdir - err2 = remove(mountpoint) - case syscall.EBUSY: - if err3 := unix.Unmount(mountpoint, unix.MNT_DETACH); err3 == nil { - err2 = remove(mountpoint) - } - } - retry-- - } - } - if err2 != nil { - logrus.Warnf("pkg/bind: error removing %q: %v", mountpoint, err2) - if err == nil { - err = err2 - } - } - } - return err - } - - // Create a top-level directory that the "root" user will be able to - // access, that "root" from containers which use different mappings, or - // other unprivileged users outside of containers, shouldn't be able to - // access. - mnt := filepath.Join(bundlePath, "mnt") - if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil { - return unmountAll, errors.Wrapf(err, "error creating %q owned by the container's root user", mnt) - } - - // Make that directory private, and add it to the list of locations we - // unmount at cleanup time. - if err = mount.MakeRPrivate(mnt); err != nil { - return unmountAll, errors.Wrapf(err, "error marking filesystem at %q as private", mnt) - } - unmount = append([]string{mnt}, unmount...) - - // Create a bind mount for the root filesystem and add it to the list. - rootfs := filepath.Join(mnt, "rootfs") - if err = os.Mkdir(rootfs, 0000); err != nil { - return unmountAll, errors.Wrapf(err, "error creating directory %q", rootfs) - } - if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { - return unmountAll, errors.Wrapf(err, "error bind mounting root filesystem from %q to %q", rootPath, rootfs) - } - logrus.Debugf("bind mounted %q to %q", rootPath, rootfs) - unmount = append([]string{rootfs}, unmount...) - spec.Root.Path = rootfs - - // Do the same for everything we're binding in. - mounts := make([]specs.Mount, 0, len(spec.Mounts)) - for i := range spec.Mounts { - // If we're not using an intermediate, leave it in the list. - if leaveBindMountAlone(spec.Mounts[i]) { - mounts = append(mounts, spec.Mounts[i]) - continue - } - // Check if the source is a directory or something else. - info, err := os.Stat(spec.Mounts[i].Source) - if err != nil { - if os.IsNotExist(err) { - logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source) - continue - } - return unmountAll, errors.Wrapf(err, "error checking if %q is a directory", spec.Mounts[i].Source) - } - stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i)) - if info.IsDir() { - // If the source is a directory, make one to use as the - // mount target. - if err = os.Mkdir(stage, 0000); err != nil { - return unmountAll, errors.Wrapf(err, "error creating directory %q", stage) - } - } else { - // If the source is not a directory, create an empty - // file to use as the mount target. 
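
The staging logic here has one subtlety worth isolating: a bind mount needs an existing target of the same type as its source, which is why an empty 0000-mode file is created for non-directories. A sketch of that pairing, with hypothetical paths (Linux, root required):

```go
// +build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// bindMount stages src at target the way the code above does: the target
// must already exist and match the source's type, so an empty directory
// or an empty 0000-mode file is created first.
func bindMount(src, target string) error {
	info, err := os.Stat(src)
	if err != nil {
		return err
	}
	if info.IsDir() {
		if err := os.Mkdir(target, 0000); err != nil && !os.IsExist(err) {
			return err
		}
	} else {
		f, err := os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0000)
		if err != nil {
			return err
		}
		f.Close()
	}
	return unix.Mount(src, target, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, "")
}

func main() {
	// Hypothetical paths, for illustration only.
	if err := bindMount("/etc/hosts", "/tmp/stage-hosts"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("staged /etc/hosts at /tmp/stage-hosts")
}
```
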
- file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000) - if err != nil { - return unmountAll, errors.Wrapf(err, "error creating file %q", stage) - } - file.Close() - } - // Bind mount the source from wherever it is to a place where - // we know the runtime helper will be able to get to it... - if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil { - return unmountAll, errors.Wrapf(err, "error bind mounting bind object from %q to %q", spec.Mounts[i].Source, stage) - } - logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage) - spec.Mounts[i].Source = stage - // ... and update the source location that we'll pass to the - // runtime to our intermediate location. - mounts = append(mounts, spec.Mounts[i]) - unmount = append([]string{stage}, unmount...) - } - spec.Mounts = mounts - - return unmountAll, nil -} - -// Decide if the mount should not be redirected to an intermediate location first. -func leaveBindMountAlone(mount specs.Mount) bool { - // If we know we shouldn't do a redirection for this mount, skip it. - if util.StringInSlice(NoBindOption, mount.Options) { - return true - } - // If we're not bind mounting it in, we don't need to do anything for it. - if mount.Type != "bind" && !util.StringInSlice("bind", mount.Options) && !util.StringInSlice("rbind", mount.Options) { - return true - } - return false -} - -// UnmountMountpoints unmounts the given mountpoints and anything that's hanging -// off of them, rather aggressively. If a mountpoint also appears in the -// mountpointsToRemove slice, the mountpoints are removed after they are -// unmounted. -func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { - mounts, err := mount.GetMounts() - if err != nil { - return errors.Wrapf(err, "error retrieving list of mounts") - } - // getChildren returns the list of mount IDs that hang off of the - // specified ID. - getChildren := func(id int) []int { - var list []int - for _, info := range mounts { - if info.Parent == id { - list = append(list, info.ID) - } - } - return list - } - // getTree returns the list of mount IDs that hang off of the specified - // ID, and off of those mount IDs, etc. - getTree := func(id int) []int { - mounts := []int{id} - i := 0 - for i < len(mounts) { - children := getChildren(mounts[i]) - mounts = append(mounts, children...) 
- i++ - } - return mounts - } - // getMountByID looks up the mount info with the specified ID - getMountByID := func(id int) *mount.Info { - for i := range mounts { - if mounts[i].ID == id { - return mounts[i] - } - } - return nil - } - // getMountByPoint looks up the mount info with the specified mountpoint - getMountByPoint := func(mountpoint string) *mount.Info { - for i := range mounts { - if mounts[i].Mountpoint == mountpoint { - return mounts[i] - } - } - return nil - } - // find the top of the tree we're unmounting - top := getMountByPoint(mountpoint) - if top == nil { - return errors.Wrapf(err, "%q is not mounted", mountpoint) - } - // add all of the mounts that are hanging off of it - tree := getTree(top.ID) - // unmount each mountpoint, working from the end of the list (leaf nodes) to the top - for i := range tree { - var st unix.Stat_t - id := tree[len(tree)-i-1] - mount := getMountByID(id) - // check if this mountpoint is mounted - if err := unix.Lstat(mount.Mountpoint, &st); err != nil { - return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) - } - if mount.Major != int(unix.Major(st.Dev)) || mount.Minor != int(unix.Minor(st.Dev)) { - logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) - continue - } - // do the unmount - if err := unix.Unmount(mount.Mountpoint, 0); err != nil { - // if it was busy, detach it - if errno, ok := err.(syscall.Errno); ok && errno == syscall.EBUSY { - err = unix.Unmount(mount.Mountpoint, unix.MNT_DETACH) - } - if err != nil { - // if it was invalid (not mounted), hide the error, else return it - if errno, ok := err.(syscall.Errno); !ok || errno != syscall.EINVAL { - logrus.Warnf("error unmounting %q: %v", mount.Mountpoint, err) - continue - } - } - } - // if we're also supposed to remove this thing, do that, too - if util.StringInSlice(mount.Mountpoint, mountpointsToRemove) { - if err := os.Remove(mount.Mountpoint); err != nil { - return errors.Wrapf(err, "error removing %q", mount.Mountpoint) - } - } - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/bind/mount_unsupported.go b/vendor/github.com/projectatomic/buildah/bind/mount_unsupported.go deleted file mode 100644 index 88ca2ca8b..000000000 --- a/vendor/github.com/projectatomic/buildah/bind/mount_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package bind - -import ( - "github.com/opencontainers/runtime-spec/specs-go" -) - -// SetupIntermediateMountNamespace returns a no-op unmountAll() and no error. -func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmountAll func() error, err error) { - stripNoBindOption(spec) - return func() error { return nil }, nil -} diff --git a/vendor/github.com/projectatomic/buildah/bind/util.go b/vendor/github.com/projectatomic/buildah/bind/util.go deleted file mode 100644 index 4408c53bb..000000000 --- a/vendor/github.com/projectatomic/buildah/bind/util.go +++ /dev/null @@ -1,39 +0,0 @@ -package bind - -import ( - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/projectatomic/buildah/util" -) - -const ( - // NoBindOption is an option which, if present in a Mount structure's - // options list, will cause SetupIntermediateMountNamespace to not - // redirect it through a bind mount. 
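
The unmount sequence in UnmountMountpoints above (plain unmount first, lazy detach on EBUSY, EINVAL treated as already unmounted) stands alone as a reusable idiom. A trimmed sketch with a hypothetical mountpoint:

```go
// +build linux

package main

import (
	"fmt"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

// unmount tears a mountpoint down the way UnmountMountpoints does: plain
// unmount first, lazy detach if the mount is busy, and EINVAL ("not
// mounted") treated as success.
func unmount(mountpoint string) error {
	err := unix.Unmount(mountpoint, 0)
	if errno, ok := err.(syscall.Errno); ok && errno == syscall.EBUSY {
		err = unix.Unmount(mountpoint, unix.MNT_DETACH)
	}
	if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
		return nil
	}
	return err
}

func main() {
	// Hypothetical mountpoint; requires root.
	if err := unmount("/tmp/stage-hosts"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
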
- NoBindOption = "nobuildahbind" -) - -func stripNoBindOption(spec *specs.Spec) { - for i := range spec.Mounts { - if util.StringInSlice(NoBindOption, spec.Mounts[i].Options) { - prunedOptions := make([]string, 0, len(spec.Mounts[i].Options)) - for _, option := range spec.Mounts[i].Options { - if option != NoBindOption { - prunedOptions = append(prunedOptions, option) - } - } - spec.Mounts[i].Options = prunedOptions - } - } -} - -func dedupeStringSlice(slice []string) []string { - done := make([]string, 0, len(slice)) - m := make(map[string]struct{}) - for _, s := range slice { - if _, present := m[s]; !present { - m[s] = struct{}{} - done = append(done, s) - } - } - return done -} diff --git a/vendor/github.com/projectatomic/buildah/buildah.go b/vendor/github.com/projectatomic/buildah/buildah.go deleted file mode 100644 index 60688b372..000000000 --- a/vendor/github.com/projectatomic/buildah/buildah.go +++ /dev/null @@ -1,513 +0,0 @@ -package buildah - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/ioutils" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/docker" - "github.com/projectatomic/buildah/util" -) - -const ( - // Package is the name of this package, used in help output and to - // identify working containers. - Package = "buildah" - // Version for the Package. Bump version in contrib/rpm/buildah.spec - // too. - Version = "1.4-dev" - // The value we use to identify what type of information, currently a - // serialized Builder structure, we are using as per-container state. - // This should only be changed when we make incompatible changes to - // that data structure, as it's used to distinguish containers which - // are "ours" from ones that aren't. - containerType = Package + " 0.0.1" - // The file in the per-container directory which we use to store our - // per-container state. If it isn't there, then the container isn't - // one of our build containers. - stateFile = Package + ".json" -) - -// PullPolicy takes the value PullIfMissing, PullAlways, or PullNever. -type PullPolicy int - -const ( - // PullIfMissing is one of the values that BuilderOptions.PullPolicy - // can take, signalling that the source image should be pulled from a - // registry if a local copy of it is not already present. - PullIfMissing PullPolicy = iota - // PullAlways is one of the values that BuilderOptions.PullPolicy can - // take, signalling that a fresh, possibly updated, copy of the image - // should be pulled from a registry before the build proceeds. - PullAlways - // PullNever is one of the values that BuilderOptions.PullPolicy can - // take, signalling that the source image should not be pulled from a - // registry if a local copy of it is not already present. - PullNever -) - -// String converts a PullPolicy into a string. -func (p PullPolicy) String() string { - switch p { - case PullIfMissing: - return "PullIfMissing" - case PullAlways: - return "PullAlways" - case PullNever: - return "PullNever" - } - return fmt.Sprintf("unrecognized policy %d", p) -} - -// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled, -// or NetworkEnabled. -type NetworkConfigurationPolicy int - -const ( - // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork - // can take, signalling that the default behavior should be used. 
- NetworkDefault NetworkConfigurationPolicy = iota - // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork - // can take, signalling that network interfaces should NOT be configured for - // newly-created network namespaces. - NetworkDisabled - // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork - // can take, signalling that network interfaces should be configured for - // newly-created network namespaces. - NetworkEnabled -) - -// String formats a NetworkConfigurationPolicy as a string. -func (p NetworkConfigurationPolicy) String() string { - switch p { - case NetworkDefault: - return "NetworkDefault" - case NetworkDisabled: - return "NetworkDisabled" - case NetworkEnabled: - return "NetworkEnabled" - } - return fmt.Sprintf("unknown NetworkConfigurationPolicy %d", p) -} - -// Builder objects are used to represent containers which are being used to -// build images. They also carry potential updates which will be applied to -// the image's configuration when the container's contents are used to build an -// image. -type Builder struct { - store storage.Store - - // Args define variables that users can pass at build-time to the builder - Args map[string]string - // Type is used to help identify a build container's metadata. It - // should not be modified. - Type string `json:"type"` - // FromImage is the name of the source image which was used to create - // the container, if one was used. It should not be modified. - FromImage string `json:"image,omitempty"` - // FromImageID is the ID of the source image which was used to create - // the container, if one was used. It should not be modified. - FromImageID string `json:"image-id"` - // Config is the source image's configuration. It should not be - // modified. - Config []byte `json:"config,omitempty"` - // Manifest is the source image's manifest. It should not be modified. - Manifest []byte `json:"manifest,omitempty"` - - // Container is the name of the build container. It should not be modified. - Container string `json:"container-name,omitempty"` - // ContainerID is the ID of the build container. It should not be modified. - ContainerID string `json:"container-id,omitempty"` - // MountPoint is the last location where the container's root - // filesystem was mounted. It should not be modified. - MountPoint string `json:"mountpoint,omitempty"` - // ProcessLabel is the SELinux process label associated with the container - ProcessLabel string `json:"process-label,omitempty"` - // MountLabel is the SELinux mount label associated with the container - MountLabel string `json:"mount-label,omitempty"` - - // ImageAnnotations is a set of key-value pairs which is stored in the - // image's manifest. - ImageAnnotations map[string]string `json:"annotations,omitempty"` - // ImageCreatedBy is a description of how this container was built. - ImageCreatedBy string `json:"created-by,omitempty"` - // ImageHistoryComment is a description of how our added layers were built. - ImageHistoryComment string `json:"history-comment,omitempty"` - - // Image metadata and runtime settings, in multiple formats. - OCIv1 v1.Image `json:"ociv1,omitempty"` - Docker docker.V2Image `json:"docker,omitempty"` - // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format. - DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"` - - // Isolation controls how we handle "RUN" statements and the Run() method. 
- Isolation Isolation - // NamespaceOptions controls how we set up the namespaces for processes that we run in the container. - NamespaceOptions NamespaceOptions - // ConfigureNetwork controls whether or not network interfaces and - // routing are configured for a new network namespace (i.e., when not - // joining another's namespace and not just using the host's - // namespace), effectively deciding whether or not the process has a - // usable network. - ConfigureNetwork NetworkConfigurationPolicy - // CNIPluginPath is the location of CNI plugin helpers, if they should be - // run from a location other than the default location. - CNIPluginPath string - // CNIConfigDir is the location of CNI configuration files, if the files in - // the default configuration directory shouldn't be used. - CNIConfigDir string - // ID mapping options to use when running processes in the container with non-host user namespaces. - IDMappingOptions IDMappingOptions - // AddCapabilities is a list of capabilities to add to the default set when running - // commands in the container. - AddCapabilities []string - // DropCapabilities is a list of capabilities to remove from the default set, - // after processing the AddCapabilities set, when running commands in the container. - // If a capability appears in both lists, it will be dropped. - DropCapabilities []string - - CommonBuildOpts *CommonBuildOptions - // TopLayer is the top layer of the image - TopLayer string - // Format for the build Image - Format string -} - -// BuilderInfo are used as objects to display container information -type BuilderInfo struct { - Type string - FromImage string - FromImageID string - Config string - Manifest string - Container string - ContainerID string - MountPoint string - ProcessLabel string - MountLabel string - ImageAnnotations map[string]string - ImageCreatedBy string - OCIv1 v1.Image - Docker docker.V2Image - DefaultMountsFilePath string - Isolation string - NamespaceOptions NamespaceOptions - ConfigureNetwork string - CNIPluginPath string - CNIConfigDir string - IDMappingOptions IDMappingOptions - DefaultCapabilities []string - AddCapabilities []string - DropCapabilities []string -} - -// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it. -// This is used in the inspect command to display Manifest and Config as string and not []byte. -func GetBuildInfo(b *Builder) BuilderInfo { - return BuilderInfo{ - Type: b.Type, - FromImage: b.FromImage, - FromImageID: b.FromImageID, - Config: string(b.Config), - Manifest: string(b.Manifest), - Container: b.Container, - ContainerID: b.ContainerID, - MountPoint: b.MountPoint, - ProcessLabel: b.ProcessLabel, - ImageAnnotations: b.ImageAnnotations, - ImageCreatedBy: b.ImageCreatedBy, - OCIv1: b.OCIv1, - Docker: b.Docker, - DefaultMountsFilePath: b.DefaultMountsFilePath, - Isolation: b.Isolation.String(), - NamespaceOptions: b.NamespaceOptions, - ConfigureNetwork: fmt.Sprintf("%v", b.ConfigureNetwork), - CNIPluginPath: b.CNIPluginPath, - CNIConfigDir: b.CNIConfigDir, - IDMappingOptions: b.IDMappingOptions, - DefaultCapabilities: append([]string{}, util.DefaultCapabilities...), - AddCapabilities: append([]string{}, b.AddCapabilities...), - DropCapabilities: append([]string{}, b.DropCapabilities...), - } -} - -// CommonBuildOptions are resources that can be defined by flags for both buildah from and build-using-dockerfile -type CommonBuildOptions struct { - // AddHost is the list of hostnames to add to the build container's /etc/hosts. 
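
GetBuildInfo above exists so that inspect-style tooling can print Config and Manifest as strings rather than raw bytes. A sketch of that use, assuming a working container named "working-container" already exists in local storage (the name is illustrative) and using the github.com/containers/buildah import path that this patch switches to:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// OpenBuilder reloads the state that Save() wrote into the build
	// container's metadata directory.
	b, err := buildah.OpenBuilder(store, "working-container")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// GetBuildInfo flattens the []byte Config and Manifest fields into
	// printable strings, which is what inspect-style output shows.
	out, err := json.MarshalIndent(buildah.GetBuildInfo(b), "", "  ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(string(out))
}
```
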
-	AddHost []string
-	// CgroupParent is the path to cgroups under which the cgroup for the container will be created.
-	CgroupParent string
-	// CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
-	CPUPeriod uint64
-	// CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
-	CPUQuota int64
-	// CPUShares is the relative weight (CPU shares) to assign to the container
-	CPUShares uint64
-	// CPUSetCPUs is the set of CPUs in which to allow execution (0-3, 0,1)
-	CPUSetCPUs string
-	// CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
-	CPUSetMems string
-	// Memory is the upper limit (in bytes) on how much memory running containers can use.
-	Memory int64
-	// MemorySwap limits the amount of memory and swap together.
-	MemorySwap int64
-	// LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
-	// Recognized field names are "role", "type", and "level".
-	LabelOpts []string
-	// SeccompProfilePath is the pathname of a seccomp profile.
-	SeccompProfilePath string
-	// ApparmorProfile is the name of an apparmor profile.
-	ApparmorProfile string
-	// ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
-	ShmSize string
-	// Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
-	// These types are recognized:
-	// "core": maximum core dump size (ulimit -c)
-	// "cpu": maximum CPU time (ulimit -t)
-	// "data": maximum size of a process's data segment (ulimit -d)
-	// "fsize": maximum size of new files (ulimit -f)
-	// "locks": maximum number of file locks (ulimit -x)
-	// "memlock": maximum amount of locked memory (ulimit -l)
-	// "msgqueue": maximum amount of data in message queues (ulimit -q)
-	// "nice": niceness adjustment (nice -n, ulimit -e)
-	// "nofile": maximum number of open files (ulimit -n)
-	// "nproc": maximum number of processes (ulimit -u)
-	// "rss": maximum resident set size of a process (ulimit -m)
-	// "rtprio": maximum real-time scheduling priority (ulimit -r)
-	// "rttime": maximum amount of real-time execution between blocking syscalls
-	// "sigpending": maximum number of pending signals (ulimit -i)
-	// "stack": maximum stack size (ulimit -s)
-	Ulimit []string
-	// Volumes to bind mount into the container
-	Volumes []string
-}
-
-// BuilderOptions are used to initialize a new Builder.
-type BuilderOptions struct {
-	// Args define variables that users can pass at build-time to the builder
-	Args map[string]string
-	// FromImage is the name of the image which should be used as the
-	// starting point for the container. It can be set to an empty value
-	// or "scratch" to indicate that the container should not be based on
-	// an image.
-	FromImage string
-	// Container is a desired name for the build container.
-	Container string
-	// PullPolicy decides whether or not we should pull the image that
-	// we're using as a base image. It should be PullIfMissing,
-	// PullAlways, or PullNever.
-	PullPolicy PullPolicy
-	// Registry is a value which is prepended to the image's name, if it
-	// needs to be pulled and the image name alone can not be resolved to a
-	// reference to a source image. No separator is implicitly added.
-	Registry string
-	// Transport is a value which is prepended to the image's name, if it
-	// needs to be pulled and the image name alone, or the image name and
-	// the registry together, can not be resolved to a reference to a
-	// source image. No separator is implicitly added.
- Transport string - // Mount signals to NewBuilder() that the container should be mounted - // immediately. - Mount bool - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. - SignaturePolicyPath string - // ReportWriter is an io.Writer which will be used to log the reading - // of the source image from a registry, if we end up pulling the image. - ReportWriter io.Writer - // github.com/containers/image/types SystemContext to hold credentials - // and other authentication/authorization information. - SystemContext *types.SystemContext - // DefaultMountsFilePath is the file path holding the mounts to be - // mounted in "host-path:container-path" format - DefaultMountsFilePath string - // Isolation controls how we handle "RUN" statements and the Run() - // method. - Isolation Isolation - // NamespaceOptions controls how we set up namespaces for processes that - // we might need to run using the container's root filesystem. - NamespaceOptions NamespaceOptions - // ConfigureNetwork controls whether or not network interfaces and - // routing are configured for a new network namespace (i.e., when not - // joining another's namespace and not just using the host's - // namespace), effectively deciding whether or not the process has a - // usable network. - ConfigureNetwork NetworkConfigurationPolicy - // CNIPluginPath is the location of CNI plugin helpers, if they should be - // run from a location other than the default location. - CNIPluginPath string - // CNIConfigDir is the location of CNI configuration files, if the files in - // the default configuration directory shouldn't be used. - CNIConfigDir string - // ID mapping options to use if we're setting up our own user namespace. - IDMappingOptions *IDMappingOptions - // AddCapabilities is a list of capabilities to add to the default set when - // running commands in the container. - AddCapabilities []string - // DropCapabilities is a list of capabilities to remove from the default set, - // after processing the AddCapabilities set, when running commands in the - // container. If a capability appears in both lists, it will be dropped. - DropCapabilities []string - - CommonBuildOpts *CommonBuildOptions - // Format for the container image - Format string -} - -// ImportOptions are used to initialize a Builder from an existing container -// which was created elsewhere. -type ImportOptions struct { - // Container is the name of the build container. - Container string - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. - SignaturePolicyPath string -} - -// ImportFromImageOptions are used to initialize a Builder from an image. -type ImportFromImageOptions struct { - // Image is the name or ID of the image we'd like to examine. - Image string - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. 
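
For orientation, turning a BuilderOptions value into a working container takes little code. A minimal sketch, assuming local container storage is configured; the base image name is illustrative, and the import path is the new github.com/containers/buildah location:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Pull the base image only if it is not already present, then
	// create a working container from it.
	b, err := buildah.NewBuilder(context.TODO(), store, buildah.BuilderOptions{
		FromImage:  "docker.io/library/alpine",
		PullPolicy: buildah.PullIfMissing,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("created working container:", b.Container)
}
```
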
- SignaturePolicyPath string - // github.com/containers/image/types SystemContext to hold information - // about which registries we should check for completing image names - // that don't include a domain portion. - SystemContext *types.SystemContext -} - -// NewBuilder creates a new build container. -func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { - return newBuilder(ctx, store, options) -} - -// ImportBuilder creates a new build configuration using an already-present -// container. -func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) { - return importBuilder(ctx, store, options) -} - -// ImportBuilderFromImage creates a new builder configuration using an image. -// The returned object can be modified and examined, but it can not be saved -// or committed because it is not associated with a working container. -func ImportBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) { - return importBuilderFromImage(ctx, store, options) -} - -// OpenBuilder loads information about a build container given its name or ID. -func OpenBuilder(store storage.Store, container string) (*Builder, error) { - cdir, err := store.ContainerDirectory(container) - if err != nil { - return nil, err - } - buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) - if err != nil { - return nil, err - } - b := &Builder{} - err = json.Unmarshal(buildstate, &b) - if err != nil { - return nil, err - } - if b.Type != containerType { - return nil, errors.Errorf("container is not a %s container", Package) - } - b.store = store - b.fixupConfig() - return b, nil -} - -// OpenBuilderByPath loads information about a build container given a -// path to the container's root filesystem -func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { - containers, err := store.Containers() - if err != nil { - return nil, err - } - abs, err := filepath.Abs(path) - if err != nil { - return nil, err - } - builderMatchesPath := func(b *Builder, path string) bool { - return (b.MountPoint == path) - } - for _, container := range containers { - cdir, err := store.ContainerDirectory(container.ID) - if err != nil { - return nil, err - } - buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) - if err != nil { - return nil, err - } - b := &Builder{} - err = json.Unmarshal(buildstate, &b) - if err == nil && b.Type == containerType && builderMatchesPath(b, abs) { - b.store = store - b.fixupConfig() - return b, nil - } - } - return nil, storage.ErrContainerUnknown -} - -// OpenAllBuilders loads all containers which have a state file that we use in -// their data directory, typically so that they can be listed. -func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { - containers, err := store.Containers() - if err != nil { - return nil, err - } - for _, container := range containers { - cdir, err := store.ContainerDirectory(container.ID) - if err != nil { - return nil, err - } - buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile)) - if err != nil && os.IsNotExist(err) { - continue - } - b := &Builder{} - err = json.Unmarshal(buildstate, &b) - if err == nil && b.Type == containerType { - b.store = store - b.fixupConfig() - builders = append(builders, b) - } - } - return builders, nil -} - -// Save saves the builder's current state to the build container's metadata. 
-// This should not need to be called directly, as other methods of the Builder -// object take care of saving their state. -func (b *Builder) Save() error { - buildstate, err := json.Marshal(b) - if err != nil { - return err - } - cdir, err := b.store.ContainerDirectory(b.ContainerID) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600) -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/run.go b/vendor/github.com/projectatomic/buildah/chroot/run.go deleted file mode 100644 index c8aec181a..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/run.go +++ /dev/null @@ -1,1308 +0,0 @@ -// +build linux - -package chroot - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - "unsafe" - - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/reexec" - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/bind" - "github.com/projectatomic/buildah/unshare" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" - "github.com/syndtr/gocapability/capability" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sys/unix" -) - -const ( - // runUsingChrootCommand is a command we use as a key for reexec - runUsingChrootCommand = "buildah-chroot-runtime" - // runUsingChrootExec is a command we use as a key for reexec - runUsingChrootExecCommand = "buildah-chroot-exec" -) - -var ( - rlimitsMap = map[string]int{ - "RLIMIT_AS": unix.RLIMIT_AS, - "RLIMIT_CORE": unix.RLIMIT_CORE, - "RLIMIT_CPU": unix.RLIMIT_CPU, - "RLIMIT_DATA": unix.RLIMIT_DATA, - "RLIMIT_FSIZE": unix.RLIMIT_FSIZE, - "RLIMIT_LOCKS": unix.RLIMIT_LOCKS, - "RLIMIT_MEMLOCK": unix.RLIMIT_MEMLOCK, - "RLIMIT_MSGQUEUE": unix.RLIMIT_MSGQUEUE, - "RLIMIT_NICE": unix.RLIMIT_NICE, - "RLIMIT_NOFILE": unix.RLIMIT_NOFILE, - "RLIMIT_NPROC": unix.RLIMIT_NPROC, - "RLIMIT_RSS": unix.RLIMIT_RSS, - "RLIMIT_RTPRIO": unix.RLIMIT_RTPRIO, - "RLIMIT_RTTIME": unix.RLIMIT_RTTIME, - "RLIMIT_SIGPENDING": unix.RLIMIT_SIGPENDING, - "RLIMIT_STACK": unix.RLIMIT_STACK, - } - rlimitsReverseMap = map[int]string{} -) - -func init() { - reexec.Register(runUsingChrootCommand, runUsingChrootMain) - reexec.Register(runUsingChrootExecCommand, runUsingChrootExecMain) - for limitName, limitNumber := range rlimitsMap { - rlimitsReverseMap[limitNumber] = limitName - } -} - -type runUsingChrootSubprocOptions struct { - Spec *specs.Spec - BundlePath string - UIDMappings []syscall.SysProcIDMap - GIDMappings []syscall.SysProcIDMap -} - -type runUsingChrootExecSubprocOptions struct { - Spec *specs.Spec - BundlePath string -} - -// RunUsingChroot runs a chrooted process, using some of the settings from the -// passed-in spec, and using the specified bundlePath to hold temporary files, -// directories, and mountpoints. -func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) { - var confwg sync.WaitGroup - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // Write the runtime configuration, mainly for debugging. 
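
The reexec registration in this file's init() is the standard containers/storage pattern: the binary re-executes itself under a registered name, and reexec.Init() routes the child into the registered function instead of main. A self-contained sketch with a hypothetical command name:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register a named entry point. When the binary is re-executed
	// under this name, helperMain runs instead of main.
	reexec.Register("my-helper", helperMain)
}

func helperMain() {
	fmt.Println("running as the helper subprocess")
	os.Exit(0)
}

func main() {
	// reexec.Init returns true in the re-executed child, in which case
	// the registered function has already run and we must not continue.
	if reexec.Init() {
		return
	}
	cmd := reexec.Command("my-helper")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Go programs cannot safely fork() mid-runtime, which is why buildah re-executes the whole binary rather than forking into a helper.
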
- specbytes, err := json.Marshal(spec) - if err != nil { - return err - } - if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { - return errors.Wrapf(err, "error storing runtime configuration") - } - logrus.Debugf("config = %v", string(specbytes)) - - // Run the grandparent subprocess in a user namespace that reuses the mappings that we have. - uidmap, gidmap, err := util.GetHostIDMappings("") - if err != nil { - return err - } - for i := range uidmap { - uidmap[i].HostID = uidmap[i].ContainerID - } - for i := range gidmap { - gidmap[i].HostID = gidmap[i].ContainerID - } - - // Default to using stdin/stdout/stderr if we weren't passed objects to use. - if stdin == nil { - stdin = os.Stdin - } - if stdout == nil { - stdout = os.Stdout - } - if stderr == nil { - stderr = os.Stderr - } - - // Create a pipe for passing configuration down to the next process. - preader, pwriter, err := os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating configuration pipe") - } - config, conferr := json.Marshal(runUsingChrootSubprocOptions{ - Spec: spec, - BundlePath: bundlePath, - }) - if conferr != nil { - return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingChrootCommand) - } - - // Set our terminal's mode to raw, to pass handling of special - // terminal input to the terminal in the container. - if spec.Process.Terminal && terminal.IsTerminal(unix.Stdin) { - state, err := terminal.MakeRaw(unix.Stdin) - if err != nil { - logrus.Warnf("error setting terminal state: %v", err) - } else { - defer func() { - if err = terminal.Restore(unix.Stdin, state); err != nil { - logrus.Errorf("unable to restore terminal state: %v", err) - } - }() - } - } - - // Raise any resource limits that are higher than they are now, before - // we drop any more privileges. - if err = setRlimits(spec, false, true); err != nil { - return err - } - - // Start the grandparent subprocess. - cmd := unshare.Command(runUsingChrootCommand) - cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr - cmd.Dir = "/" - cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...) - cmd.UnshareFlags = syscall.CLONE_NEWUSER - cmd.UidMappings = uidmap - cmd.GidMappings = gidmap - cmd.GidMappingsEnableSetgroups = true - - logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) - confwg.Add(1) - go func() { - _, conferr = io.Copy(pwriter, bytes.NewReader(config)) - pwriter.Close() - confwg.Done() - }() - cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) - err = cmd.Run() - confwg.Wait() - if err == nil { - return conferr - } - return err -} - -// main() for grandparent subprocess. Its main job is to shuttle stdio back -// and forth, managing a pseudo-terminal if we want one, for our child, the -// parent subprocess. -func runUsingChrootMain() { - var options runUsingChrootSubprocOptions - - runtime.LockOSThread() - - // Set logging. - if level := os.Getenv("LOGLEVEL"); level != "" { - if ll, err := strconv.Atoi(level); err == nil { - logrus.SetLevel(logrus.Level(ll)) - } - os.Unsetenv("LOGLEVEL") - } - - // Unpack our configuration. - confPipe := os.NewFile(3, "confpipe") - if confPipe == nil { - fmt.Fprintf(os.Stderr, "error reading options pipe\n") - os.Exit(1) - } - defer confPipe.Close() - if err := json.NewDecoder(confPipe).Decode(&options); err != nil { - fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) - os.Exit(1) - } - - // Prepare to shuttle stdio back and forth. 
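The configuration handoff above relies on the fact that entries in cmd.ExtraFiles become descriptors 3, 4, ... in the child, which is why the child side later reopens descriptor 3 as "confpipe". A self-contained sketch of the same handshake; the IS_CHILD environment variable stands in for the containers/storage reexec registry, purely to keep the example small:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type options struct {
	Hostname string `json:"hostname"`
}

func main() {
	if os.Getenv("IS_CHILD") == "1" {
		child()
		return
	}
	preader, pwriter, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command(os.Args[0])
	cmd.Env = append(os.Environ(), "IS_CHILD=1")
	cmd.Stdout = os.Stdout
	// ExtraFiles entry 0 becomes descriptor 3 in the child.
	cmd.ExtraFiles = []*os.File{preader}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	preader.Close() // the child owns the read end now
	json.NewEncoder(pwriter).Encode(options{Hostname: "builder"})
	pwriter.Close()
	cmd.Wait()
}

func child() {
	confPipe := os.NewFile(3, "confpipe")
	defer confPipe.Close()
	var opts options
	if err := json.NewDecoder(confPipe).Decode(&opts); err != nil {
		fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("child received hostname %q\n", opts.Hostname)
}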
- rootUid32, rootGid32, err := util.GetHostRootIDs(options.Spec) - if err != nil { - logrus.Errorf("error determining ownership for container stdio") - os.Exit(1) - } - rootUid := int(rootUid32) - rootGid := int(rootGid32) - relays := make(map[int]int) - closeOnceRunning := []*os.File{} - var ctty *os.File - var stdin io.Reader - var stdinCopy io.WriteCloser - var stdout io.Writer - var stderr io.Writer - fdDesc := make(map[int]string) - deferred := func() {} - if options.Spec.Process.Terminal { - // Create a pseudo-terminal -- open a copy of the master side. - ptyMasterFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600) - if err != nil { - logrus.Errorf("error opening PTY master using /dev/ptmx: %v", err) - os.Exit(1) - } - // Set the kernel's lock to "unlocked". - locked := 0 - if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 { - logrus.Errorf("error locking PTY descriptor: %v", err) - os.Exit(1) - } - // Get a handle for the other end. - ptyFd, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCGPTPEER, unix.O_RDWR|unix.O_NOCTTY) - if int(ptyFd) == -1 { - if errno, isErrno := err.(syscall.Errno); !isErrno || (errno != syscall.EINVAL && errno != syscall.ENOTTY) { - logrus.Errorf("error getting PTY descriptor: %v", err) - os.Exit(1) - } - // EINVAL means the kernel's too old to understand TIOCGPTPEER. Try TIOCGPTN. - ptyN, err := unix.IoctlGetInt(ptyMasterFd, unix.TIOCGPTN) - if err != nil { - logrus.Errorf("error getting PTY number: %v", err) - os.Exit(1) - } - ptyName := fmt.Sprintf("/dev/pts/%d", ptyN) - fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620) - if err != nil { - logrus.Errorf("error opening PTY %q: %v", ptyName, err) - os.Exit(1) - } - ptyFd = uintptr(fd) - } - // Make notes about what's going where. - relays[ptyMasterFd] = unix.Stdout - relays[unix.Stdin] = ptyMasterFd - fdDesc[ptyMasterFd] = "container terminal" - fdDesc[unix.Stdin] = "stdin" - fdDesc[unix.Stdout] = "stdout" - winsize := &unix.Winsize{} - // Set the pseudoterminal's size to the configured size, or our own. - if options.Spec.Process.ConsoleSize != nil { - // Use configured sizes. - winsize.Row = uint16(options.Spec.Process.ConsoleSize.Height) - winsize.Col = uint16(options.Spec.Process.ConsoleSize.Width) - } else { - if terminal.IsTerminal(unix.Stdin) { - // Use the size of our terminal. - winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ) - if err != nil { - logrus.Debugf("error reading current terminal's size") - winsize.Row = 0 - winsize.Col = 0 - } - } - } - if winsize.Row != 0 && winsize.Col != 0 { - if err = unix.IoctlSetWinsize(int(ptyFd), unix.TIOCSWINSZ, winsize); err != nil { - logrus.Warnf("error setting terminal size for pty") - } - // FIXME - if we're connected to a terminal, we should - // be passing the updated terminal size down when we - // receive a SIGWINCH. - } - // Open an *os.File object that we can pass to our child. - ctty = os.NewFile(ptyFd, "/dev/tty") - // Set ownership for the PTY. - if err = ctty.Chown(rootUid, rootGid); err != nil { - var cttyInfo unix.Stat_t - err2 := unix.Fstat(int(ptyFd), &cttyInfo) - from := "" - op := "setting" - if err2 == nil { - op = "changing" - from = fmt.Sprintf("from %d/%d ", cttyInfo.Uid, cttyInfo.Gid) - } - logrus.Warnf("error %s ownership of container PTY %sto %d/%d: %v", op, from, rootUid, rootGid, err) - } - // Set permissions on the PTY. 
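The PTY setup above prefers TIOCGPTPEER and falls back to TIOCGPTN on kernels that predate it. The fallback path in isolation might look like the following sketch (Linux-only, error handling pared down; IoctlSetPointerInt and IoctlGetInt are the x/sys/unix wrappers for the raw ioctls used above):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	ptmx, err := unix.Open("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(ptmx)
	// Unlock the replica side before anyone may open it (TIOCSPTLCK takes
	// a pointer to an int that is zero for "unlocked").
	if err := unix.IoctlSetPointerInt(ptmx, unix.TIOCSPTLCK, 0); err != nil {
		panic(err)
	}
	// Ask the master for the replica's index, then open /dev/pts/N by name.
	n, err := unix.IoctlGetInt(ptmx, unix.TIOCGPTN)
	if err != nil {
		panic(err)
	}
	name := fmt.Sprintf("/dev/pts/%d", n)
	replica, err := unix.Open(name, unix.O_RDWR|unix.O_NOCTTY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(replica)
	fmt.Println("opened", name)
}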
- if err = ctty.Chmod(0620); err != nil { - logrus.Errorf("error setting permissions of container PTY: %v", err) - os.Exit(1) - } - // Make a note that our child (the parent subprocess) should - // have the PTY connected to its stdio, and that we should - // close it once it's running. - stdin = ctty - stdout = ctty - stderr = ctty - closeOnceRunning = append(closeOnceRunning, ctty) - } else { - // Create pipes for stdio. - stdinRead, stdinWrite, err := os.Pipe() - if err != nil { - logrus.Errorf("error opening pipe for stdin: %v", err) - } - stdoutRead, stdoutWrite, err := os.Pipe() - if err != nil { - logrus.Errorf("error opening pipe for stdout: %v", err) - } - stderrRead, stderrWrite, err := os.Pipe() - if err != nil { - logrus.Errorf("error opening pipe for stderr: %v", err) - } - // Make notes about what's going where. - relays[unix.Stdin] = int(stdinWrite.Fd()) - relays[int(stdoutRead.Fd())] = unix.Stdout - relays[int(stderrRead.Fd())] = unix.Stderr - fdDesc[int(stdinWrite.Fd())] = "container stdin pipe" - fdDesc[int(stdoutRead.Fd())] = "container stdout pipe" - fdDesc[int(stderrRead.Fd())] = "container stderr pipe" - fdDesc[unix.Stdin] = "stdin" - fdDesc[unix.Stdout] = "stdout" - fdDesc[unix.Stderr] = "stderr" - // Set ownership for the pipes. - if err = stdinRead.Chown(rootUid, rootGid); err != nil { - logrus.Errorf("error setting ownership of container stdin pipe: %v", err) - os.Exit(1) - } - if err = stdoutWrite.Chown(rootUid, rootGid); err != nil { - logrus.Errorf("error setting ownership of container stdout pipe: %v", err) - os.Exit(1) - } - if err = stderrWrite.Chown(rootUid, rootGid); err != nil { - logrus.Errorf("error setting ownership of container stderr pipe: %v", err) - os.Exit(1) - } - // Make a note that our child (the parent subprocess) should - // have the pipes connected to its stdio, and that we should - // close its ends of them once it's running. - stdin = stdinRead - stdout = stdoutWrite - stderr = stderrWrite - closeOnceRunning = append(closeOnceRunning, stdinRead, stdoutWrite, stderrWrite) - stdinCopy = stdinWrite - defer stdoutRead.Close() - defer stderrRead.Close() - } - // A helper that returns false if err is an error that would cause us - // to give up. 
- logIfNotRetryable := func(err error, what string) (retry bool) { - if err == nil { - return true - } - if errno, isErrno := err.(syscall.Errno); isErrno { - switch errno { - case syscall.EINTR, syscall.EAGAIN: - return true - } - } - logrus.Error(what) - return false - } - for readFd, writeFd := range relays { - if err := unix.SetNonblock(readFd, true); err != nil { - logrus.Errorf("error setting descriptor %d (%s) non-blocking: %v", readFd, fdDesc[readFd], err) - return - } - if err := unix.SetNonblock(writeFd, false); err != nil { - logrus.Errorf("error setting descriptor %d (%s) blocking: %v", relays[writeFd], fdDesc[writeFd], err) - return - } - } - go func() { - buffers := make(map[int]*bytes.Buffer) - for _, writeFd := range relays { - buffers[writeFd] = new(bytes.Buffer) - } - pollTimeout := -1 - for len(relays) > 0 { - fds := make([]unix.PollFd, 0, len(relays)) - for fd := range relays { - fds = append(fds, unix.PollFd{Fd: int32(fd), Events: unix.POLLIN | unix.POLLHUP}) - } - _, err := unix.Poll(fds, pollTimeout) - if !logIfNotRetryable(err, fmt.Sprintf("poll: %v", err)) { - return - } - removeFds := make(map[int]struct{}) - for _, rfd := range fds { - if rfd.Revents&unix.POLLHUP == unix.POLLHUP { - removeFds[int(rfd.Fd)] = struct{}{} - } - if rfd.Revents&unix.POLLNVAL == unix.POLLNVAL { - logrus.Debugf("error polling descriptor %s: closed?", fdDesc[int(rfd.Fd)]) - removeFds[int(rfd.Fd)] = struct{}{} - } - if rfd.Revents&unix.POLLIN == 0 { - continue - } - b := make([]byte, 8192) - nread, err := unix.Read(int(rfd.Fd), b) - logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err)) - if nread > 0 { - if wfd, ok := relays[int(rfd.Fd)]; ok { - nwritten, err := buffers[wfd].Write(b[:nread]) - if err != nil { - logrus.Debugf("buffer: %v", err) - continue - } - if nwritten != nread { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nread, nwritten) - continue - } - } - // If this is the last of the data we'll be able to read - // from this descriptor, read as much as there is to read. - for rfd.Revents&unix.POLLHUP == unix.POLLHUP { - nr, err := unix.Read(int(rfd.Fd), b) - logIfNotRetryable(err, fmt.Sprintf("read %s: %v", fdDesc[int(rfd.Fd)], err)) - if nr <= 0 { - break - } - if wfd, ok := relays[int(rfd.Fd)]; ok { - nwritten, err := buffers[wfd].Write(b[:nr]) - if err != nil { - logrus.Debugf("buffer: %v", err) - break - } - if nwritten != nr { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) - break - } - } - } - } - if nread == 0 { - removeFds[int(rfd.Fd)] = struct{}{} - } - } - pollTimeout = -1 - for wfd, buffer := range buffers { - if buffer.Len() > 0 { - nwritten, err := unix.Write(wfd, buffer.Bytes()) - logIfNotRetryable(err, fmt.Sprintf("write %s: %v", fdDesc[wfd], err)) - if nwritten >= 0 { - _ = buffer.Next(nwritten) - } - } - if buffer.Len() > 0 { - pollTimeout = 100 - } - } - for rfd := range removeFds { - if !options.Spec.Process.Terminal && rfd == unix.Stdin { - stdinCopy.Close() - } - delete(relays, rfd) - } - } - }() - - // Set up mounts and namespaces, and run the parent subprocess. - status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning) - deferred() - if err != nil { - fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err) - os.Exit(1) - } - - // Pass the process's exit status back to the caller by exiting with the same status. 
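The goroutine above multiplexes every relay through one poll(2) loop with per-destination buffers. Stripped down to a single stdin-to-stdout relay, the same cycle — poll, read, buffer, drain, retry on EINTR — might look like this sketch:

package main

import (
	"bytes"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	var pending bytes.Buffer
	for {
		fds := []unix.PollFd{{Fd: int32(os.Stdin.Fd()), Events: unix.POLLIN | unix.POLLHUP}}
		if _, err := unix.Poll(fds, -1); err != nil {
			if err == unix.EINTR || err == unix.EAGAIN {
				continue // retryable, the same test logIfNotRetryable applies above
			}
			return
		}
		b := make([]byte, 8192)
		n, err := unix.Read(int(os.Stdin.Fd()), b)
		if err != nil && err != unix.EINTR && err != unix.EAGAIN {
			return
		}
		if n == 0 {
			return // EOF, or POLLHUP with nothing left to read
		}
		if n > 0 {
			pending.Write(b[:n])
			// Drain what we can; a short write leaves the tail for the next pass.
			if w, werr := unix.Write(int(os.Stdout.Fd()), pending.Bytes()); werr == nil {
				pending.Next(w)
			}
		}
	}
}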
- if status.Exited() { - if status.ExitStatus() != 0 { - fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", status.ExitStatus()) - } - os.Exit(status.ExitStatus()) - } else if status.Signaled() { - fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", status.Signal()) - os.Exit(1) - } -} - -// runUsingChroot, still in the grandparent process, sets up various bind -// mounts and then runs the parent process in its own user namespace with the -// necessary ID mappings. -func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io.Reader, stdout, stderr io.Writer, closeOnceRunning []*os.File) (wstatus unix.WaitStatus, err error) { - var confwg sync.WaitGroup - - // Create a new mount namespace for ourselves and bind mount everything to a new location. - undoIntermediates, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) - if err != nil { - return 1, err - } - defer func() { - undoIntermediates() - }() - - // Bind mount in our filesystems. - undoChroots, err := setupChrootBindMounts(spec, bundlePath) - if err != nil { - return 1, err - } - defer func() { - undoChroots() - }() - - // Create a pipe for passing configuration down to the next process. - preader, pwriter, err := os.Pipe() - if err != nil { - return 1, errors.Wrapf(err, "error creating configuration pipe") - } - config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{ - Spec: spec, - BundlePath: bundlePath, - }) - if conferr != nil { - fmt.Fprintf(os.Stderr, "error re-encoding configuration for %q", runUsingChrootExecCommand) - os.Exit(1) - } - - // Apologize for the namespace configuration that we're about to ignore. - logNamespaceDiagnostics(spec) - - // If we have configured ID mappings, set them here so that they can apply to the child. - hostUidmap, hostGidmap, err := util.GetHostIDMappings("") - if err != nil { - return 1, err - } - uidmap, gidmap := spec.Linux.UIDMappings, spec.Linux.GIDMappings - if len(uidmap) == 0 { - // No UID mappings are configured for the container. Borrow our parent's mappings. - uidmap = append([]specs.LinuxIDMapping{}, hostUidmap...) - for i := range uidmap { - uidmap[i].HostID = uidmap[i].ContainerID - } - } - if len(gidmap) == 0 { - // No GID mappings are configured for the container. Borrow our parent's mappings. - gidmap = append([]specs.LinuxIDMapping{}, hostGidmap...) - for i := range gidmap { - gidmap[i].HostID = gidmap[i].ContainerID - } - } - - // Start the parent subprocess. - cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) - cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr - cmd.Dir = "/" - cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...) - cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWUTS | syscall.CLONE_NEWNS - cmd.UidMappings = uidmap - cmd.GidMappings = gidmap - cmd.GidMappingsEnableSetgroups = true - if ctty != nil { - cmd.Setsid = true - cmd.Ctty = ctty - } - cmd.OOMScoreAdj = spec.Process.OOMScoreAdj - cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
- cmd.Hook = func(int) error { - for _, f := range closeOnceRunning { - f.Close() - } - return nil - } - - logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) - confwg.Add(1) - go func() { - _, conferr = io.Copy(pwriter, bytes.NewReader(config)) - pwriter.Close() - confwg.Done() - }() - err = cmd.Run() - confwg.Wait() - if err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { - if waitStatus.Exited() { - if waitStatus.ExitStatus() != 0 { - fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus()) - } - os.Exit(waitStatus.ExitStatus()) - } else if waitStatus.Signaled() { - fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal()) - os.Exit(1) - } - } - } - fmt.Fprintf(os.Stderr, "process exited with error: %v", err) - os.Exit(1) - } - - return 0, nil -} - -// main() for parent subprocess. Its main job is to try to make our -// environment look like the one described by the runtime configuration blob, -// and then launch the intended command as a child. -func runUsingChrootExecMain() { - args := os.Args[1:] - var options runUsingChrootExecSubprocOptions - var err error - - runtime.LockOSThread() - - // Set logging. - if level := os.Getenv("LOGLEVEL"); level != "" { - if ll, err := strconv.Atoi(level); err == nil { - logrus.SetLevel(logrus.Level(ll)) - } - os.Unsetenv("LOGLEVEL") - } - - // Unpack our configuration. - confPipe := os.NewFile(3, "confpipe") - if confPipe == nil { - fmt.Fprintf(os.Stderr, "error reading options pipe\n") - os.Exit(1) - } - defer confPipe.Close() - if err := json.NewDecoder(confPipe).Decode(&options); err != nil { - fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) - os.Exit(1) - } - - // Set the hostname. We're already in a distinct UTS namespace and are admins in the user - // namespace which created it, so we shouldn't get a permissions error, but seccomp policy - // might deny our attempt to call sethostname() anyway, so log a debug message for that. - if options.Spec.Hostname != "" { - if err := unix.Sethostname([]byte(options.Spec.Hostname)); err != nil { - logrus.Debugf("failed to set hostname %q for process: %v", options.Spec.Hostname, err) - } - } - - // Try to chroot into the root. Do this before we potentially block the syscall via the - // seccomp profile. 
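Comparing the (device, inode) pair of the intended root before the chroot with that of "/" afterwards is how the code below detects a chroot that silently failed to take effect. The check in isolation, as a sketch (Linux-only, needs privilege; the root path is a placeholder):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	root := "/tmp/newroot" // placeholder; must exist
	var oldst, newst unix.Stat_t
	if err := unix.Stat(root, &oldst); err != nil {
		log.Fatal(err)
	}
	if err := unix.Chdir(root); err != nil { // chdir first so "." is inside the new root
		log.Fatal(err)
	}
	if err := unix.Chroot(root); err != nil {
		log.Fatal(err)
	}
	if err := unix.Stat("/", &newst); err != nil {
		log.Fatal(err)
	}
	// The new "/" should be the very directory we stat()ed before.
	if oldst.Dev != newst.Dev || oldst.Ino != newst.Ino {
		log.Fatal("chroot did not take effect")
	}
	log.Println("root switched")
}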
- var oldst, newst unix.Stat_t - if err := unix.Stat(options.Spec.Root.Path, &oldst); err != nil { - fmt.Fprintf(os.Stderr, "error stat()ing intended root directory %q: %v\n", options.Spec.Root.Path, err) - os.Exit(1) - } - if err := unix.Chdir(options.Spec.Root.Path); err != nil { - fmt.Fprintf(os.Stderr, "error chdir()ing to intended root directory %q: %v\n", options.Spec.Root.Path, err) - os.Exit(1) - } - if err := unix.Chroot(options.Spec.Root.Path); err != nil { - fmt.Fprintf(os.Stderr, "error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err) - os.Exit(1) - } - if err := unix.Stat("/", &newst); err != nil { - fmt.Fprintf(os.Stderr, "error stat()ing current root directory: %v\n", err) - os.Exit(1) - } - if oldst.Dev != newst.Dev || oldst.Ino != newst.Ino { - fmt.Fprintf(os.Stderr, "unknown error chroot()ing into directory %q: %v\n", options.Spec.Root.Path, err) - os.Exit(1) - } - logrus.Debugf("chrooted into %q", options.Spec.Root.Path) - - // not doing because it's still shared: creating devices - // not doing because it's not applicable: setting annotations - // not doing because it's still shared: setting sysctl settings - // not doing because cgroupfs is read only: configuring control groups - // -> this means we can use the freezer to make sure there aren't any lingering processes - // -> this means we ignore cgroups-based controls - // not doing because we don't set any in the config: running hooks - // not doing because we don't set it in the config: setting rootfs read-only - // not doing because we don't set it in the config: setting rootfs propagation - logrus.Debugf("setting apparmor profile") - if err = setApparmorProfile(options.Spec); err != nil { - fmt.Fprintf(os.Stderr, "error setting apparmor profile for process: %v\n", err) - os.Exit(1) - } - if err = setSelinuxLabel(options.Spec); err != nil { - fmt.Fprintf(os.Stderr, "error setting SELinux label for process: %v\n", err) - os.Exit(1) - } - logrus.Debugf("setting capabilities") - if err := setCapabilities(options.Spec); err != nil { - fmt.Fprintf(os.Stderr, "error setting capabilities for process %v\n", err) - os.Exit(1) - } - if err = setSeccomp(options.Spec); err != nil { - fmt.Fprintf(os.Stderr, "error setting seccomp filter for process: %v\n", err) - os.Exit(1) - } - logrus.Debugf("setting resource limits") - if err = setRlimits(options.Spec, false, false); err != nil { - fmt.Fprintf(os.Stderr, "error setting process resource limits for process: %v\n", err) - os.Exit(1) - } - - // Try to change to the directory. - cwd := options.Spec.Process.Cwd - if !filepath.IsAbs(cwd) { - cwd = "/" + cwd - } - cwd = filepath.Clean(cwd) - if err := unix.Chdir("/"); err != nil { - fmt.Fprintf(os.Stderr, "error chdir()ing into new root directory %q: %v\n", options.Spec.Root.Path, err) - os.Exit(1) - } - if err := unix.Chdir(cwd); err != nil { - fmt.Fprintf(os.Stderr, "error chdir()ing into directory %q under root %q: %v\n", cwd, options.Spec.Root.Path, err) - os.Exit(1) - } - logrus.Debugf("changed working directory to %q", cwd) - - // Drop privileges. 
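The drop sequence that follows is order-sensitive: supplemental groups and the GID must be changed while the process is still privileged enough to change them, so setresuid() comes last. A standalone sketch of the same order (the "nobody" IDs are placeholders; note that modern Go applies these calls to all threads):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func dropTo(uid, gid int, extraGids []int) error {
	if err := syscall.Setgroups(extraGids); err != nil { // pass []int{} to clear
		return fmt.Errorf("setgroups: %w", err)
	}
	if err := syscall.Setresgid(gid, gid, gid); err != nil {
		return fmt.Errorf("setresgid: %w", err)
	}
	// Last: after this we can no longer switch groups.
	if err := syscall.Setresuid(uid, uid, uid); err != nil {
		return fmt.Errorf("setresuid: %w", err)
	}
	return nil
}

func main() {
	if err := dropTo(65534, 65534, []int{}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("now uid=%d gid=%d\n", os.Getuid(), os.Getgid())
}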
- user := options.Spec.Process.User - if len(user.AdditionalGids) > 0 { - gids := make([]int, len(user.AdditionalGids)) - for i := range user.AdditionalGids { - gids[i] = int(user.AdditionalGids[i]) - } - logrus.Debugf("setting supplemental groups") - if err = syscall.Setgroups(gids); err != nil { - fmt.Fprintf(os.Stderr, "error setting supplemental groups list: %v", err) - os.Exit(1) - } - } else { - logrus.Debugf("clearing supplemental groups") - if err = syscall.Setgroups([]int{}); err != nil { - fmt.Fprintf(os.Stderr, "error clearing supplemental groups list: %v", err) - os.Exit(1) - } - } - logrus.Debugf("setting gid") - if err = syscall.Setresgid(int(user.GID), int(user.GID), int(user.GID)); err != nil { - fmt.Fprintf(os.Stderr, "error setting GID: %v", err) - os.Exit(1) - } - logrus.Debugf("setting uid") - if err = syscall.Setresuid(int(user.UID), int(user.UID), int(user.UID)); err != nil { - fmt.Fprintf(os.Stderr, "error setting UID: %v", err) - os.Exit(1) - } - - // Actually run the specified command. - cmd := exec.Command(args[0], args[1:]...) - cmd.Env = options.Spec.Process.Env - cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr - cmd.Dir = cwd - logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH")) - if err = cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { - if waitStatus.Exited() { - if waitStatus.ExitStatus() != 0 { - fmt.Fprintf(os.Stderr, "subprocess exited with status %d\n", waitStatus.ExitStatus()) - } - os.Exit(waitStatus.ExitStatus()) - } else if waitStatus.Signaled() { - fmt.Fprintf(os.Stderr, "subprocess exited on %s\n", waitStatus.Signal()) - os.Exit(1) - } - } - } - fmt.Fprintf(os.Stderr, "process exited with error: %v", err) - os.Exit(1) - } -} - -// logNamespaceDiagnostics knows which namespaces we want to create. -// Output debug messages when that differs from what we're being asked to do. 
-func logNamespaceDiagnostics(spec *specs.Spec) { - sawMountNS := false - sawUserNS := false - sawUTSNS := false - for _, ns := range spec.Linux.Namespaces { - switch ns.Type { - case specs.CgroupNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join cgroup namespace, sorry about that") - } else { - logrus.Debugf("unable to create cgroup namespace, sorry about that") - } - case specs.IPCNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join IPC namespace, sorry about that") - } else { - logrus.Debugf("unable to create IPC namespace, sorry about that") - } - case specs.MountNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join mount namespace %q, creating a new one", ns.Path) - } - sawMountNS = true - case specs.NetworkNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join network namespace, sorry about that") - } else { - logrus.Debugf("unable to create network namespace, sorry about that") - } - case specs.PIDNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join PID namespace, sorry about that") - } else { - logrus.Debugf("unable to create PID namespace, sorry about that") - } - case specs.UserNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join user namespace %q, creating a new one", ns.Path) - } - sawUserNS = true - case specs.UTSNamespace: - if ns.Path != "" { - logrus.Debugf("unable to join UTS namespace %q, creating a new one", ns.Path) - } - sawUTSNS = true - } - } - if !sawMountNS { - logrus.Debugf("mount namespace not requested, but creating a new one anyway") - } - if !sawUserNS { - logrus.Debugf("user namespace not requested, but creating a new one anyway") - } - if !sawUTSNS { - logrus.Debugf("UTS namespace not requested, but creating a new one anyway") - } -} - -// setApparmorProfile sets the apparmor profile for ourselves, and hopefully any child processes that we'll start. -func setApparmorProfile(spec *specs.Spec) error { - if !apparmor.IsEnabled() || spec.Process.ApparmorProfile == "" { - return nil - } - if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil { - return errors.Wrapf(err, "error setting apparmor profile to %q", spec.Process.ApparmorProfile) - } - return nil -} - -// setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. 
-func setCapabilities(spec *specs.Spec) error { - caps, err := capability.NewPid(0) - if err != nil { - return errors.Wrapf(err, "error reading capabilities of current process") - } - capMap := map[capability.CapType][]string{ - capability.BOUNDING: spec.Process.Capabilities.Bounding, - capability.EFFECTIVE: spec.Process.Capabilities.Effective, - capability.INHERITABLE: spec.Process.Capabilities.Inheritable, - capability.PERMITTED: spec.Process.Capabilities.Permitted, - capability.AMBIENT: spec.Process.Capabilities.Ambient, - } - knownCaps := capability.List() - for capType, capList := range capMap { - caps.Clear(capType) - for _, capToSet := range capList { - cap := capability.CAP_LAST_CAP - for _, c := range knownCaps { - if strings.EqualFold("CAP_"+c.String(), capToSet) { - cap = c - break - } - } - if cap == capability.CAP_LAST_CAP { - return errors.Errorf("error mapping capability %q to a number", capToSet) - } - caps.Set(capType, cap) - } - } - for capType := range capMap { - if err = caps.Apply(capType); err != nil { - return errors.Wrapf(err, "error setting %s capabilities to %#v", capType.String(), capMap[capType]) - } - } - return nil -} - -// parses the resource limits for ourselves and any processes that -// we'll start into a format that's more in line with the kernel APIs -func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) { - if spec.Process == nil { - return nil, nil - } - parsed := make(map[int]unix.Rlimit) - for _, limit := range spec.Process.Rlimits { - resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)] - if !recognized { - return nil, errors.Errorf("error parsing limit type %q", limit.Type) - } - parsed[resource] = unix.Rlimit{Cur: limit.Soft, Max: limit.Hard} - } - return parsed, nil -} - -// setRlimits sets any resource limits that we want to apply to processes that -// we'll start. -func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error { - limits, err := parseRlimits(spec) - if err != nil { - return err - } - for resource, desired := range limits { - var current unix.Rlimit - if err := unix.Getrlimit(resource, ¤t); err != nil { - return errors.Wrapf(err, "error reading %q limit", rlimitsReverseMap[resource]) - } - if desired.Max > current.Max && onlyLower { - // this would raise a hard limit, and we're only here to lower them - continue - } - if desired.Max < current.Max && onlyRaise { - // this would lower a hard limit, and we're only here to raise them - continue - } - if err := unix.Setrlimit(resource, &desired); err != nil { - return errors.Wrapf(err, "error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d)", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max) - } - } - return nil -} - -// setupChrootBindMounts actually bind mounts things under the rootfs, and returns a -// callback that will clean up its work. -func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func() error, err error) { - var fs unix.Statfs_t - removes := []string{} - undoBinds = func() error { - if err2 := bind.UnmountMountpoints(spec.Root.Path, removes); err2 != nil { - logrus.Warnf("pkg/chroot: error unmounting %q: %v", spec.Root.Path, err2) - if err == nil { - err = err2 - } - } - return err - } - - // Now bind mount all of those things to be under the rootfs's location in this - // mount namespace. 
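A detail worth calling out in the flag handling below: mount(2) ignores flags such as MS_RDONLY and MS_NODEV on the initial MS_BIND, which is why each bind mount is followed by a Statfs check and, when needed, a second MS_REMOUNT pass. The two-pass pattern in isolation, as a sketch (Linux-only, needs CAP_SYS_ADMIN; paths are placeholders):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func bindReadOnly(src, dst string) error {
	flags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_RDONLY | unix.MS_NODEV)
	if err := unix.Mount(src, dst, "bind", flags, ""); err != nil {
		return err
	}
	var fs unix.Statfs_t
	if err := unix.Statfs(dst, &fs); err != nil {
		return err
	}
	if fs.Flags&unix.ST_RDONLY == 0 {
		// The first pass didn't take; remount in place with the same flags.
		return unix.Mount(dst, dst, "bind", flags|unix.MS_REMOUNT, "")
	}
	return nil
}

func main() {
	if err := bindReadOnly("/etc", "/mnt/etc-ro"); err != nil {
		log.Fatal(err)
	}
	log.Println("read-only bind mount in place")
}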
- commonFlags := uintptr(unix.MS_BIND | unix.MS_REC | unix.MS_PRIVATE) - bindFlags := commonFlags | unix.MS_NODEV - devFlags := commonFlags | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY - procFlags := devFlags | unix.MS_NODEV - sysFlags := devFlags | unix.MS_NODEV | unix.MS_RDONLY - - // Bind /dev read-only. - subDev := filepath.Join(spec.Root.Path, "/dev") - if err := unix.Mount("/dev", subDev, "bind", devFlags, ""); err != nil { - if os.IsNotExist(err) { - err = os.Mkdir(subDev, 0700) - if err == nil { - err = unix.Mount("/dev", subDev, "bind", devFlags, "") - } - } - if err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting /dev from host into mount namespace") - } - } - // Make sure it's read-only. - if err = unix.Statfs(subDev, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subDev) - } - if fs.Flags&unix.ST_RDONLY == 0 { - if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting /dev in mount namespace read-only") - } - } - logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev")) - - // Bind /proc read-only. - subProc := filepath.Join(spec.Root.Path, "/proc") - if err := unix.Mount("/proc", subProc, "bind", procFlags, ""); err != nil { - if os.IsNotExist(err) { - err = os.Mkdir(subProc, 0700) - if err == nil { - err = unix.Mount("/proc", subProc, "bind", procFlags, "") - } - } - if err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting /proc from host into mount namespace") - } - } - logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc")) - - // Bind /sys read-only. - subSys := filepath.Join(spec.Root.Path, "/sys") - if err := unix.Mount("/sys", subSys, "bind", sysFlags, ""); err != nil { - if os.IsNotExist(err) { - err = os.Mkdir(subSys, 0700) - if err == nil { - err = unix.Mount("/sys", subSys, "bind", sysFlags, "") - } - } - if err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting /sys from host into mount namespace") - } - } - // Make sure it's read-only. - if err = unix.Statfs(subSys, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", subSys) - } - if fs.Flags&unix.ST_RDONLY == 0 { - if err := unix.Mount(subSys, subSys, "bind", sysFlags|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting /sys in mount namespace read-only") - } - } - logrus.Debugf("bind mounted %q to %q", "/sys", filepath.Join(spec.Root.Path, "/sys")) - - // Add /sys/fs/selinux to the set of masked paths, to ensure that we don't have processes - // attempting to interact with labeling, when they aren't allowed to do so. - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux") - // Add /sys/fs/cgroup to the set of masked paths, to ensure that we don't have processes - // attempting to mess with cgroup configuration, when they aren't allowed to do so. - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") - - // Bind mount in everything we've been asked to mount. - for _, m := range spec.Mounts { - // Skip anything that we just mounted. 
- switch m.Destination { - case "/dev", "/proc", "/sys": - logrus.Debugf("already bind mounted %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination)) - continue - default: - if strings.HasPrefix(m.Destination, "/dev/") { - continue - } - if strings.HasPrefix(m.Destination, "/proc/") { - continue - } - if strings.HasPrefix(m.Destination, "/sys/") { - continue - } - } - // Skip anything that isn't a bind or tmpfs mount. - if m.Type != "bind" && m.Type != "tmpfs" { - logrus.Debugf("skipping mount of type %q on %q", m.Type, m.Destination) - continue - } - // If the target is there, we can just mount it. - var srcinfo os.FileInfo - switch m.Type { - case "bind": - srcinfo, err = os.Stat(m.Source) - if err != nil { - return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source) - } - case "tmpfs": - srcinfo, err = os.Stat("/") - if err != nil { - return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a tmpfs") - } - } - target := filepath.Join(spec.Root.Path, m.Destination) - if _, err := os.Stat(target); err != nil { - // If the target can't be stat()ted, check the error. - if !os.IsNotExist(err) { - return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", target) - } - // The target isn't there yet, so create it, and make a - // note to remove it later. - if srcinfo.IsDir() { - if err = os.MkdirAll(target, 0111); err != nil { - return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target) - } - removes = append(removes, target) - } else { - if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil { - return undoBinds, errors.Wrapf(err, "error ensuring parent of mountpoint %q (%q) is present in mount namespace", target, filepath.Dir(target)) - } - var file *os.File - if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0); err != nil { - return undoBinds, errors.Wrapf(err, "error creating mountpoint %q in mount namespace", target) - } - file.Close() - removes = append(removes, target) - } - } - requestFlags := bindFlags - expectedFlags := uintptr(0) - if util.StringInSlice("nodev", m.Options) { - requestFlags |= unix.MS_NODEV - expectedFlags |= unix.ST_NODEV - } - if util.StringInSlice("noexec", m.Options) { - requestFlags |= unix.MS_NOEXEC - expectedFlags |= unix.ST_NOEXEC - } - if util.StringInSlice("nosuid", m.Options) { - requestFlags |= unix.MS_NOSUID - expectedFlags |= unix.ST_NOSUID - } - if util.StringInSlice("ro", m.Options) { - requestFlags |= unix.MS_RDONLY - expectedFlags |= unix.ST_RDONLY - } - switch m.Type { - case "bind": - // Do the bind mount. - if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target) - } - logrus.Debugf("bind mounted %q to %q", m.Source, target) - case "tmpfs": - // Mount a tmpfs. 
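For the tmpfs case, the mount.Mount call below boils down to a mount(2) whose data string carries the filesystem options, with the appended "private" becoming a separate propagation change. Roughly equivalent raw calls, as a sketch (Linux-only, privileged; the target and size are placeholders):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	target := "/mnt/scratch" // placeholder mountpoint; must already exist
	// The data string is where tmpfs-specific options such as size and mode go.
	if err := unix.Mount("tmpfs", target, "tmpfs", unix.MS_NODEV|unix.MS_NOSUID, "size=16m,mode=0755"); err != nil {
		log.Fatal(err)
	}
	// The "private" appended to the options above ends up as a separate
	// propagation change on the new mount.
	if err := unix.Mount("", target, "", unix.MS_PRIVATE, ""); err != nil {
		log.Fatal(err)
	}
	log.Println("tmpfs mounted privately on", target)
}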
- if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil { - return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ",")) - } - logrus.Debugf("mounted a tmpfs to %q", target) - } - if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target) - } - if uintptr(fs.Flags)&expectedFlags != expectedFlags { - if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace with expected flags", target) - } - } - } - - // Set up any read-only paths that we need to. If we're running inside - // of a container, some of these locations will already be read-only. - for _, roPath := range spec.Linux.ReadonlyPaths { - r := filepath.Join(spec.Root.Path, roPath) - target, err := filepath.EvalSymlinks(r) - if err != nil { - if os.IsNotExist(err) { - // No target, no problem. - continue - } - return undoBinds, errors.Wrapf(err, "error checking %q for symlinks before marking it read-only", r) - } - // Check if the location is already read-only. - var fs unix.Statfs_t - if err = unix.Statfs(target, &fs); err != nil { - if os.IsNotExist(err) { - // No target, no problem. - continue - } - return undoBinds, errors.Wrapf(err, "error checking if directory %q is already read-only", target) - } - if fs.Flags&unix.ST_RDONLY != 0 { - continue - } - // Mount the location over itself, so that we can remount it as read-only. - roFlags := uintptr(unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_RDONLY) - if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REC, ""); err != nil { - if os.IsNotExist(err) { - // No target, no problem. - continue - } - return undoBinds, errors.Wrapf(err, "error bind mounting %q onto itself in preparation for making it read-only", target) - } - // Remount the location read-only. - if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target) - } - if fs.Flags&unix.ST_RDONLY == 0 { - if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error remounting %q in mount namespace read-only", target) - } - } - // Check again. - if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was remounted read-only", target) - } - if fs.Flags&unix.ST_RDONLY == 0 { - return undoBinds, errors.Errorf("error verifying that %q in mount namespace was remounted read-only", target) - } - } - - // Create an empty directory to use for masking directories. - roEmptyDir := filepath.Join(bundlePath, "empty") - if len(spec.Linux.MaskedPaths) > 0 { - if err := os.Mkdir(roEmptyDir, 0700); err != nil { - return undoBinds, errors.Wrapf(err, "error creating empty directory %q", roEmptyDir) - } - removes = append(removes, roEmptyDir) - } - - // Set up any masked paths that we need to. If we're running inside of - // a container, some of these locations will already be read-only tmpfs - // filesystems or bind mounted to os.DevNull. If we're not running - // inside of a container, and nobody else has done that, we'll do it. 
- for _, masked := range spec.Linux.MaskedPaths { - t := filepath.Join(spec.Root.Path, masked) - target, err := filepath.EvalSymlinks(t) - if err != nil { - target = t - } - // Get some info about the null device. - nullinfo, err := os.Stat(os.DevNull) - if err != nil { - return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", os.DevNull) - } - // Get some info about the target. - targetinfo, err := os.Stat(target) - if err != nil { - if os.IsNotExist(err) { - // No target, no problem. - continue - } - return undoBinds, errors.Wrapf(err, "error examining %q for masking in mount namespace", target) - } - if targetinfo.IsDir() { - // The target's a directory. Check if it's a read-only filesystem. - var statfs unix.Statfs_t - if err = unix.Statfs(target, &statfs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q is a mountpoint", target) - } - isReadOnly := statfs.Flags&unix.MS_RDONLY != 0 - // Check if any of the IDs we're mapping could read it. - isAccessible := true - var stat unix.Stat_t - if err = unix.Stat(target, &stat); err != nil { - return undoBinds, errors.Wrapf(err, "error checking permissions on directory %q", target) - } - isAccessible = false - if stat.Mode&unix.S_IROTH|unix.S_IXOTH != 0 { - isAccessible = true - } - if !isAccessible && stat.Mode&unix.S_IROTH|unix.S_IXOTH != 0 { - if len(spec.Linux.GIDMappings) > 0 { - for _, mapping := range spec.Linux.GIDMappings { - if stat.Gid >= mapping.ContainerID && stat.Gid < mapping.ContainerID+mapping.Size { - isAccessible = true - break - } - } - } - } - if !isAccessible && stat.Mode&unix.S_IRUSR|unix.S_IXUSR != 0 { - if len(spec.Linux.UIDMappings) > 0 { - for _, mapping := range spec.Linux.UIDMappings { - if stat.Uid >= mapping.ContainerID && stat.Uid < mapping.ContainerID+mapping.Size { - isAccessible = true - break - } - } - } - } - // Check if it's empty. - hasContent := false - directory, err := os.Open(target) - if err != nil { - if !os.IsPermission(err) { - return undoBinds, errors.Wrapf(err, "error opening directory %q", target) - } - } else { - names, err := directory.Readdirnames(0) - directory.Close() - if err != nil { - return undoBinds, errors.Wrapf(err, "error reading contents of directory %q", target) - } - hasContent = false - for _, name := range names { - switch name { - case ".", "..": - continue - default: - hasContent = true - } - if hasContent { - break - } - } - } - // The target's a directory, so read-only bind mount an empty directory on it. - roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY) - if !isReadOnly || (hasContent && isAccessible) { - if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error masking directory %q in mount namespace", target) - } - if err = unix.Statfs(target, &fs); err != nil { - return undoBinds, errors.Wrapf(err, "error checking if directory %q was mounted read-only in mount namespace", target) - } - if fs.Flags&unix.ST_RDONLY == 0 { - if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil { - return undoBinds, errors.Wrapf(err, "error making sure directory %q in mount namespace is read only", target) - } - } - } - } else { - // The target's not a directory, so bind mount os.DevNull over it, unless it's already os.DevNull. 
- if !os.SameFile(nullinfo, targetinfo) { - if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil { - return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target) - } - } - } - } - return undoBinds, nil -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/seccomp.go b/vendor/github.com/projectatomic/buildah/chroot/seccomp.go deleted file mode 100644 index f2c55017f..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/seccomp.go +++ /dev/null @@ -1,142 +0,0 @@ -// +build linux,seccomp - -package chroot - -import ( - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - libseccomp "github.com/seccomp/libseccomp-golang" - "github.com/sirupsen/logrus" -) - -// setSeccomp sets the seccomp filter for ourselves and any processes that we'll start. -func setSeccomp(spec *specs.Spec) error { - logrus.Debugf("setting seccomp configuration") - if spec.Linux.Seccomp == nil { - return nil - } - mapAction := func(specAction specs.LinuxSeccompAction) libseccomp.ScmpAction { - switch specAction { - case specs.ActKill: - return libseccomp.ActKill - case specs.ActTrap: - return libseccomp.ActTrap - case specs.ActErrno: - return libseccomp.ActErrno - case specs.ActTrace: - return libseccomp.ActTrace - case specs.ActAllow: - return libseccomp.ActAllow - } - return libseccomp.ActInvalid - } - mapArch := func(specArch specs.Arch) libseccomp.ScmpArch { - switch specArch { - case specs.ArchX86: - return libseccomp.ArchX86 - case specs.ArchX86_64: - return libseccomp.ArchAMD64 - case specs.ArchX32: - return libseccomp.ArchX32 - case specs.ArchARM: - return libseccomp.ArchARM - case specs.ArchAARCH64: - return libseccomp.ArchARM64 - case specs.ArchMIPS: - return libseccomp.ArchMIPS - case specs.ArchMIPS64: - return libseccomp.ArchMIPS64 - case specs.ArchMIPS64N32: - return libseccomp.ArchMIPS64N32 - case specs.ArchMIPSEL: - return libseccomp.ArchMIPSEL - case specs.ArchMIPSEL64: - return libseccomp.ArchMIPSEL64 - case specs.ArchMIPSEL64N32: - return libseccomp.ArchMIPSEL64N32 - case specs.ArchPPC: - return libseccomp.ArchPPC - case specs.ArchPPC64: - return libseccomp.ArchPPC64 - case specs.ArchPPC64LE: - return libseccomp.ArchPPC64LE - case specs.ArchS390: - return libseccomp.ArchS390 - case specs.ArchS390X: - return libseccomp.ArchS390X - case specs.ArchPARISC: - /* fallthrough */ /* for now */ - case specs.ArchPARISC64: - /* fallthrough */ /* for now */ - } - return libseccomp.ArchInvalid - } - mapOp := func(op specs.LinuxSeccompOperator) libseccomp.ScmpCompareOp { - switch op { - case specs.OpNotEqual: - return libseccomp.CompareNotEqual - case specs.OpLessThan: - return libseccomp.CompareLess - case specs.OpLessEqual: - return libseccomp.CompareLessOrEqual - case specs.OpEqualTo: - return libseccomp.CompareEqual - case specs.OpGreaterEqual: - return libseccomp.CompareGreaterEqual - case specs.OpGreaterThan: - return libseccomp.CompareGreater - case specs.OpMaskedEqual: - return libseccomp.CompareMaskedEqual - } - return libseccomp.CompareInvalid - } - - filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction)) - if err != nil { - return errors.Wrapf(err, "error creating seccomp filter with default action %q", spec.Linux.Seccomp.DefaultAction) - } - for _, arch := range spec.Linux.Seccomp.Architectures { - if err = filter.AddArch(mapArch(arch)); err != nil { - return errors.Wrapf(err, "error adding architecture %q(%q) to seccomp filter", 
arch, mapArch(arch)) - } - } - for _, rule := range spec.Linux.Seccomp.Syscalls { - scnames := make(map[libseccomp.ScmpSyscall]string) - for _, name := range rule.Names { - scnum, err := libseccomp.GetSyscallFromName(name) - if err != nil { - logrus.Debugf("error mapping syscall %q to a syscall, ignoring %q rule for %q", name, rule.Action, name) - continue - } - scnames[scnum] = name - } - for scnum := range scnames { - if len(rule.Args) == 0 { - if err = filter.AddRule(scnum, mapAction(rule.Action)); err != nil { - return errors.Wrapf(err, "error adding a rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) - } - continue - } - var conditions []libseccomp.ScmpCondition - for _, arg := range rule.Args { - condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo) - if err != nil { - return errors.Wrapf(err, "error building a seccomp condition %d:%v:%d:%d", arg.Index, arg.Op, arg.Value, arg.ValueTwo) - } - conditions = append(conditions, condition) - } - if err = filter.AddRuleConditional(scnum, mapAction(rule.Action), conditions); err != nil { - return errors.Wrapf(err, "error adding a conditional rule (%q:%q) to seccomp filter", scnames[scnum], rule.Action) - } - } - } - if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil { - return errors.Wrapf(err, "error setting no-new-privileges bit to %v", spec.Process.NoNewPrivileges) - } - err = filter.Load() - filter.Release() - if err != nil { - return errors.Wrapf(err, "error activating seccomp filter") - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/projectatomic/buildah/chroot/seccomp_unsupported.go deleted file mode 100644 index a5b74bf09..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/seccomp_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux !seccomp - -package chroot - -import ( - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -func setSeccomp(spec *specs.Spec) error { - if spec.Linux.Seccomp != nil { - return errors.New("configured a seccomp filter without seccomp support?") - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/selinux.go b/vendor/github.com/projectatomic/buildah/chroot/selinux.go deleted file mode 100644 index 3e62d743d..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/selinux.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux,selinux - -package chroot - -import ( - "github.com/opencontainers/runtime-spec/specs-go" - selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// setSelinuxLabel sets the process label for child processes that we'll start. 
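For reference, the seccomp translation above drives libseccomp-golang in the straightforward way: build a filter around a default action, add per-syscall rules, then Load() it into the kernel. A minimal, purely illustrative policy — it assumes the seccomp build tag and the library being available, and denies uname(2) just to have an observable effect:

package main

import (
	"fmt"

	libseccomp "github.com/seccomp/libseccomp-golang"
	"golang.org/x/sys/unix"
)

func main() {
	filter, err := libseccomp.NewFilter(libseccomp.ActAllow)
	if err != nil {
		panic(err)
	}
	defer filter.Release()
	scnum, err := libseccomp.GetSyscallFromName("uname")
	if err != nil {
		panic(err)
	}
	// Have uname(2) fail with EPERM instead of executing.
	if err := filter.AddRule(scnum, libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM))); err != nil {
		panic(err)
	}
	if err := filter.Load(); err != nil {
		panic(err)
	}
	var buf unix.Utsname
	err = unix.Uname(&buf)
	fmt.Println("uname after loading the filter:", err) // expect: operation not permitted
}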
-func setSelinuxLabel(spec *specs.Spec) error { - logrus.Debugf("setting selinux label") - if spec.Process.SelinuxLabel != "" && selinux.EnforceMode() != selinux.Disabled { - if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil { - return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel) - } - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/selinux_unsupported.go b/vendor/github.com/projectatomic/buildah/chroot/selinux_unsupported.go deleted file mode 100644 index 1c6f48912..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/selinux_unsupported.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !linux !selinux - -package chroot - -import ( - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -func setSelinuxLabel(spec *specs.Spec) error { - if spec.Linux.MountLabel != "" { - return errors.New("configured an SELinux mount label without SELinux support?") - } - if spec.Process.SelinuxLabel != "" { - return errors.New("configured an SELinux process label without SELinux support?") - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/unsupported.go b/vendor/github.com/projectatomic/buildah/chroot/unsupported.go deleted file mode 100644 index 5312c0024..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux - -package chroot - -import ( - "io" - - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// RunUsingChroot is not supported. -func RunUsingChroot(spec *specs.Spec, bundlePath string, stdin io.Reader, stdout, stderr io.Writer) (err error) { - return errors.Errorf("--isolation chroot is not supported on this platform") -} diff --git a/vendor/github.com/projectatomic/buildah/chroot/util.go b/vendor/github.com/projectatomic/buildah/chroot/util.go deleted file mode 100644 index 34cc77260..000000000 --- a/vendor/github.com/projectatomic/buildah/chroot/util.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build linux - -package chroot - -func dedupeStringSlice(slice []string) []string { - done := make([]string, 0, len(slice)) - m := make(map[string]struct{}) - for _, s := range slice { - if _, present := m[s]; !present { - m[s] = struct{}{} - done = append(done, s) - } - } - return done -} diff --git a/vendor/github.com/projectatomic/buildah/commit.go b/vendor/github.com/projectatomic/buildah/commit.go deleted file mode 100644 index 2d49832a7..000000000 --- a/vendor/github.com/projectatomic/buildah/commit.go +++ /dev/null @@ -1,189 +0,0 @@ -package buildah - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "time" - - cp "github.com/containers/image/copy" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" -) - -// CommitOptions can be used to alter how an image is committed. -type CommitOptions struct { - // PreferredManifestType is the preferred type of image manifest. The - // image configuration format will be of a compatible type. - PreferredManifestType string - // Compression specifies the type of compression which is applied to - // layer blobs. The default is to not use compression, but - // archive.Gzip is recommended. 
- Compression archive.Compression - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. - SignaturePolicyPath string - // AdditionalTags is a list of additional names to add to the image, if - // the transport to which we're writing the image gives us a way to add - // them. - AdditionalTags []string - // ReportWriter is an io.Writer which will be used to log the writing - // of the new image. - ReportWriter io.Writer - // HistoryTimestamp is the timestamp used when creating new items in the - // image's history. If unset, the current time will be used. - HistoryTimestamp *time.Time - // github.com/containers/image/types SystemContext to hold credentials - // and other authentication/authorization information. - SystemContext *types.SystemContext - // IIDFile tells the builder to write the image ID to the specified file - IIDFile string - // Squash tells the builder to produce an image with a single layer - // instead of with possibly more than one layer. - Squash bool - - // OnBuild is a list of commands to be run by images based on this image - OnBuild []string - // Parent is the base image that this image was created by. - Parent string -} - -// PushOptions can be used to alter how an image is copied somewhere. -type PushOptions struct { - // Compression specifies the type of compression which is applied to - // layer blobs. The default is to not use compression, but - // archive.Gzip is recommended. - Compression archive.Compression - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. - SignaturePolicyPath string - // ReportWriter is an io.Writer which will be used to log the writing - // of the new image. - ReportWriter io.Writer - // Store is the local storage store which holds the source image. - Store storage.Store - // github.com/containers/image/types SystemContext to hold credentials - // and other authentication/authorization information. - SystemContext *types.SystemContext - // ManifestType is the format to use when saving the image using the 'dir' transport - // possible options are oci, v2s1, and v2s2 - ManifestType string -} - -// Commit writes the contents of the container, along with its updated -// configuration, to a new image in the specified location, and if we know how, -// adds any additional tags that were specified. 
Returns the ID of the new image -// if commit was successful and the image destination was local -func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, error) { - var imgID string - - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) - policy, err := signature.DefaultPolicy(systemContext) - if err != nil { - return imgID, errors.Wrapf(err, "error obtaining default signature policy") - } - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return imgID, errors.Wrapf(err, "error creating new signature policy context") - } - defer func() { - if err2 := policyContext.Destroy(); err2 != nil { - logrus.Debugf("error destroying signature policy context: %v", err2) - } - }() - // Check if the base image is already in the destination and it's some kind of local - // storage. If so, we can skip recompressing any layers that come from the base image. - exportBaseLayers := true - if transport, destIsStorage := dest.Transport().(is.StoreTransport); destIsStorage && b.FromImageID != "" { - if baseref, err := transport.ParseReference(b.FromImageID); baseref != nil && err == nil { - if img, err := transport.GetImage(baseref); img != nil && err == nil { - exportBaseLayers = false - } - } - } - src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.Compression, options.HistoryTimestamp) - if err != nil { - return imgID, errors.Wrapf(err, "error computing layer digests and building metadata") - } - // "Copy" our image to where it needs to be. - err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, systemContext, "")) - if err != nil { - return imgID, errors.Wrapf(err, "error copying layers and metadata") - } - if len(options.AdditionalTags) > 0 { - switch dest.Transport().Name() { - case is.Transport.Name(): - img, err := is.Transport.GetStoreImage(b.store, dest) - if err != nil { - return imgID, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) - } - err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags) - if err != nil { - return imgID, errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...)) - } - logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) - default: - logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name()) - } - } - - img, err := is.Transport.GetStoreImage(b.store, dest) - if err != nil && err != storage.ErrImageUnknown { - return imgID, err - } - - if err == nil { - imgID = img.ID - - if options.IIDFile != "" { - if err := ioutil.WriteFile(options.IIDFile, []byte(img.ID), 0644); err != nil { - return imgID, errors.Wrapf(err, "failed to write Image ID File %q", options.IIDFile) - } - } - } - - return imgID, nil -} - -// Push copies the contents of the image to a new location. -func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) error { - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) - policy, err := signature.DefaultPolicy(systemContext) - if err != nil { - return errors.Wrapf(err, "error obtaining default signature policy") - } - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return errors.Wrapf(err, "error creating new signature policy context") - } - // Look up the image. 
- src, img, err := util.FindImage(options.Store, "", systemContext, image) - if err != nil { - return err - } - // Copy everything. - err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, systemContext, options.ManifestType)) - if err != nil { - return errors.Wrapf(err, "error copying layers and metadata") - } - if options.ReportWriter != nil { - fmt.Fprintf(options.ReportWriter, "") - } - digest := "@" + img.Digest.Hex() - fmt.Printf("Successfully pushed %s%s\n", dest.StringWithinTransport(), digest) - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/common.go b/vendor/github.com/projectatomic/buildah/common.go deleted file mode 100644 index dcf922dc9..000000000 --- a/vendor/github.com/projectatomic/buildah/common.go +++ /dev/null @@ -1,35 +0,0 @@ -package buildah - -import ( - "io" - - cp "github.com/containers/image/copy" - "github.com/containers/image/types" -) - -const ( - // OCI used to define the "oci" image format - OCI = "oci" - // DOCKER used to define the "docker" image format - DOCKER = "docker" -) - -func getCopyOptions(reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options { - return &cp.Options{ - ReportWriter: reportWriter, - SourceCtx: sourceSystemContext, - DestinationCtx: destinationSystemContext, - ForceManifestMIMEType: manifestType, - } -} - -func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext { - sc := &types.SystemContext{} - if defaults != nil { - *sc = *defaults - } - if signaturePolicyPath != "" { - sc.SignaturePolicyPath = signaturePolicyPath - } - return sc -} diff --git a/vendor/github.com/projectatomic/buildah/config.go b/vendor/github.com/projectatomic/buildah/config.go deleted file mode 100644 index 2f4d8319a..000000000 --- a/vendor/github.com/projectatomic/buildah/config.go +++ /dev/null @@ -1,545 +0,0 @@ -package buildah - -import ( - "context" - "encoding/json" - "os" - "runtime" - "strings" - "time" - - "github.com/containers/image/manifest" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/stringid" - ociv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/docker" - "github.com/sirupsen/logrus" -) - -// unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format -// (either as it exists, or converting the image if necessary), and unmarshals it into dest. -// NOTE: The MIME type is of the _manifest_, not of the _config_ that is returned. -func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error { - _, actualManifestMIMEType, err := img.Manifest(ctx) - if err != nil { - return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference())) - } - if wantedManifestMIMEType != actualManifestMIMEType { - img, err = img.UpdatedImage(ctx, types.ManifestUpdateOptions{ - ManifestMIMEType: wantedManifestMIMEType, - InformationOnly: types.ManifestUpdateInformation{ // Strictly speaking, every value in here is invalid. But… - Destination: nil, // Destination is technically required, but actually necessary only for conversion _to_ v2s1. Leave it nil, we will crash if that ever changes. 
- LayerInfos: nil, // LayerInfos is necessary for size information in v2s2/OCI manifests, but the code can work with nil, and we are not reading the converted manifest at all. - LayerDiffIDs: nil, // LayerDiffIDs are actually embedded in the converted manifest, but the code can work with nil, and the values are not needed until pushing the finished image, at which time containerImageRef.NewImageSource builds the values from scratch. - }, - }) - if err != nil { - return errors.Wrapf(err, "error converting image %q to %s", transports.ImageName(img.Reference()), wantedManifestMIMEType) - } - } - config, err := img.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "error reading %s config from %q", wantedManifestMIMEType, transports.ImageName(img.Reference())) - } - if err := json.Unmarshal(config, dest); err != nil { - return errors.Wrapf(err, "error parsing %s configuration from %q", wantedManifestMIMEType, transports.ImageName(img.Reference())) - } - return nil -} - -func (b *Builder) initConfig(ctx context.Context, img types.Image) error { - if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one. - rawManifest, manifestMIMEType, err := img.Manifest(ctx) - if err != nil { - return errors.Wrapf(err, "error reading image manifest for %q", transports.ImageName(img.Reference())) - } - rawConfig, err := img.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "error reading image configuration for %q", transports.ImageName(img.Reference())) - } - b.Manifest = rawManifest - b.Config = rawConfig - - dimage := docker.V2Image{} - if err := unmarshalConvertedConfig(ctx, &dimage, img, manifest.DockerV2Schema2MediaType); err != nil { - return err - } - b.Docker = dimage - - oimage := ociv1.Image{} - if err := unmarshalConvertedConfig(ctx, &oimage, img, ociv1.MediaTypeImageManifest); err != nil { - return err - } - b.OCIv1 = oimage - - if manifestMIMEType == ociv1.MediaTypeImageManifest { - // Attempt to recover format-specific data from the manifest. - v1Manifest := ociv1.Manifest{} - if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil { - return errors.Wrapf(err, "error parsing OCI manifest") - } - b.ImageAnnotations = v1Manifest.Annotations - } - } - - b.fixupConfig() - return nil -} - -func (b *Builder) fixupConfig() { - if b.Docker.Config != nil { - // Prefer image-level settings over those from the container it was built from. - b.Docker.ContainerConfig = *b.Docker.Config - } - b.Docker.Config = &b.Docker.ContainerConfig - b.Docker.DockerVersion = "" - now := time.Now().UTC() - if b.Docker.Created.IsZero() { - b.Docker.Created = now - } - if b.OCIv1.Created == nil || b.OCIv1.Created.IsZero() { - b.OCIv1.Created = &now - } - if b.OS() == "" { - b.SetOS(runtime.GOOS) - } - if b.Architecture() == "" { - b.SetArchitecture(runtime.GOARCH) - } - if b.Format == Dockerv2ImageManifest && b.Hostname() == "" { - b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID())) - } -} - -// Annotations returns a set of key-value pairs from the image's manifest. -func (b *Builder) Annotations() map[string]string { - return copyStringStringMap(b.ImageAnnotations) -} - -// SetAnnotation adds or overwrites a key's value from the image's manifest. -// Note: this setting is not present in the Docker v2 image format, so it is -// discarded when writing images using Docker v2 formats. 
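
(A small, hypothetical illustration of the annotation accessors that follow; as noted above, annotations survive only in OCI-format manifests.)

    b.SetAnnotation("org.example.built-by", "ci") // stored in b.ImageAnnotations
    _ = b.Annotations()                           // accessor returns a copy of the map
    b.UnsetAnnotation("org.example.built-by")     // plain delete; absent keys are fine
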
-func (b *Builder) SetAnnotation(key, value string) { - if b.ImageAnnotations == nil { - b.ImageAnnotations = map[string]string{} - } - b.ImageAnnotations[key] = value -} - -// UnsetAnnotation removes a key and its value from the image's manifest, if -// it's present. -func (b *Builder) UnsetAnnotation(key string) { - delete(b.ImageAnnotations, key) -} - -// ClearAnnotations removes all keys and their values from the image's -// manifest. -func (b *Builder) ClearAnnotations() { - b.ImageAnnotations = map[string]string{} -} - -// CreatedBy returns a description of how this image was built. -func (b *Builder) CreatedBy() string { - return b.ImageCreatedBy -} - -// SetCreatedBy sets the description of how this image was built. -func (b *Builder) SetCreatedBy(how string) { - b.ImageCreatedBy = how -} - -// OS returns a name of the OS on which the container, or a container built -// using an image built from this container, is intended to be run. -func (b *Builder) OS() string { - return b.OCIv1.OS -} - -// SetOS sets the name of the OS on which the container, or a container built -// using an image built from this container, is intended to be run. -func (b *Builder) SetOS(os string) { - b.OCIv1.OS = os - b.Docker.OS = os -} - -// Architecture returns a name of the architecture on which the container, or a -// container built using an image built from this container, is intended to be -// run. -func (b *Builder) Architecture() string { - return b.OCIv1.Architecture -} - -// SetArchitecture sets the name of the architecture on which the container, or -// a container built using an image built from this container, is intended to -// be run. -func (b *Builder) SetArchitecture(arch string) { - b.OCIv1.Architecture = arch - b.Docker.Architecture = arch -} - -// Maintainer returns contact information for the person who built the image. -func (b *Builder) Maintainer() string { - return b.OCIv1.Author -} - -// SetMaintainer sets contact information for the person who built the image. -func (b *Builder) SetMaintainer(who string) { - b.OCIv1.Author = who - b.Docker.Author = who -} - -// User returns information about the user as whom the container, or a -// container built using an image built from this container, should be run. -func (b *Builder) User() string { - return b.OCIv1.Config.User -} - -// SetUser sets information about the user as whom the container, or a -// container built using an image built from this container, should be run. -// Acceptable forms are a user name or ID, optionally followed by a colon and a -// group name or ID. -func (b *Builder) SetUser(spec string) { - b.OCIv1.Config.User = spec - b.Docker.Config.User = spec -} - -// OnBuild returns the OnBuild value from the container. -func (b *Builder) OnBuild() []string { - return copyStringSlice(b.Docker.Config.OnBuild) -} - -// ClearOnBuild removes all values from the OnBuild structure -func (b *Builder) ClearOnBuild() { - b.Docker.Config.OnBuild = []string{} -} - -// SetOnBuild sets a trigger instruction to be executed when the image is used -// as the base of another image. -// Note: this setting is not present in the OCIv1 image format, so it is -// discarded when writing images using OCIv1 formats. -func (b *Builder) SetOnBuild(onBuild string) { - if onBuild != "" && b.Format != Dockerv2ImageManifest { - logrus.Errorf("ONBUILD is not supported for OCI Image formats, %s will be ignored. 
Must use `docker` format", onBuild) - } - b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild) -} - -// WorkDir returns the default working directory for running commands in the -// container, or in a container built using an image built from this container. -func (b *Builder) WorkDir() string { - return b.OCIv1.Config.WorkingDir -} - -// SetWorkDir sets the location of the default working directory for running -// commands in the container, or in a container built using an image built from -// this container. -func (b *Builder) SetWorkDir(there string) { - b.OCIv1.Config.WorkingDir = there - b.Docker.Config.WorkingDir = there -} - -// Shell returns the default shell for running commands in the -// container, or in a container built using an image built from this container. -func (b *Builder) Shell() []string { - return copyStringSlice(b.Docker.Config.Shell) -} - -// SetShell sets the default shell for running -// commands in the container, or in a container built using an image built from -// this container. -// Note: this setting is not present in the OCIv1 image format, so it is -// discarded when writing images using OCIv1 formats. -func (b *Builder) SetShell(shell []string) { - if len(shell) > 0 && b.Format != Dockerv2ImageManifest { - logrus.Errorf("SHELL is not supported for OCI Image format, %s will be ignored. Must use `docker` format", shell) - } - - b.Docker.Config.Shell = copyStringSlice(shell) -} - -// Env returns a list of key-value pairs to be set when running commands in the -// container, or in a container built using an image built from this container. -func (b *Builder) Env() []string { - return copyStringSlice(b.OCIv1.Config.Env) -} - -// SetEnv adds or overwrites a value to the set of environment strings which -// should be set when running commands in the container, or in a container -// built using an image built from this container. -func (b *Builder) SetEnv(k string, v string) { - reset := func(s *[]string) { - getenv := func(name string) string { - for i := range *s { - val := strings.SplitN((*s)[i], "=", 2) - if len(val) == 2 && val[0] == name { - return val[1] - } - } - return name - } - n := []string{} - for i := range *s { - if !strings.HasPrefix((*s)[i], k+"=") { - n = append(n, (*s)[i]) - } - v = os.Expand(v, getenv) - } - n = append(n, k+"="+v) - *s = n - } - reset(&b.OCIv1.Config.Env) - reset(&b.Docker.Config.Env) -} - -// UnsetEnv removes a value from the set of environment strings which should be -// set when running commands in this container, or in a container built using -// an image built from this container. -func (b *Builder) UnsetEnv(k string) { - unset := func(s *[]string) { - n := []string{} - for i := range *s { - if !strings.HasPrefix((*s)[i], k+"=") { - n = append(n, (*s)[i]) - } - } - *s = n - } - unset(&b.OCIv1.Config.Env) - unset(&b.Docker.Config.Env) -} - -// ClearEnv removes all values from the set of environment strings which should -// be set when running commands in this container, or in a container built -// using an image built from this container. -func (b *Builder) ClearEnv() { - b.OCIv1.Config.Env = []string{} - b.Docker.Config.Env = []string{} -} - -// Cmd returns the default command, or command parameters if an Entrypoint is -// set, to use when running a container built from an image built from this -// container. 
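
(Note the os.Expand call in SetEnv above: a new value may reference variables already present in the configuration, while a reference to an unset name expands to the name itself. A sketch, assuming a Builder b:)

    b.SetEnv("PATH", "/usr/bin")
    b.SetEnv("PATH", "${PATH}:/opt/tools") // stored as PATH=/usr/bin:/opt/tools
    b.SetEnv("X", "${MISSING}")            // stored as X=MISSING, not as an empty value
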
-func (b *Builder) Cmd() []string {
- return copyStringSlice(b.OCIv1.Config.Cmd)
-}
-
-// SetCmd sets the default command, or command parameters if an Entrypoint is
-// set, to use when running a container built from an image built from this
-// container.
-func (b *Builder) SetCmd(cmd []string) {
- b.OCIv1.Config.Cmd = copyStringSlice(cmd)
- b.Docker.Config.Cmd = copyStringSlice(cmd)
-}
-
-// Entrypoint returns the command to be run in containers built from images
-// built from this container.
-func (b *Builder) Entrypoint() []string {
- if len(b.OCIv1.Config.Entrypoint) > 0 {
- return copyStringSlice(b.OCIv1.Config.Entrypoint)
- }
- return nil
-}
-
-// SetEntrypoint sets the command to be run in containers built from images
-// built from this container.
-func (b *Builder) SetEntrypoint(ep []string) {
- b.OCIv1.Config.Entrypoint = copyStringSlice(ep)
- b.Docker.Config.Entrypoint = copyStringSlice(ep)
-}
-
-// Labels returns a set of key-value pairs from the image's runtime
-// configuration.
-func (b *Builder) Labels() map[string]string {
- return copyStringStringMap(b.OCIv1.Config.Labels)
-}
-
-// SetLabel adds or overwrites a key's value from the image's runtime
-// configuration.
-func (b *Builder) SetLabel(k string, v string) {
- if b.OCIv1.Config.Labels == nil {
- b.OCIv1.Config.Labels = map[string]string{}
- }
- b.OCIv1.Config.Labels[k] = v
- if b.Docker.Config.Labels == nil {
- b.Docker.Config.Labels = map[string]string{}
- }
- b.Docker.Config.Labels[k] = v
-}
-
-// UnsetLabel removes a key and its value from the image's runtime
-// configuration, if it's present.
-func (b *Builder) UnsetLabel(k string) {
- delete(b.OCIv1.Config.Labels, k)
- delete(b.Docker.Config.Labels, k)
-}
-
-// ClearLabels removes all keys and their values from the image's runtime
-// configuration.
-func (b *Builder) ClearLabels() {
- b.OCIv1.Config.Labels = map[string]string{}
- b.Docker.Config.Labels = map[string]string{}
-}
-
-// Ports returns the set of ports which should be exposed when a container
-// based on an image built from this container is run.
-func (b *Builder) Ports() []string {
- p := []string{}
- for k := range b.OCIv1.Config.ExposedPorts {
- p = append(p, k)
- }
- return p
-}
-
-// SetPort adds or overwrites an exposed port in the set of ports which should
-// be exposed when a container based on an image built from this container is
-// run.
-func (b *Builder) SetPort(p string) {
- if b.OCIv1.Config.ExposedPorts == nil {
- b.OCIv1.Config.ExposedPorts = map[string]struct{}{}
- }
- b.OCIv1.Config.ExposedPorts[p] = struct{}{}
- if b.Docker.Config.ExposedPorts == nil {
- b.Docker.Config.ExposedPorts = make(docker.PortSet)
- }
- b.Docker.Config.ExposedPorts[docker.Port(p)] = struct{}{}
-}
-
-// UnsetPort removes an exposed port from the set of ports which should be
-// exposed when a container based on an image built from this container is run.
-func (b *Builder) UnsetPort(p string) {
- delete(b.OCIv1.Config.ExposedPorts, p)
- delete(b.Docker.Config.ExposedPorts, docker.Port(p))
-}
-
-// ClearPorts empties the set of ports which should be exposed when a container
-// based on an image built from this container is run.
-func (b *Builder) ClearPorts() {
- b.OCIv1.Config.ExposedPorts = map[string]struct{}{}
- b.Docker.Config.ExposedPorts = docker.PortSet{}
-}
-
-// Volumes returns a list of filesystem locations which should be mounted from
-// outside of the container when a container built from an image built from
-// this container is run.
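
(Taken together, these setters keep the OCI and Docker configurations in sync; a short, hypothetical usage run:)

    b.SetEntrypoint([]string{"/usr/local/bin/app"})
    b.SetCmd([]string{"--serve"})               // parameters to the entrypoint, per the doc above
    b.SetLabel("maintainer", "dev@example.com") // written to both configs
    b.SetPort("8080/tcp")                       // same "port/proto" form the Docker PortSet uses
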
-func (b *Builder) Volumes() []string { - v := []string{} - for k := range b.OCIv1.Config.Volumes { - v = append(v, k) - } - if len(v) > 0 { - return v - } - return nil -} - -// AddVolume adds a location to the image's list of locations which should be -// mounted from outside of the container when a container based on an image -// built from this container is run. -func (b *Builder) AddVolume(v string) { - if b.OCIv1.Config.Volumes == nil { - b.OCIv1.Config.Volumes = map[string]struct{}{} - } - b.OCIv1.Config.Volumes[v] = struct{}{} - if b.Docker.Config.Volumes == nil { - b.Docker.Config.Volumes = map[string]struct{}{} - } - b.Docker.Config.Volumes[v] = struct{}{} -} - -// RemoveVolume removes a location from the list of locations which should be -// mounted from outside of the container when a container based on an image -// built from this container is run. -func (b *Builder) RemoveVolume(v string) { - delete(b.OCIv1.Config.Volumes, v) - delete(b.Docker.Config.Volumes, v) -} - -// ClearVolumes removes all locations from the image's list of locations which -// should be mounted from outside of the container when a container based on an -// image built from this container is run. -func (b *Builder) ClearVolumes() { - b.OCIv1.Config.Volumes = map[string]struct{}{} - b.Docker.Config.Volumes = map[string]struct{}{} -} - -// Hostname returns the hostname which will be set in the container and in -// containers built using images built from the container. -func (b *Builder) Hostname() string { - return b.Docker.Config.Hostname -} - -// SetHostname sets the hostname which will be set in the container and in -// containers built using images built from the container. -// Note: this setting is not present in the OCIv1 image format, so it is -// discarded when writing images using OCIv1 formats. -func (b *Builder) SetHostname(name string) { - if name != "" && b.Format != Dockerv2ImageManifest { - logrus.Errorf("HOSTNAME is not supported for OCI Image format, hostname %s will be ignored. Must use `docker` format", name) - } - b.Docker.Config.Hostname = name -} - -// Domainname returns the domainname which will be set in the container and in -// containers built using images built from the container. -func (b *Builder) Domainname() string { - return b.Docker.Config.Domainname -} - -// SetDomainname sets the domainname which will be set in the container and in -// containers built using images built from the container. -// Note: this setting is not present in the OCIv1 image format, so it is -// discarded when writing images using OCIv1 formats. -func (b *Builder) SetDomainname(name string) { - if name != "" && b.Format != Dockerv2ImageManifest { - logrus.Errorf("DOMAINNAME is not supported for OCI Image format, domainname %s will be ignored. Must use `docker` format", name) - } - b.Docker.Config.Domainname = name -} - -// SetDefaultMountsFilePath sets the mounts file path for testing purposes -func (b *Builder) SetDefaultMountsFilePath(path string) { - b.DefaultMountsFilePath = path -} - -// Comment returns the comment which will be set in the container and in -// containers built using images built from the container -func (b *Builder) Comment() string { - return b.Docker.Comment -} - -// SetComment sets the comment which will be set in the container and in -// containers built using images built from the container. -// Note: this setting is not present in the OCIv1 image format, so it is -// discarded when writing images using OCIv1 formats. 
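
(Hostname, domainname, ONBUILD, SHELL, and the comment field below exist only in the Docker v2 config; as the error paths above show, the values are dropped, with a logged error, unless the builder targets the Docker manifest type. A hedged sketch, assuming Builder.Format is settable by the caller as in this era:)

    b.Format = buildah.Dockerv2ImageManifest // otherwise the setters below log errors
    b.SetHostname("builder")
    b.SetDomainname("example.internal")
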
-func (b *Builder) SetComment(comment string) {
- if comment != "" && b.Format != Dockerv2ImageManifest {
- logrus.Errorf("COMMENT is not supported for OCI Image format, comment %s will be ignored. Must use `docker` format", comment)
- }
- b.Docker.Comment = comment
-}
-
-// HistoryComment returns the comment which will be used in the history item
-// which will describe the latest layer when we commit an image.
-func (b *Builder) HistoryComment() string {
- return b.ImageHistoryComment
-}
-
-// SetHistoryComment sets the comment which will be used in the history item
-// which will describe the latest layer when we commit an image.
-func (b *Builder) SetHistoryComment(comment string) {
- b.ImageHistoryComment = comment
-}
-
-// StopSignal returns the signal which will be set in the container and in
-// containers built using images built from the container.
-func (b *Builder) StopSignal() string {
- return b.Docker.Config.StopSignal
-}
-
-// SetStopSignal sets the signal which will be set in the container and in
-// containers built using images built from the container.
-func (b *Builder) SetStopSignal(stopSignal string) {
- b.OCIv1.Config.StopSignal = stopSignal
- b.Docker.Config.StopSignal = stopSignal
-}
diff --git a/vendor/github.com/projectatomic/buildah/delete.go b/vendor/github.com/projectatomic/buildah/delete.go
deleted file mode 100644
index 8de774ff9..000000000
--- a/vendor/github.com/projectatomic/buildah/delete.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package buildah
-
-import (
- "github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
-)
-
-// Delete removes the working container. The buildah.Builder object should not
-// be used after this method is called.
-func (b *Builder) Delete() error {
- if err := b.store.DeleteContainer(b.ContainerID); err != nil {
- return errors.Wrapf(err, "error deleting build container")
- }
- b.MountPoint = ""
- b.Container = ""
- b.ContainerID = ""
- return label.ReleaseLabel(b.ProcessLabel)
-}
diff --git a/vendor/github.com/projectatomic/buildah/docker/types.go b/vendor/github.com/projectatomic/buildah/docker/types.go
deleted file mode 100644
index 759fc1246..000000000
--- a/vendor/github.com/projectatomic/buildah/docker/types.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package docker
-
-//
-// Types extracted from Docker
-//
-
-import (
- "time"
-
- "github.com/containers/image/pkg/strslice"
- "github.com/opencontainers/go-digest"
-)
-
-// github.com/moby/moby/image/rootfs.go
-const TypeLayers = "layers"
-
-// github.com/docker/distribution/manifest/schema2/manifest.go
-const V2S2MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
-
-// github.com/moby/moby/image/rootfs.go
-// RootFS describes the image's root filesystem.
-// This is currently a placeholder that only supports layers. In the future
-// this can be made into an interface that supports different implementations.
-type V2S2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// github.com/moby/moby/image/image.go -// History stores build commands that were used to create an image -type V2S2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// github.com/moby/moby/image/image.go -// ID is the content-addressable ID of an image. -type ID digest.Digest - -// github.com/moby/moby/api/types/container/config.go -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// github.com/docker/go-connections/nat/nat.go -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// github.com/docker/go-connections/nat/nat.go -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// github.com/moby/moby/api/types/container/config.go -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variables to set in the container
- Cmd strslice.StrSlice // Command to run when starting the container
- Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
- Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
- Volumes map[string]struct{} // List of volumes (mounts) used for the container
- WorkingDir string // Current directory (PWD) in which the command will be launched
- Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
- NetworkDisabled bool `json:",omitempty"` // Is network disabled
- MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that was defined on the image Dockerfile
- Labels map[string]string // List of labels set on this container
- StopSignal string `json:",omitempty"` // Signal to stop a container
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
-
-// github.com/docker/distribution/manifest/schema1/config_builder.go
-// For non-top-level layers, create fake V1Compatibility strings that
-// fit the format and don't collide with anything else, but don't
-// result in runnable images on their own.
-type V1Compatibility struct {
- ID string `json:"id"`
- Parent string `json:"parent,omitempty"`
- Comment string `json:"comment,omitempty"`
- Created time.Time `json:"created"`
- ContainerConfig struct {
- Cmd []string
- } `json:"container_config,omitempty"`
- Author string `json:"author,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
-}
-
-// github.com/moby/moby/image/image.go
-// V1Image stores the V1 image configuration.
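
(A minimal, hypothetical docker.Config literal following the HEALTHCHECK forms documented above; "CMD-SHELL" runs the command through the default shell.)

    cfg := docker.Config{
        User: "1000:1000",
        Env:  []string{"PATH=/usr/bin"},
        Healthcheck: &docker.HealthConfig{
            Test:     []string{"CMD-SHELL", "curl -fsS http://localhost:8080/healthz"},
            Interval: 30 * time.Second, // zero means "inherit"
            Timeout:  5 * time.Second,
            Retries:  3,
        },
    }
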
-type V1Image struct {
- // ID is a unique 64 character identifier of the image
- ID string `json:"id,omitempty"`
- // Parent is the ID of the parent image
- Parent string `json:"parent,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Container is the ID of the container used to commit
- Container string `json:"container,omitempty"`
- // ContainerConfig is the configuration of the container that is committed into the image
- ContainerConfig Config `json:"container_config,omitempty"`
- // DockerVersion specifies the version of Docker that was used to build the image
- DockerVersion string `json:"docker_version,omitempty"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // Config is the configuration of the container received from the client
- Config *Config `json:"config,omitempty"`
- // Architecture is the hardware that the image is built and runs on
- Architecture string `json:"architecture,omitempty"`
- // OS is the operating system used to build and run the image
- OS string `json:"os,omitempty"`
- // Size is the total size of the image including all layers it is composed of
- Size int64 `json:",omitempty"`
-}
-
-// github.com/moby/moby/image/image.go
-// Image stores the image configuration
-type V2Image struct {
- V1Image
- Parent ID `json:"parent,omitempty"`
- RootFS *V2S2RootFS `json:"rootfs,omitempty"`
- History []V2S2History `json:"history,omitempty"`
- OSVersion string `json:"os.version,omitempty"`
- OSFeatures []string `json:"os.features,omitempty"`
-
- // rawJSON caches the immutable JSON associated with this image.
- rawJSON []byte
-
- // computedID is the ID computed from the hash of the image config.
- // Not to be confused with the legacy V1 ID in V1Image.
- computedID ID
-}
-
-// github.com/docker/distribution/manifest/versioned.go
-// Versioned provides a struct with the manifest schemaVersion and mediaType.
-// Incoming content with unknown schema version can be decoded against this
-// struct to check the version.
-type V2Versioned struct {
- // SchemaVersion is the image manifest schema that this image follows
- SchemaVersion int `json:"schemaVersion"`
-
- // MediaType is the media type of this schema.
- MediaType string `json:"mediaType,omitempty"`
-}
-
-// github.com/docker/distribution/manifest/schema1/manifest.go
-// FSLayer is a container struct for BlobSums defined in an image manifest
-type V2S1FSLayer struct {
- // BlobSum is the tarsum of the referenced filesystem image layer
- BlobSum digest.Digest `json:"blobSum"`
-}
-
-// github.com/docker/distribution/manifest/schema1/manifest.go
-// History stores unstructured v1 compatibility information
-type V2S1History struct {
- // V1Compatibility is the raw v1 compatibility information
- V1Compatibility string `json:"v1Compatibility"`
-}
-
-// github.com/docker/distribution/manifest/schema1/manifest.go
-// Manifest provides the base accessible fields for working with V2 image
-// format in the registry.
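
(V2Versioned is deliberately minimal so that, per its comment, a manifest of unknown version can be decoded against it first. A hedged sketch, with raw []byte standing in for manifest bytes:)

    var v docker.V2Versioned
    if err := json.Unmarshal(raw, &v); err != nil {
        return err
    }
    if v.SchemaVersion == 1 {
        var m docker.V2S1Manifest // defined just below
        return json.Unmarshal(raw, &m)
    }
    var m docker.V2S2Manifest
    return json.Unmarshal(raw, &m)
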
-type V2S1Manifest struct {
- V2Versioned
-
- // Name is the name of the image's repository
- Name string `json:"name"`
-
- // Tag is the tag of the image specified by this manifest
- Tag string `json:"tag"`
-
- // Architecture is the host architecture on which this image is intended to
- // run
- Architecture string `json:"architecture"`
-
- // FSLayers is a list of filesystem layer blobSums contained in this image
- FSLayers []V2S1FSLayer `json:"fsLayers"`
-
- // History is a list of unstructured historical data for v1 compatibility
- History []V2S1History `json:"history"`
-}
-
-// github.com/docker/distribution/blobs.go
-// Descriptor describes targeted content. Used in conjunction with a blob
-// store, a descriptor can be used to fetch, store and target any kind of
-// blob. The struct also describes the wire protocol format. Fields should
-// only be added but never changed.
-type V2S2Descriptor struct {
- // MediaType describes the type of the content. All text-based formats are
- // encoded as utf-8.
- MediaType string `json:"mediaType,omitempty"`
-
- // Size in bytes of content.
- Size int64 `json:"size,omitempty"`
-
- // Digest uniquely identifies the content. A byte stream can be verified
- // against this digest.
- Digest digest.Digest `json:"digest,omitempty"`
-
- // URLs contains the source URLs of this content.
- URLs []string `json:"urls,omitempty"`
-
- // NOTE: Before adding a field here, please ensure that all
- // other options have been exhausted. Much of the type relationships
- // depend on the simplicity of this type.
-}
-
-// github.com/docker/distribution/manifest/schema2/manifest.go
-// Manifest defines a schema2 manifest.
-type V2S2Manifest struct {
- V2Versioned
-
- // Config references the image configuration as a blob.
- Config V2S2Descriptor `json:"config"`
-
- // Layers lists descriptors for the layers referenced by the
- // configuration.
- Layers []V2S2Descriptor `json:"layers"`
-}
diff --git a/vendor/github.com/projectatomic/buildah/image.go b/vendor/github.com/projectatomic/buildah/image.go
deleted file mode 100644
index b94720f59..000000000
--- a/vendor/github.com/projectatomic/buildah/image.go
+++ /dev/null
@@ -1,634 +0,0 @@
-package buildah
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "time"
-
- "github.com/containers/image/docker/reference"
- "github.com/containers/image/image"
- "github.com/containers/image/manifest"
- is "github.com/containers/image/storage"
- "github.com/containers/image/types"
- "github.com/containers/storage"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/ioutils"
- digest "github.com/opencontainers/go-digest"
- specs "github.com/opencontainers/image-spec/specs-go"
- "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
- "github.com/projectatomic/buildah/docker"
- "github.com/sirupsen/logrus"
-)
-
-const (
- // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
- // suitable for specifying as a value of the PreferredManifestType
- // member of a CommitOptions structure. It is also the default.
- OCIv1ImageManifest = v1.MediaTypeImageManifest
- // Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
- // manifest, suitable for specifying as a value of the
- // PreferredManifestType member of a CommitOptions structure.
- Dockerv2ImageManifest = manifest.DockerV2Schema2MediaType -) - -type containerImageRef struct { - store storage.Store - compression archive.Compression - name reference.Named - names []string - containerID string - mountLabel string - layerID string - oconfig []byte - dconfig []byte - created time.Time - createdBy string - historyComment string - annotations map[string]string - preferredManifestType string - exporting bool - squash bool - tarPath func(path string) (io.ReadCloser, error) - parent string -} - -type containerImageSource struct { - path string - ref *containerImageRef - store storage.Store - containerID string - mountLabel string - layerID string - names []string - compression archive.Compression - config []byte - configDigest digest.Digest - manifest []byte - manifestType string - exporting bool -} - -func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) { - src, err := i.NewImageSource(ctx, sc) - if err != nil { - return nil, err - } - return image.FromSource(ctx, sc, src) -} - -func expectedOCIDiffIDs(image v1.Image) int { - expected := 0 - for _, history := range image.History { - if !history.EmptyLayer { - expected = expected + 1 - } - } - return expected -} - -func expectedDockerDiffIDs(image docker.V2Image) int { - expected := 0 - for _, history := range image.History { - if !history.EmptyLayer { - expected = expected + 1 - } - } - return expected -} - -// Compute the media types which we need to attach to a layer, given the type of -// compression that we'll be applying. -func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) { - omediaType = v1.MediaTypeImageLayer - //TODO: Convert to manifest.DockerV2Schema2LayerUncompressedMediaType once available - dmediaType = docker.V2S2MediaTypeUncompressedLayer - if i.compression != archive.Uncompressed { - switch i.compression { - case archive.Gzip: - omediaType = v1.MediaTypeImageLayerGzip - dmediaType = manifest.DockerV2Schema2LayerMediaType - logrus.Debugf("compressing %s with gzip", what) - case archive.Bzip2: - // Until the image specs define a media type for bzip2-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with bzip2. - return "", "", errors.New("media type for bzip2-compressed layers is not defined") - case archive.Xz: - // Until the image specs define a media type for xz-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with xz. - return "", "", errors.New("media type for xz-compressed layers is not defined") - default: - logrus.Debugf("compressing %s with unknown compressor(?)", what) - } - } - return omediaType, dmediaType, nil -} - -// Extract the container's whole filesystem as if it were a single layer. 
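
(Condensed, editorial view of computeLayerMIMEType above: gzip is the only compression with media types defined by both specs; bzip2 and xz are rejected outright.)

    // archive.Uncompressed -> v1.MediaTypeImageLayer / docker.V2S2MediaTypeUncompressedLayer
    // archive.Gzip         -> v1.MediaTypeImageLayerGzip / manifest.DockerV2Schema2LayerMediaType
    // archive.Bzip2, Xz    -> error: no media type defined for these yet
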
-func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) { - mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) - if err != nil { - return nil, errors.Wrapf(err, "error extracting container %q", i.containerID) - } - rc, err := i.tarPath(mountPoint) - if err != nil { - return nil, errors.Wrapf(err, "error extracting container %q", i.containerID) - } - return ioutils.NewReadCloserWrapper(rc, func() error { - err := rc.Close() - if err != nil { - err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID) - } - if _, err2 := i.store.Unmount(i.containerID, false); err == nil { - if err2 != nil { - err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID) - } - err = err2 - } - return err - }), nil -} - -// Build fresh copies of the container configuration structures so that we can edit them -// without making unintended changes to the original Builder. -func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) { - created := i.created - - // Build an empty image, and then decode over it. - oimage := v1.Image{} - if err := json.Unmarshal(i.oconfig, &oimage); err != nil { - return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err - } - // Always replace this value, since we're newer than our base image. - oimage.Created = &created - // Clear the list of diffIDs, since we always repopulate it. - oimage.RootFS.Type = docker.TypeLayers - oimage.RootFS.DiffIDs = []digest.Digest{} - // Only clear the history if we're squashing, otherwise leave it be so that we can append - // entries to it. - if i.squash { - oimage.History = []v1.History{} - } - - // Build an empty image, and then decode over it. - dimage := docker.V2Image{} - if err := json.Unmarshal(i.dconfig, &dimage); err != nil { - return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err - } - dimage.Parent = docker.ID(digest.FromString(i.parent)) - // Always replace this value, since we're newer than our base image. - dimage.Created = created - // Clear the list of diffIDs, since we always repopulate it. - dimage.RootFS = &docker.V2S2RootFS{} - dimage.RootFS.Type = docker.TypeLayers - dimage.RootFS.DiffIDs = []digest.Digest{} - // Only clear the history if we're squashing, otherwise leave it be so that we can append - // entries to it. - if i.squash { - dimage.History = []docker.V2S2History{} - } - - // Build empty manifests. The Layers lists will be populated later. - omanifest := v1.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: v1.Descriptor{ - MediaType: v1.MediaTypeImageConfig, - }, - Layers: []v1.Descriptor{}, - Annotations: i.annotations, - } - - dmanifest := docker.V2S2Manifest{ - V2Versioned: docker.V2Versioned{ - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - }, - Config: docker.V2S2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, - }, - Layers: []docker.V2S2Descriptor{}, - } - - return oimage, omanifest, dimage, dmanifest, nil -} - -func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { - // Decide which type of manifest and configuration output we're going to provide. - manifestType := i.preferredManifestType - // If it's not a format we support, return an error. 
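
(For reference, an editorial aside: the check that follows accepts exactly two manifest MIME types, whose current constant values are:)

    // v1.MediaTypeImageManifest         == "application/vnd.oci.image.manifest.v1+json"
    // manifest.DockerV2Schema2MediaType == "application/vnd.docker.distribution.manifest.v2+json"
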
- if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType { - return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", - manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType) - } - // Start building the list of layers using the read-write layer. - layers := []string{} - layerID := i.layerID - layer, err := i.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) - } - // Walk the list of parent layers, prepending each as we go. If we're squashing, - // stop at the layer ID of the top layer, which we won't really be using anyway. - for layer != nil { - layers = append(append([]string{}, layerID), layers...) - layerID = layer.Parent - if layerID == "" || i.squash { - err = nil - break - } - layer, err = i.store.Layer(layerID) - if err != nil { - return nil, errors.Wrapf(err, "unable to read layer %q", layerID) - } - } - logrus.Debugf("layer list: %q", layers) - - // Make a temporary directory to hold blobs. - path, err := ioutil.TempDir(os.TempDir(), Package) - if err != nil { - return nil, err - } - logrus.Debugf("using %q to hold temporary data", path) - defer func() { - if src == nil { - err2 := os.RemoveAll(path) - if err2 != nil { - logrus.Errorf("error removing %q: %v", path, err) - } - } - }() - - // Build fresh copies of the configurations and manifest so that we don't mess with any - // values in the Builder object itself. - oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests() - if err != nil { - return nil, err - } - - // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. - for _, layerID := range layers { - what := fmt.Sprintf("layer %q", layerID) - if i.squash { - what = fmt.Sprintf("container %q", i.containerID) - } - // The default layer media type assumes no compression. - omediaType := v1.MediaTypeImageLayer - dmediaType := docker.V2S2MediaTypeUncompressedLayer - // If we're not re-exporting the data, and we're reusing layers individually, reuse - // the blobsum and diff IDs. - if !i.exporting && !i.squash && layerID != i.layerID { - layer, err2 := i.store.Layer(layerID) - if err2 != nil { - return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) - } - if layer.UncompressedDigest == "" { - return nil, errors.Errorf("unable to look up size of layer %q", layerID) - } - layerBlobSum := layer.UncompressedDigest - layerBlobSize := layer.UncompressedSize - // Note this layer in the manifest, using the uncompressed blobsum. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: layerBlobSum, - Size: layerBlobSize, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: layerBlobSum, - Size: layerBlobSize, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Note this layer in the list of diffIDs, again using the uncompressed blobsum. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum) - continue - } - // Figure out if we need to change the media type, in case we're using compression. - omediaType, dmediaType, err = i.computeLayerMIMEType(what) - if err != nil { - return nil, err - } - // Start reading either the layer or the whole container rootfs. 
- noCompression := archive.Uncompressed - diffOptions := &storage.DiffOptions{ - Compression: &noCompression, - } - var rc io.ReadCloser - if i.squash { - // Extract the root filesystem as a single layer. - rc, err = i.extractRootfs() - if err != nil { - return nil, err - } - defer rc.Close() - } else { - // Extract this layer, one of possibly many. - rc, err = i.store.Diff("", layerID, diffOptions) - if err != nil { - return nil, errors.Wrapf(err, "error extracting %s", what) - } - defer rc.Close() - } - srcHasher := digest.Canonical.Digester() - reader := io.TeeReader(rc, srcHasher.Hash()) - // Set up to write the possibly-recompressed blob. - layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) - if err != nil { - return nil, errors.Wrapf(err, "error opening file for %s", what) - } - destHasher := digest.Canonical.Digester() - counter := ioutils.NewWriteCounter(layerFile) - multiWriter := io.MultiWriter(counter, destHasher.Hash()) - // Compress the layer, if we're recompressing it. - writer, err := archive.CompressStream(multiWriter, i.compression) - if err != nil { - return nil, errors.Wrapf(err, "error compressing %s", what) - } - size, err := io.Copy(writer, reader) - if err != nil { - return nil, errors.Wrapf(err, "error storing %s to file", what) - } - writer.Close() - layerFile.Close() - if i.compression == archive.Uncompressed { - if size != counter.Count { - return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count) - } - } else { - size = counter.Count - } - logrus.Debugf("%s size is %d bytes", what, size) - // Rename the layer so that we can more easily find it by digest later. - err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) - if err != nil { - return nil, errors.Wrapf(err, "error storing %s to file", what) - } - // Add a note in the manifest about the layer. The blobs are identified by their possibly- - // compressed blob digests. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: destHasher.Digest(), - Size: size, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: destHasher.Digest(), - Size: size, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Add a note about the diffID, which is always the layer's uncompressed digest. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) - } - - // Build history notes in the image configurations. - onews := v1.History{ - Created: &i.created, - CreatedBy: i.createdBy, - Author: oimage.Author, - Comment: i.historyComment, - EmptyLayer: false, - } - oimage.History = append(oimage.History, onews) - dnews := docker.V2S2History{ - Created: i.created, - CreatedBy: i.createdBy, - Author: dimage.Author, - Comment: i.historyComment, - EmptyLayer: false, - } - dimage.History = append(dimage.History, dnews) - dimage.Parent = docker.ID(digest.FromString(i.parent)) - - // Sanity check that we didn't just create a mismatch between non-empty layers in the - // history and the number of diffIDs. 
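
(The copy loop above computes both digests in one pass: io.TeeReader feeds the uncompressed stream to srcHasher, which yields the diffID, while the compressing writer feeds destHasher, which yields the blob digest. The same pattern in miniature, with rc standing in for a layer stream:)

    src := digest.Canonical.Digester()
    dst := digest.Canonical.Digester()
    w, err := archive.CompressStream(dst.Hash(), archive.Gzip)
    if err != nil {
        return err
    }
    if _, err := io.Copy(w, io.TeeReader(rc, src.Hash())); err != nil {
        return err
    }
    w.Close()               // flush before reading the digest
    diffID := src.Digest()  // digest of the uncompressed bytes
    blobSum := dst.Digest() // digest of the compressed bytes
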
- expectedDiffIDs := expectedOCIDiffIDs(oimage) - if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { - return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs)) - } - expectedDiffIDs = expectedDockerDiffIDs(dimage) - if len(dimage.RootFS.DiffIDs) != expectedDiffIDs { - return nil, errors.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs)) - } - - // Encode the image configuration blob. - oconfig, err := json.Marshal(&oimage) - if err != nil { - return nil, err - } - logrus.Debugf("OCIv1 config = %s", oconfig) - - // Add the configuration blob to the manifest. - omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) - omanifest.Config.Size = int64(len(oconfig)) - omanifest.Config.MediaType = v1.MediaTypeImageConfig - - // Encode the manifest. - omanifestbytes, err := json.Marshal(&omanifest) - if err != nil { - return nil, err - } - logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) - - // Encode the image configuration blob. - dconfig, err := json.Marshal(&dimage) - if err != nil { - return nil, err - } - logrus.Debugf("Docker v2s2 config = %s", dconfig) - - // Add the configuration blob to the manifest. - dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) - dmanifest.Config.Size = int64(len(dconfig)) - dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - // Encode the manifest. - dmanifestbytes, err := json.Marshal(&dmanifest) - if err != nil { - return nil, err - } - logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) - - // Decide which manifest and configuration blobs we'll actually output. - var config []byte - var imageManifest []byte - switch manifestType { - case v1.MediaTypeImageManifest: - imageManifest = omanifestbytes - config = oconfig - case manifest.DockerV2Schema2MediaType: - imageManifest = dmanifestbytes - config = dconfig - default: - panic("unreachable code: unsupported manifest type") - } - src = &containerImageSource{ - path: path, - ref: i, - store: i.store, - containerID: i.containerID, - mountLabel: i.mountLabel, - layerID: i.layerID, - names: i.names, - compression: i.compression, - config: config, - configDigest: digest.Canonical.FromBytes(config), - manifest: imageManifest, - manifestType: manifestType, - exporting: i.exporting, - } - return src, nil -} - -func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) { - return nil, errors.Errorf("can't write to a container") -} - -func (i *containerImageRef) DockerReference() reference.Named { - return i.name -} - -func (i *containerImageRef) StringWithinTransport() string { - if len(i.names) > 0 { - return i.names[0] - } - return "" -} - -func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error { - // we were never here - return nil -} - -func (i *containerImageRef) PolicyConfigurationIdentity() string { - return "" -} - -func (i *containerImageRef) PolicyConfigurationNamespaces() []string { - return nil -} - -func (i *containerImageRef) Transport() types.ImageTransport { - return is.Transport -} - -func (i *containerImageSource) Close() error { - err := os.RemoveAll(i.path) - if err != nil { - logrus.Errorf("error removing %q: %v", i.path, err) - } - return err -} - -func (i *containerImageSource) Reference() types.ImageReference { - return i.ref -} - -func (i *containerImageSource) 
GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.Errorf("containerImageSource does not support manifest lists") - } - return nil, nil -} - -func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.Errorf("containerImageSource does not support manifest lists") - } - return i.manifest, i.manifestType, nil -} - -func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} - -func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) { - if blob.Digest == i.configDigest { - logrus.Debugf("start reading config") - reader := bytes.NewReader(i.config) - closer := func() error { - logrus.Debugf("finished reading config") - return nil - } - return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil - } - layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600) - if err != nil { - logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err) - return nil, -1, err - } - size = -1 - st, err := layerFile.Stat() - if err != nil { - logrus.Warnf("error reading size of layer %q: %v", blob.Digest.String(), err) - } else { - size = st.Size() - } - logrus.Debugf("reading layer %q", blob.Digest.String()) - closer := func() error { - layerFile.Close() - logrus.Debugf("finished reading layer %q", blob.Digest.String()) - return nil - } - return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil -} - -func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { - var name reference.Named - container, err := b.store.Container(b.ContainerID) - if err != nil { - return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID) - } - if len(container.Names) > 0 { - if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil { - name = parsed - } - } - if manifestType == "" { - manifestType = OCIv1ImageManifest - } - oconfig, err := json.Marshal(&b.OCIv1) - if err != nil { - return nil, errors.Wrapf(err, "error encoding OCI-format image configuration") - } - dconfig, err := json.Marshal(&b.Docker) - if err != nil { - return nil, errors.Wrapf(err, "error encoding docker-format image configuration") - } - created := time.Now().UTC() - if historyTimestamp != nil { - created = historyTimestamp.UTC() - } - - ref := &containerImageRef{ - store: b.store, - compression: compress, - name: name, - names: container.Names, - containerID: container.ID, - mountLabel: b.MountLabel, - layerID: container.LayerID, - oconfig: oconfig, - dconfig: dconfig, - created: created, - createdBy: b.CreatedBy(), - historyComment: b.HistoryComment(), - annotations: b.Annotations(), - preferredManifestType: manifestType, - exporting: exporting, - squash: squash, - tarPath: b.tarPath(), - parent: parent, - } - return ref, nil -} diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/build.go b/vendor/github.com/projectatomic/buildah/imagebuildah/build.go deleted file mode 100644 index 08d0f6268..000000000 --- a/vendor/github.com/projectatomic/buildah/imagebuildah/build.go +++ /dev/null @@ -1,1337 +0,0 @@ -package imagebuildah - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - 
"os/exec" - "path/filepath" - "strconv" - "strings" - "time" - - cp "github.com/containers/image/copy" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/stringid" - "github.com/docker/docker/builder/dockerfile/parser" - docker "github.com/fsouza/go-dockerclient" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/openshift/imagebuilder" - "github.com/pkg/errors" - "github.com/projectatomic/buildah" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" -) - -const ( - PullIfMissing = buildah.PullIfMissing - PullAlways = buildah.PullAlways - PullNever = buildah.PullNever - - Gzip = archive.Gzip - Bzip2 = archive.Bzip2 - Xz = archive.Xz - Uncompressed = archive.Uncompressed -) - -// Mount is a mountpoint for the build container. -type Mount specs.Mount - -// BuildOptions can be used to alter how an image is built. -type BuildOptions struct { - // ContextDirectory is the default source location for COPY and ADD - // commands. - ContextDirectory string - // PullPolicy controls whether or not we pull images. It should be one - // of PullIfMissing, PullAlways, or PullNever. - PullPolicy buildah.PullPolicy - // Registry is a value which is prepended to the image's name, if it - // needs to be pulled and the image name alone can not be resolved to a - // reference to a source image. No separator is implicitly added. - Registry string - // Transport is a value which is prepended to the image's name, if it - // needs to be pulled and the image name alone, or the image name and - // the registry together, can not be resolved to a reference to a - // source image. No separator is implicitly added. - Transport string - // IgnoreUnrecognizedInstructions tells us to just log instructions we - // don't recognize, and try to keep going. - IgnoreUnrecognizedInstructions bool - // Quiet tells us whether or not to announce steps as we go through them. - Quiet bool - // Isolation controls how Run() runs things. - Isolation buildah.Isolation - // Runtime is the name of the command to run for RUN instructions when - // Isolation is either IsolationDefault or IsolationOCI. It should - // accept the same arguments and flags that runc does. - Runtime string - // RuntimeArgs adds global arguments for the runtime. - RuntimeArgs []string - // TransientMounts is a list of mounts that won't be kept in the image. - TransientMounts []Mount - // Compression specifies the type of compression which is applied to - // layer blobs. The default is to not use compression, but - // archive.Gzip is recommended. - Compression archive.Compression - // Arguments which can be interpolated into Dockerfiles - Args map[string]string - // Name of the image to write to. - Output string - // Additional tags to add to the image that we write, if we know of a - // way to add them. - AdditionalTags []string - // Log is a callback that will print a progress message. If no value - // is supplied, the message will be sent to Err (or os.Stderr, if Err - // is nil) by default. - Log func(format string, args ...interface{}) - // In is connected to stdin for RUN instructions. - In io.Reader - // Out is a place where non-error log messages are sent. - Out io.Writer - // Err is a place where error log messages should be sent. 
-	Err io.Writer
-	// SignaturePolicyPath specifies an override location for the signature
-	// policy which should be used for verifying the new image as it is
-	// being written. Except in specific circumstances, no value should be
-	// specified, indicating that the shared, system-wide default policy
-	// should be used.
-	SignaturePolicyPath string
-	// ReportWriter is an io.Writer which will be used to report the
-	// progress of the (possible) pulling of the source image and the
-	// writing of the new image.
-	ReportWriter io.Writer
-	// OutputFormat is the format of the output image's manifest and
-	// configuration data.
-	// Accepted values are buildah.OCIv1ImageManifest and buildah.Dockerv2ImageManifest.
-	OutputFormat string
-	// SystemContext holds parameters used for authentication.
-	SystemContext *types.SystemContext
-	// NamespaceOptions controls how we set up namespaces for processes that
-	// we might need when handling RUN instructions.
-	NamespaceOptions []buildah.NamespaceOption
-	// ConfigureNetwork controls whether or not network interfaces and
-	// routing are configured for a new network namespace (i.e., when not
-	// joining another's namespace and not just using the host's
-	// namespace), effectively deciding whether or not the process has a
-	// usable network.
-	ConfigureNetwork buildah.NetworkConfigurationPolicy
-	// CNIPluginPath is the location of CNI plugin helpers, if they should be
-	// run from a location other than the default location.
-	CNIPluginPath string
-	// CNIConfigDir is the location of CNI configuration files, if the files in
-	// the default configuration directory shouldn't be used.
-	CNIConfigDir string
-	// ID mapping options to use if we're setting up our own user namespace
-	// when handling RUN instructions.
-	IDMappingOptions *buildah.IDMappingOptions
-	// AddCapabilities is a list of capabilities to add to the default set when
-	// handling RUN instructions.
-	AddCapabilities []string
-	// DropCapabilities is a list of capabilities to remove from the default set
-	// when handling RUN instructions. If a capability appears in both lists, it
-	// will be dropped.
-	DropCapabilities []string
-	CommonBuildOpts  *buildah.CommonBuildOptions
-	// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
-	DefaultMountsFilePath string
-	// IIDFile tells the builder to write the image ID to the specified file
-	IIDFile string
-	// Squash tells the builder to produce an image with a single layer
-	// instead of with possibly more than one layer.
-	Squash bool
-	// Labels is metadata for an image
-	Labels []string
-	// Annotations is metadata for an image
-	Annotations []string
-	// OnBuild commands to be run by images based on this image
-	OnBuild []string
-	// Layers tells the builder to create a cache of images for each step in the Dockerfile
-	Layers bool
-	// NoCache tells the builder to build the image from scratch without checking for a cache.
-	// It creates a new set of cached images for the build.
-	NoCache bool
-	// RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
-	// during the build process. Default is true.
-	RemoveIntermediateCtrs bool
-	// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
-	// the build was unsuccessful.
-	ForceRmIntermediateCtrs bool
-}
-
-// Executor is a buildah-based implementation of the imagebuilder.Executor
-// interface.
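-// In addition to the imagebuilder.Executor methods (Preserve, Copy, Run, and
-// UnrecognizedInstruction, defined below), it carries the configuration taken
-// from BuildOptions and the per-build state (mount point, volume caches, and
-// the IDs of intermediate containers) that those methods share.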
-type Executor struct {
-	index                          int
-	name                           string
-	named                          map[string]*Executor
-	store                          storage.Store
-	contextDir                     string
-	builder                        *buildah.Builder
-	pullPolicy                     buildah.PullPolicy
-	registry                       string
-	transport                      string
-	ignoreUnrecognizedInstructions bool
-	quiet                          bool
-	runtime                        string
-	runtimeArgs                    []string
-	transientMounts                []Mount
-	compression                    archive.Compression
-	output                         string
-	outputFormat                   string
-	additionalTags                 []string
-	log                            func(format string, args ...interface{})
-	in                             io.Reader
-	out                            io.Writer
-	err                            io.Writer
-	signaturePolicyPath            string
-	systemContext                  *types.SystemContext
-	mountPoint                     string
-	preserved                      int
-	volumes                        imagebuilder.VolumeSet
-	volumeCache                    map[string]string
-	volumeCacheInfo                map[string]os.FileInfo
-	reportWriter                   io.Writer
-	isolation                      buildah.Isolation
-	namespaceOptions               []buildah.NamespaceOption
-	configureNetwork               buildah.NetworkConfigurationPolicy
-	cniPluginPath                  string
-	cniConfigDir                   string
-	idmappingOptions               *buildah.IDMappingOptions
-	commonBuildOptions             *buildah.CommonBuildOptions
-	defaultMountsFilePath          string
-	iidfile                        string
-	squash                         bool
-	labels                         []string
-	annotations                    []string
-	onbuild                        []string
-	layers                         bool
-	topLayers                      []string
-	noCache                        bool
-	removeIntermediateCtrs         bool
-	forceRmIntermediateCtrs        bool
-	containerIDs                   []string // Stores the IDs of the successful intermediate containers used during layer build
-}
-
-// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
-func (b *Executor) withName(name string, index int) *Executor {
-	if b.named == nil {
-		b.named = make(map[string]*Executor)
-	}
-	copied := *b
-	copied.index = index
-	copied.name = name
-	child := &copied
-	b.named[name] = child
-	if idx := strconv.Itoa(index); idx != name {
-		b.named[idx] = child
-	}
-	return child
-}
-
-// Preserve informs the executor that from this point on, it needs to ensure
-// that only COPY and ADD instructions can modify the contents of this
-// directory or anything below it.
-// The Executor handles this by caching the contents of directories which have
-// been marked this way before executing a RUN instruction, invalidating that
-// cache when an ADD or COPY instruction sets any location under the directory
-// as the destination, and using the cache to reset the contents of the
-// directory tree after processing each RUN instruction.
-// It would be simpler if we could just mark the directory as a read-only bind
-// mount of itself during Run(), but the directory is expected to remain
-// writable, even if any changes within it are ultimately discarded.
-func (b *Executor) Preserve(path string) error {
-	logrus.Debugf("PRESERVE %q", path)
-	if b.volumes.Covers(path) {
-		// This path is already a subdirectory of a volume path that
-		// we're already preserving, so there's nothing new to be done
-		// except ensure that it exists.
-		archivedPath := filepath.Join(b.mountPoint, path)
-		if err := os.MkdirAll(archivedPath, 0755); err != nil {
-			return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
-		}
-		if err := b.volumeCacheInvalidate(path); err != nil {
-			return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
-		}
-		return nil
-	}
-	// Figure out where the cache for this volume would be stored.
- b.preserved++ - cacheDir, err := b.store.ContainerDirectory(b.builder.ContainerID) - if err != nil { - return errors.Errorf("unable to locate temporary directory for container") - } - cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", b.preserved)) - // Save info about the top level of the location that we'll be archiving. - archivedPath := filepath.Join(b.mountPoint, path) - - // Try and resolve the symlink (if one exists) - // Set archivedPath and path based on whether a symlink is found or not - if symLink, err := resolveSymLink(b.mountPoint, path); err == nil { - archivedPath = filepath.Join(b.mountPoint, symLink) - path = symLink - } else { - return errors.Wrapf(err, "error reading symbolic link to %q", path) - } - - st, err := os.Stat(archivedPath) - if os.IsNotExist(err) { - if err = os.MkdirAll(archivedPath, 0755); err != nil { - return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath) - } - st, err = os.Stat(archivedPath) - } - if err != nil { - logrus.Debugf("error reading info about %q: %v", archivedPath, err) - return errors.Wrapf(err, "error reading info about volume path %q", archivedPath) - } - b.volumeCacheInfo[path] = st - if !b.volumes.Add(path) { - // This path is not a subdirectory of a volume path that we're - // already preserving, so adding it to the list should work. - return errors.Errorf("error adding %q to the volume cache", path) - } - b.volumeCache[path] = cacheFile - // Now prune cache files for volumes that are now supplanted by this one. - removed := []string{} - for cachedPath := range b.volumeCache { - // Walk our list of cached volumes, and check that they're - // still in the list of locations that we need to cache. - found := false - for _, volume := range b.volumes { - if volume == cachedPath { - // We need to keep this volume's cache. - found = true - break - } - } - if !found { - // We don't need to keep this volume's cache. Make a - // note to remove it. - removed = append(removed, cachedPath) - } - } - // Actually remove the caches that we decided to remove. - for _, cachedPath := range removed { - archivedPath := filepath.Join(b.mountPoint, cachedPath) - logrus.Debugf("no longer need cache of %q in %q", archivedPath, b.volumeCache[cachedPath]) - if err := os.Remove(b.volumeCache[cachedPath]); err != nil { - return errors.Wrapf(err, "error removing %q", b.volumeCache[cachedPath]) - } - delete(b.volumeCache, cachedPath) - } - return nil -} - -// Remove any volume cache item which will need to be re-saved because we're -// writing to part of it. -func (b *Executor) volumeCacheInvalidate(path string) error { - invalidated := []string{} - for cachedPath := range b.volumeCache { - if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) { - invalidated = append(invalidated, cachedPath) - } - } - for _, cachedPath := range invalidated { - if err := os.Remove(b.volumeCache[cachedPath]); err != nil { - return errors.Wrapf(err, "error removing volume cache %q", b.volumeCache[cachedPath]) - } - archivedPath := filepath.Join(b.mountPoint, cachedPath) - logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, b.volumeCache[cachedPath]) - delete(b.volumeCache, cachedPath) - } - return nil -} - -// Save the contents of each of the executor's list of volumes for which we -// don't already have a cache file. 
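-//
-// Together with volumeCacheRestore, this implements the save/restore cycle
-// that Run performs around each RUN instruction: volumeCacheSave tars each
-// preserved volume into the container's directory before the command runs,
-// and volumeCacheRestore untars it afterwards, discarding any changes the
-// command made under the volume paths.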
-func (b *Executor) volumeCacheSave() error { - for cachedPath, cacheFile := range b.volumeCache { - archivedPath := filepath.Join(b.mountPoint, cachedPath) - _, err := os.Stat(cacheFile) - if err == nil { - logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile) - continue - } - if !os.IsNotExist(err) { - return errors.Wrapf(err, "error checking for cache of %q in %q", archivedPath, cacheFile) - } - if err := os.MkdirAll(archivedPath, 0755); err != nil { - return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath) - } - logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile) - cache, err := os.Create(cacheFile) - if err != nil { - return errors.Wrapf(err, "error creating archive at %q", cacheFile) - } - defer cache.Close() - rc, err := archive.Tar(archivedPath, archive.Uncompressed) - if err != nil { - return errors.Wrapf(err, "error archiving %q", archivedPath) - } - defer rc.Close() - _, err = io.Copy(cache, rc) - if err != nil { - return errors.Wrapf(err, "error archiving %q to %q", archivedPath, cacheFile) - } - } - return nil -} - -// Restore the contents of each of the executor's list of volumes. -func (b *Executor) volumeCacheRestore() error { - for cachedPath, cacheFile := range b.volumeCache { - archivedPath := filepath.Join(b.mountPoint, cachedPath) - logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile) - cache, err := os.Open(cacheFile) - if err != nil { - return errors.Wrapf(err, "error opening archive at %q", cacheFile) - } - defer cache.Close() - if err := os.RemoveAll(archivedPath); err != nil { - return errors.Wrapf(err, "error clearing volume path %q", archivedPath) - } - if err := os.MkdirAll(archivedPath, 0755); err != nil { - return errors.Wrapf(err, "error recreating volume path %q", archivedPath) - } - err = archive.Untar(cache, archivedPath, nil) - if err != nil { - return errors.Wrapf(err, "error extracting archive at %q", archivedPath) - } - if st, ok := b.volumeCacheInfo[cachedPath]; ok { - if err := os.Chmod(archivedPath, st.Mode()); err != nil { - return errors.Wrapf(err, "error restoring permissions on %q", archivedPath) - } - if err := os.Chown(archivedPath, 0, 0); err != nil { - return errors.Wrapf(err, "error setting ownership on %q", archivedPath) - } - if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil { - return errors.Wrapf(err, "error restoring datestamps on %q", archivedPath) - } - } - } - return nil -} - -// Copy copies data into the working tree. The "Download" field is how -// imagebuilder tells us the instruction was "ADD" and not "COPY". 
-func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error { - for _, copy := range copies { - logrus.Debugf("COPY %#v, %#v", excludes, copy) - if err := b.volumeCacheInvalidate(copy.Dest); err != nil { - return err - } - sources := []string{} - for _, src := range copy.Src { - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - sources = append(sources, src) - } else if len(copy.From) > 0 { - if other, ok := b.named[copy.From]; ok && other.index < b.index { - sources = append(sources, filepath.Join(other.mountPoint, src)) - } else { - return errors.Errorf("the stage %q has not been built", copy.From) - } - } else { - sources = append(sources, filepath.Join(b.contextDir, src)) - } - } - - options := buildah.AddAndCopyOptions{ - Chown: copy.Chown, - } - - if err := b.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil { - return err - } - } - return nil -} - -func convertMounts(mounts []Mount) []specs.Mount { - specmounts := []specs.Mount{} - for _, m := range mounts { - s := specs.Mount{ - Destination: m.Destination, - Type: m.Type, - Source: m.Source, - Options: m.Options, - } - specmounts = append(specmounts, s) - } - return specmounts -} - -// Run executes a RUN instruction using the working container as a root -// directory. -func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error { - logrus.Debugf("RUN %#v, %#v", run, config) - if b.builder == nil { - return errors.Errorf("no build container available") - } - stdin := b.in - if stdin == nil { - devNull, err := os.Open(os.DevNull) - if err != nil { - return errors.Errorf("error opening %q for reading: %v", os.DevNull, err) - } - defer devNull.Close() - stdin = devNull - } - options := buildah.RunOptions{ - Hostname: config.Hostname, - Runtime: b.runtime, - Args: b.runtimeArgs, - Mounts: convertMounts(b.transientMounts), - Env: config.Env, - User: config.User, - WorkingDir: config.WorkingDir, - Entrypoint: config.Entrypoint, - Cmd: config.Cmd, - Stdin: stdin, - Stdout: b.out, - Stderr: b.err, - Quiet: b.quiet, - } - if config.NetworkDisabled { - options.ConfigureNetwork = buildah.NetworkDisabled - } else { - options.ConfigureNetwork = buildah.NetworkEnabled - } - - args := run.Args - if run.Shell { - args = append([]string{"/bin/sh", "-c"}, args...) - } - if err := b.volumeCacheSave(); err != nil { - return err - } - err := b.builder.Run(args, options) - if err2 := b.volumeCacheRestore(); err2 != nil { - if err == nil { - return err2 - } - } - return err -} - -// UnrecognizedInstruction is called when we encounter an instruction that the -// imagebuilder parser didn't understand. -func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error { - errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command) - err := fmt.Sprintf(errStr+"%#v", step) - if b.ignoreUnrecognizedInstructions { - logrus.Debugf(err) - return nil - } - - switch logrus.GetLevel() { - case logrus.ErrorLevel: - logrus.Errorf(errStr) - case logrus.DebugLevel: - logrus.Debugf(err) - default: - logrus.Errorf("+(UNHANDLED LOGLEVEL) %#v", step) - } - - return errors.Errorf(err) -} - -// NewExecutor creates a new instance of the imagebuilder.Executor interface. 
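-//
-// Most callers go through BuildDockerfiles (below) rather than constructing
-// an Executor directly. A minimal sketch of direct use might look like this
-// (the context directory and image name here are illustrative assumptions,
-// not values defined by this package):
-//
-//	options := BuildOptions{
-//		ContextDirectory: "/path/to/context", // hypothetical build context
-//		PullPolicy:       buildah.PullIfMissing,
-//		Output:           "example/image:latest", // hypothetical tag
-//	}
-//	if err := BuildDockerfiles(ctx, store, options, "Dockerfile"); err != nil {
-//		logrus.Errorf("build failed: %v", err)
-//	}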
-func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { - exec := Executor{ - store: store, - contextDir: options.ContextDirectory, - pullPolicy: options.PullPolicy, - registry: options.Registry, - transport: options.Transport, - ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, - quiet: options.Quiet, - runtime: options.Runtime, - runtimeArgs: options.RuntimeArgs, - transientMounts: options.TransientMounts, - compression: options.Compression, - output: options.Output, - outputFormat: options.OutputFormat, - additionalTags: options.AdditionalTags, - signaturePolicyPath: options.SignaturePolicyPath, - systemContext: options.SystemContext, - volumeCache: make(map[string]string), - volumeCacheInfo: make(map[string]os.FileInfo), - log: options.Log, - in: options.In, - out: options.Out, - err: options.Err, - reportWriter: options.ReportWriter, - isolation: options.Isolation, - namespaceOptions: options.NamespaceOptions, - configureNetwork: options.ConfigureNetwork, - cniPluginPath: options.CNIPluginPath, - cniConfigDir: options.CNIConfigDir, - idmappingOptions: options.IDMappingOptions, - commonBuildOptions: options.CommonBuildOpts, - defaultMountsFilePath: options.DefaultMountsFilePath, - iidfile: options.IIDFile, - squash: options.Squash, - labels: append([]string{}, options.Labels...), - annotations: append([]string{}, options.Annotations...), - layers: options.Layers, - noCache: options.NoCache, - removeIntermediateCtrs: options.RemoveIntermediateCtrs, - forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, - } - if exec.err == nil { - exec.err = os.Stderr - } - if exec.out == nil { - exec.out = os.Stdout - } - if exec.log == nil { - stepCounter := 0 - exec.log = func(format string, args ...interface{}) { - stepCounter++ - prefix := fmt.Sprintf("STEP %d: ", stepCounter) - suffix := "\n" - fmt.Fprintf(exec.err, prefix+format+suffix, args...) - } - } - return &exec, nil -} - -// Prepare creates a working container based on specified image, or if one -// isn't specified, the first FROM instruction we can find in the parsed tree. 
-func (b *Executor) Prepare(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node, from string) error { - if from == "" { - base, err := ib.From(node) - if err != nil { - logrus.Debugf("Prepare(node.Children=%#v)", node.Children) - return errors.Wrapf(err, "error determining starting point for build") - } - from = base - } - logrus.Debugf("FROM %#v", from) - if !b.quiet { - b.log("FROM %s", from) - } - builderOptions := buildah.BuilderOptions{ - Args: ib.Args, - FromImage: from, - PullPolicy: b.pullPolicy, - Registry: b.registry, - Transport: b.transport, - SignaturePolicyPath: b.signaturePolicyPath, - ReportWriter: b.reportWriter, - SystemContext: b.systemContext, - Isolation: b.isolation, - NamespaceOptions: b.namespaceOptions, - ConfigureNetwork: b.configureNetwork, - CNIPluginPath: b.cniPluginPath, - CNIConfigDir: b.cniConfigDir, - IDMappingOptions: b.idmappingOptions, - CommonBuildOpts: b.commonBuildOptions, - DefaultMountsFilePath: b.defaultMountsFilePath, - Format: b.outputFormat, - } - builder, err := buildah.NewBuilder(ctx, b.store, builderOptions) - if err != nil { - return errors.Wrapf(err, "error creating build container") - } - volumes := map[string]struct{}{} - for _, v := range builder.Volumes() { - volumes[v] = struct{}{} - } - dConfig := docker.Config{ - Hostname: builder.Hostname(), - Domainname: builder.Domainname(), - User: builder.User(), - Env: builder.Env(), - Cmd: builder.Cmd(), - Image: from, - Volumes: volumes, - WorkingDir: builder.WorkDir(), - Entrypoint: builder.Entrypoint(), - Labels: builder.Labels(), - Shell: builder.Shell(), - StopSignal: builder.StopSignal(), - OnBuild: builder.OnBuild(), - } - var rootfs *docker.RootFS - if builder.Docker.RootFS != nil { - rootfs = &docker.RootFS{ - Type: builder.Docker.RootFS.Type, - } - for _, id := range builder.Docker.RootFS.DiffIDs { - rootfs.Layers = append(rootfs.Layers, id.String()) - } - } - dImage := docker.Image{ - Parent: builder.FromImage, - ContainerConfig: dConfig, - Container: builder.Container, - Author: builder.Maintainer(), - Architecture: builder.Architecture(), - RootFS: rootfs, - } - dImage.Config = &dImage.ContainerConfig - err = ib.FromImage(&dImage, node) - if err != nil { - if err2 := builder.Delete(); err2 != nil { - logrus.Debugf("error deleting container which we failed to update: %v", err2) - } - return errors.Wrapf(err, "error updating build context") - } - mountPoint, err := builder.Mount(builder.MountLabel) - if err != nil { - if err2 := builder.Delete(); err2 != nil { - logrus.Debugf("error deleting container which we failed to mount: %v", err2) - } - return errors.Wrapf(err, "error mounting new container") - } - b.mountPoint = mountPoint - b.builder = builder - // Add the top layer of this image to b.topLayers so we can keep track of them - // when building with cached images. - b.topLayers = append(b.topLayers, builder.TopLayer) - logrus.Debugln("Container ID:", builder.ContainerID) - return nil -} - -// Delete deletes the working container, if we have one. The Executor object -// should not be used to build another image, as the name of the output image -// isn't resettable. 
-func (b *Executor) Delete() (err error) {
-	if b.builder != nil {
-		err = b.builder.Delete()
-		b.builder = nil
-	}
-	return err
-}
-
-// resolveNameToImageRef creates a types.ImageReference from b.output
-func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
-	var (
-		imageRef types.ImageReference
-		err      error
-	)
-	if b.output != "" {
-		imageRef, err = alltransports.ParseImageName(b.output)
-		if err != nil {
-			candidates, err := util.ResolveName(b.output, "", b.systemContext, b.store)
-			if err != nil {
-				return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
-			}
-			if len(candidates) == 0 {
-				return nil, errors.Errorf("error parsing target image name %q", b.output)
-			}
-			imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
-			if err2 != nil {
-				return nil, errors.Wrapf(err2, "error parsing target image name %q", b.output)
-			}
-			return imageRef2, nil
-		}
-		return imageRef, nil
-	}
-	imageRef, err = is.Transport.ParseStoreReference(b.store, "@"+stringid.GenerateRandomID())
-	if err != nil {
-		return nil, errors.Wrapf(err, "error parsing reference for image to be written")
-	}
-	return imageRef, nil
-}
-
-// Execute runs each of the steps in the parsed tree, in turn.
-func (b *Executor) Execute(ctx context.Context, ib *imagebuilder.Builder, node *parser.Node) error {
-	checkForLayers := true
-	children := node.Children
-	commitName := b.output
-	for i, node := range node.Children {
-		step := ib.Step()
-		if err := step.Resolve(node); err != nil {
-			return errors.Wrapf(err, "error resolving step %+v", *node)
-		}
-		logrus.Debugf("Parsed Step: %+v", *step)
-		if !b.quiet {
-			b.log("%s", step.Original)
-		}
-		requiresStart := false
-		if i < len(node.Children)-1 {
-			requiresStart = ib.RequiresStart(&parser.Node{Children: node.Children[i+1:]})
-		}
-
-		if !b.layers && !b.noCache {
-			err := ib.Run(step, b, requiresStart)
-			if err != nil {
-				return errors.Wrapf(err, "error building at step %+v", *step)
-			}
-			continue
-		}
-
-		if i < len(children)-1 {
-			b.output = ""
-		} else {
-			b.output = commitName
-		}
-
-		var (
-			cacheID string
-			err     error
-			imgID   string
-		)
-		// checkForLayers will be true if b.layers is true and a cached intermediate image is found.
-		// checkForLayers is set to false when either there is no cached image or a break occurs where
-		// the instructions in the Dockerfile change from a previous build.
-		// Don't check for cache if b.noCache is set to true.
-		if checkForLayers && !b.noCache {
-			cacheID, err = b.layerExists(ctx, node, children[:i])
-			if err != nil {
-				return errors.Wrap(err, "error checking if cached image exists from a previous build")
-			}
-		}
-
-		if cacheID != "" {
-			fmt.Fprintf(b.out, "--> Using cache %s\n", cacheID)
-		}
-
-		// If a cache is found for the last step, that means nothing in the
-		// Dockerfile changed. Just create a copy of the existing image and
-		// save it with the new name passed in by the user.
- if cacheID != "" && i == len(children)-1 { - if err := b.copyExistingImage(ctx, cacheID); err != nil { - return err - } - break - } - - if cacheID == "" || !checkForLayers { - checkForLayers = false - err := ib.Run(step, b, requiresStart) - if err != nil { - return errors.Wrapf(err, "error building at step %+v", *step) - } - } - - // Commit if no cache is found - if cacheID == "" { - imgID, err = b.Commit(ctx, ib, getCreatedBy(node)) - if err != nil { - return errors.Wrapf(err, "error committing container for step %+v", *step) - } - if i == len(children)-1 { - b.log("COMMIT %s", b.output) - } - } else { - // Cache is found, assign imgID the id of the cached image so - // it is used to create the container for the next step. - imgID = cacheID - } - // Add container ID of successful intermediate container to b.containerIDs - b.containerIDs = append(b.containerIDs, b.builder.ContainerID) - // Prepare for the next step with imgID as the new base image. - if i != len(children)-1 { - if err := b.Prepare(ctx, ib, node, imgID); err != nil { - return errors.Wrap(err, "error preparing container for next step") - } - } - } - return nil -} - -// copyExistingImage creates a copy of an image already in store -func (b *Executor) copyExistingImage(ctx context.Context, cacheID string) error { - // Get the destination Image Reference - dest, err := b.resolveNameToImageRef() - if err != nil { - return err - } - - policyContext, err := util.GetPolicyContext(b.systemContext) - if err != nil { - return err - } - defer policyContext.Destroy() - - // Look up the source image, expecting it to be in local storage - src, err := is.Transport.ParseStoreReference(b.store, cacheID) - if err != nil { - return errors.Wrapf(err, "error getting source imageReference for %q", cacheID) - } - if err := cp.Image(ctx, policyContext, dest, src, nil); err != nil { - return errors.Wrapf(err, "error copying image %q", cacheID) - } - b.log("COMMIT %s", b.output) - return nil -} - -// layerExists returns true if an intermediate image of currNode exists in the image store from a previous build. -// It verifies tihis by checking the parent of the top layer of the image and the history. -func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, children []*parser.Node) (string, error) { - // Get the list of images available in the image store - images, err := b.store.Images() - if err != nil { - return "", errors.Wrap(err, "error getting image list from store") - } - for _, image := range images { - layer, err := b.store.Layer(image.TopLayer) - if err != nil { - return "", errors.Wrapf(err, "error getting top layer info") - } - // If the parent of the top layer of an image is equal to the last entry in b.topLayers - // it means that this image is potentially a cached intermediate image from a previous - // build. Next we double check that the history of this image is equivalent to the previous - // lines in the Dockerfile up till the point we are at in the build. - if layer.Parent == b.topLayers[len(b.topLayers)-1] { - history, err := b.getImageHistory(ctx, image.ID) - if err != nil { - return "", errors.Wrapf(err, "error getting history of %q", image.ID) - } - // children + currNode is the point of the Dockerfile we are currently at. - if historyMatches(append(children, currNode), history) { - // This checks if the files copied during build have been changed if the node is - // a COPY or ADD command. 
- filesMatch, err := b.copiedFilesMatch(currNode, history[len(history)-1].Created) - if err != nil { - return "", errors.Wrapf(err, "error checking if copied files match") - } - if filesMatch { - return image.ID, nil - } - } - } - } - return "", nil -} - -// getImageHistory returns the history of imageID. -func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) { - imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID) - if err != nil { - return nil, errors.Wrapf(err, "error getting image reference %q", imageID) - } - ref, err := imageRef.NewImage(ctx, nil) - if err != nil { - return nil, errors.Wrap(err, "error creating new image from reference") - } - oci, err := ref.OCIConfig(ctx) - if err != nil { - return nil, errors.Wrapf(err, "error getting oci config of image %q", imageID) - } - return oci.History, nil -} - -// getCreatedBy returns the command the image at node will be created by. -func getCreatedBy(node *parser.Node) string { - if node.Value == "run" { - return "/bin/sh -c " + node.Original[4:] - } - return "/bin/sh -c #(nop) " + node.Original -} - -// historyMatches returns true if the history of the image matches the lines -// in the Dockerfile till the point of build we are at. -// Used to verify whether a cache of the intermediate image exists and whether -// to run the build again. -func historyMatches(children []*parser.Node, history []v1.History) bool { - i := len(history) - 1 - for j := len(children) - 1; j >= 0; j-- { - instruction := children[j].Original - if children[j].Value == "run" { - instruction = instruction[4:] - } - if !strings.Contains(history[i].CreatedBy, instruction) { - return false - } - i-- - } - return true -} - -// getFilesToCopy goes through node to get all the src files that are copied, added or downloaded. -// It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix. -// Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character. -func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) { - currNode := node.Next - var src []string - for currNode.Next != nil { - if currNode.Next == nil { - break - } - if strings.HasPrefix(currNode.Value, "http://") || strings.HasPrefix(currNode.Value, "https://") { - src = append(src, currNode.Value) - currNode = currNode.Next - continue - } - matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value)) - if err != nil { - return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value) - } - src = append(src, matches...) - currNode = currNode.Next - } - return src, nil -} - -// copiedFilesMatch checks to see if the node instruction is a COPY or ADD. -// If it is either of those two it checks the timestamps on all the files copied/added -// by the dockerfile. If the host version has a time stamp greater than the time stamp -// of the build, the build will not use the cached version and will rebuild. -func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) { - if node.Value != "add" && node.Value != "copy" { - return true, nil - } - - src, err := b.getFilesToCopy(node) - if err != nil { - return false, err - } - for _, item := range src { - // for urls, check the Last-Modified field in the header. 
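-		// For example, a response carrying a header such as
-		//   Last-Modified: Tue, 18 Sep 2018 15:31:54 GMT
-		// would invalidate the cache if that timestamp is newer than the
-		// cached image's history time (see urlContentModified below).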
-		if strings.HasPrefix(item, "http://") || strings.HasPrefix(item, "https://") {
-			urlContentNew, err := urlContentModified(item, historyTime)
-			if err != nil {
-				return false, err
-			}
-			if urlContentNew {
-				return false, nil
-			}
-			continue
-		}
-		// For local files, walk the file tree and check the time stamps.
-		timeIsGreater := false
-		err := filepath.Walk(item, func(path string, info os.FileInfo, err error) error {
-			if err != nil {
-				return err
-			}
-			if info.ModTime().After(*historyTime) {
-				timeIsGreater = true
-			}
-			return nil
-		})
-		if err != nil {
-			return false, errors.Wrapf(err, "error walking file tree %q", item)
-		}
-		if timeIsGreater {
-			return false, nil
-		}
-	}
-	return true, nil
-}
-
-// urlContentModified sends a GET request to the url and checks if the header has a value in
-// Last-Modified, and if it does, compares the time stamp to that of the history of the cached image.
-// It returns true if there is no Last-Modified value in the header.
-func urlContentModified(url string, historyTime *time.Time) (bool, error) {
-	resp, err := http.Get(url)
-	if err != nil {
-		return false, errors.Wrapf(err, "error getting %q", url)
-	}
-	defer resp.Body.Close()
-	if lastModified := resp.Header.Get("Last-Modified"); lastModified != "" {
-		lastModifiedTime, err := time.Parse(time.RFC1123, lastModified)
-		if err != nil {
-			return false, errors.Wrapf(err, "error parsing time for %q", url)
-		}
-		return lastModifiedTime.After(*historyTime), nil
-	}
-	logrus.Debugf("Response header did not have Last-Modified %q, will rebuild.", url)
-	return true, nil
-}
-
-// Commit writes the container's contents to an image, using a passed-in tag as
-// the name if there is one, generating a unique ID-based one otherwise.
-func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string) (string, error) {
-	imageRef, err := b.resolveNameToImageRef()
-	if err != nil {
-		return "", err
-	}
-
-	if ib.Author != "" {
-		b.builder.SetMaintainer(ib.Author)
-	}
-	config := ib.Config()
-	b.builder.SetCreatedBy(createdBy)
-	b.builder.SetHostname(config.Hostname)
-	b.builder.SetDomainname(config.Domainname)
-	b.builder.SetUser(config.User)
-	b.builder.ClearPorts()
-	for p := range config.ExposedPorts {
-		b.builder.SetPort(string(p))
-	}
-	for _, envSpec := range config.Env {
-		spec := strings.SplitN(envSpec, "=", 2)
-		if len(spec) > 1 {
-			b.builder.SetEnv(spec[0], spec[1])
-		} else {
-			b.builder.SetEnv(spec[0], "")
-		}
-	}
-	b.builder.SetCmd(config.Cmd)
-	b.builder.ClearVolumes()
-	for v := range config.Volumes {
-		b.builder.AddVolume(v)
-	}
-	b.builder.ClearOnBuild()
-	for _, onBuildSpec := range config.OnBuild {
-		b.builder.SetOnBuild(onBuildSpec)
-	}
-	b.builder.SetWorkDir(config.WorkingDir)
-	b.builder.SetEntrypoint(config.Entrypoint)
-	b.builder.SetShell(config.Shell)
-	b.builder.SetStopSignal(config.StopSignal)
-	b.builder.ClearLabels()
-	for k, v := range config.Labels {
-		b.builder.SetLabel(k, v)
-	}
-	for _, labelSpec := range b.labels {
-		label := strings.SplitN(labelSpec, "=", 2)
-		if len(label) > 1 {
-			b.builder.SetLabel(label[0], label[1])
-		} else {
-			b.builder.SetLabel(label[0], "")
-		}
-	}
-	for _, annotationSpec := range b.annotations {
-		annotation := strings.SplitN(annotationSpec, "=", 2)
-		if len(annotation) > 1 {
-			b.builder.SetAnnotation(annotation[0], annotation[1])
-		} else {
-			b.builder.SetAnnotation(annotation[0], "")
-		}
-	}
-	if imageRef != nil {
-		logName := transports.ImageName(imageRef)
-		logrus.Debugf("COMMIT %q", logName)
-		if !b.quiet && !b.layers && !b.noCache {
-			b.log("COMMIT %s", logName)
-		}
-	} else {
-		logrus.Debugf("COMMIT")
-		if !b.quiet && !b.layers && !b.noCache {
-			b.log("COMMIT")
-		}
-	}
-	writer := b.reportWriter
-	if b.layers || b.noCache {
-		writer = nil
-	}
-	options := buildah.CommitOptions{
-		Compression:           b.compression,
-		SignaturePolicyPath:   b.signaturePolicyPath,
-		AdditionalTags:        b.additionalTags,
-		ReportWriter:          writer,
-		PreferredManifestType: b.outputFormat,
-		IIDFile:               b.iidfile,
-		Squash:                b.squash,
-		Parent:                b.builder.FromImageID,
-	}
-	imgID, err := b.builder.Commit(ctx, imageRef, options)
-	if err != nil {
-		return "", err
-	}
-	if options.IIDFile == "" && imgID != "" {
-		fmt.Fprintf(b.out, "--> %s\n", imgID)
-	}
-	return imgID, nil
-}
-
-// Build takes care of the details of running Prepare/Execute/Commit/Delete
-// over each of the one or more parsed Dockerfiles and stages.
-func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) error {
-	if len(stages) == 0 {
-		return errors.New("error building: no stages to build")
-	}
-	var (
-		stageExecutor *Executor
-		lastErr       error
-	)
-	for _, stage := range stages {
-		stageExecutor = b.withName(stage.Name, stage.Position)
-		if err := stageExecutor.Prepare(ctx, stage.Builder, stage.Node, ""); err != nil {
-			return err
-		}
-		// Always remove the intermediate/build containers, even if the build was unsuccessful.
-		// If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs
-		// is true.
-		if b.forceRmIntermediateCtrs || (!b.layers && !b.noCache) {
-			defer stageExecutor.Delete()
-		}
-		if err := stageExecutor.Execute(ctx, stage.Builder, stage.Node); err != nil {
-			lastErr = err
-		}
-
-		// Delete the successful intermediate containers if an error in the build
-		// process occurs and b.removeIntermediateCtrs is true.
-		if lastErr != nil {
-			if b.removeIntermediateCtrs {
-				stageExecutor.deleteSuccessfulIntermediateCtrs()
-			}
-			return lastErr
-		}
-		b.containerIDs = append(b.containerIDs, stageExecutor.containerIDs...)
-	}
-
-	if !b.layers && !b.noCache {
-		_, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
-		if err != nil {
-			return err
-		}
-	}
-	// If building with layers and b.removeIntermediateCtrs is true,
-	// only remove the intermediate container for each step if an error
-	// during the build process doesn't occur.
-	// If the build is unsuccessful, the container created at the step
-	// where the failure happened will persist in the container store.
-	// This if condition will be false if not building with layers, and
-	// the removal of intermediate/build containers will be handled by the
-	// defer statement above.
-	if b.removeIntermediateCtrs && (b.layers || b.noCache) {
-		if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
-			return errors.Errorf("failed to clean up intermediate containers")
-		}
-	}
-	return nil
-}
-
-// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
-// URLs), creates a new Executor, and then runs Prepare/Execute/Commit/Delete
-// over the entire set of instructions.
-func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOptions, paths ...string) error {
-	if len(paths) == 0 {
-		return errors.Errorf("error building: no dockerfiles specified")
-	}
-	var dockerfiles []io.ReadCloser
-	// Close whatever Dockerfile readers we end up opening; evaluate the
-	// slice at call time rather than at defer time, so that readers
-	// appended below are actually closed.
-	defer func() {
-		for _, d := range dockerfiles {
-			d.Close()
-		}
-	}()
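-	// Each path may name a local file (resolved against the context
-	// directory if needed), an http(s) URL, or a cpp(1)-preprocessed
-	// "*.in" file; each is turned into a ReadCloser to be parsed below.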
-	for _, dfile := range paths {
-		var data io.ReadCloser
-
-		if strings.HasPrefix(dfile, "http://") || strings.HasPrefix(dfile, "https://") {
-			logrus.Debugf("reading remote Dockerfile %q", dfile)
-			resp, err := http.Get(dfile)
-			if err != nil {
-				return errors.Wrapf(err, "error getting %q", dfile)
-			}
-			if resp.ContentLength == 0 {
-				resp.Body.Close()
-				return errors.Errorf("no contents in %q", dfile)
-			}
-			data = resp.Body
-		} else {
-			// If the Dockerfile isn't found try prepending the
-			// context directory to it.
-			if _, err := os.Stat(dfile); os.IsNotExist(err) {
-				dfile = filepath.Join(options.ContextDirectory, dfile)
-			}
-			logrus.Debugf("reading local Dockerfile %q", dfile)
-			contents, err := os.Open(dfile)
-			if err != nil {
-				return errors.Wrapf(err, "error reading %q", dfile)
-			}
-			dinfo, err := contents.Stat()
-			if err != nil {
-				contents.Close()
-				return errors.Wrapf(err, "error reading info about %q", dfile)
-			}
-			if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
-				contents.Close()
-				return errors.Errorf("no contents in %q", dfile)
-			}
-			data = contents
-		}
-
-		// pre-process Dockerfiles with ".in" suffix
-		if strings.HasSuffix(dfile, ".in") {
-			pData, err := preprocessDockerfileContents(data, options.ContextDirectory)
-			if err != nil {
-				return err
-			}
-			data = *pData
-		}
-
-		dockerfiles = append(dockerfiles, data)
-	}
-	mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
-	if err != nil {
-		return errors.Wrapf(err, "error parsing main Dockerfile")
-	}
-	for _, d := range dockerfiles[1:] {
-		additionalNode, err := imagebuilder.ParseDockerfile(d)
-		if err != nil {
-			return errors.Wrapf(err, "error parsing additional Dockerfile")
-		}
-		mainNode.Children = append(mainNode.Children, additionalNode.Children...)
-	}
-	exec, err := NewExecutor(store, options)
-	if err != nil {
-		return errors.Wrapf(err, "error creating build executor")
-	}
-	b := imagebuilder.NewBuilder(options.Args)
-	stages := imagebuilder.NewStages(mainNode, b)
-	return exec.Build(ctx, stages)
-}
-
-// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs
-// and deletes the containers associated with those IDs.
-func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
-	var lastErr error
-	for _, ctr := range b.containerIDs {
-		if err := b.store.DeleteContainer(ctr); err != nil {
-			logrus.Errorf("error deleting build container %q: %v\n", ctr, err)
-			lastErr = err
-		}
-	}
-	return lastErr
-}
-
-// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
-// dockerfile content and will use ctxDir as the base include path.
-//
-// Note: we cannot use cmd.StdoutPipe() as cmd.Wait() closes it.
-func preprocessDockerfileContents(r io.ReadCloser, ctxDir string) (rdrCloser *io.ReadCloser, err error) {
-	cppPath := "/usr/bin/cpp"
-	if _, err = os.Stat(cppPath); err != nil {
-		if os.IsNotExist(err) {
-			err = errors.Errorf("error: Dockerfile.in support requires %s to be installed", cppPath)
-		}
-		return nil, err
-	}
-
-	stdout := bytes.Buffer{}
-	stderr := bytes.Buffer{}
-
-	cmd := exec.Command(cppPath, "-E", "-iquote", ctxDir, "-")
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-
-	pipe, err := cmd.StdinPipe()
-	if err != nil {
-		return nil, err
-	}
-
-	defer func() {
-		if err != nil {
-			pipe.Close()
-		}
-	}()
-
-	if err = cmd.Start(); err != nil {
-		return nil, err
-	}
-
-	if _, err = io.Copy(pipe, r); err != nil {
-		return nil, err
-	}
-
-	pipe.Close()
-	if err = cmd.Wait(); err != nil {
-		if stderr.Len() > 0 {
-			err = fmt.Errorf("%v: %s", err, strings.TrimSpace(stderr.String()))
-		}
-		return nil, errors.Wrapf(err, "error pre-processing Dockerfile")
-	}
-
-	rc := ioutil.NopCloser(bytes.NewReader(stdout.Bytes()))
-	return &rc, nil
-}
diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go b/vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go
deleted file mode 100644
index f1fec7f70..000000000
--- a/vendor/github.com/projectatomic/buildah/imagebuildah/chroot_symlink.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package imagebuildah
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/containers/storage/pkg/reexec"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-)
-
-const (
-	symlinkChrootedCommand = "chrootsymlinks-resolve"
-	maxSymlinksResolved    = 40
-)
-
-func init() {
-	reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks)
-}
-
-func resolveChrootedSymlinks() {
-	status := 0
-	flag.Parse()
-	if len(flag.Args()) < 1 {
-		os.Exit(1)
-	}
-	// Our first parameter is the directory to chroot into.
-	if err := unix.Chdir(flag.Arg(0)); err != nil {
-		fmt.Fprintf(os.Stderr, "chdir(): %v\n", err)
-		os.Exit(1)
-	}
-	if err := unix.Chroot(flag.Arg(0)); err != nil {
-		fmt.Fprintf(os.Stderr, "chroot(): %v\n", err)
-		os.Exit(1)
-	}
-
-	// Our second parameter is the path name to evaluate for symbolic links
-	symLink, err := getSymbolicLink(flag.Arg(0), flag.Arg(1))
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "error getting symbolic links: %v\n", err)
-		os.Exit(1)
-	}
-	if _, err := os.Stdout.WriteString(symLink); err != nil {
-		fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err)
-		os.Exit(1)
-	}
-	os.Exit(status)
-}
-
-func resolveSymLink(rootdir, filename string) (string, error) {
-	// The child process expects a chroot and one path that
-	// will be consulted relative to the chroot directory and evaluated
-	// for any symbolic links present.
-	cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename)
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		return "", errors.Wrapf(err, "%s", string(output))
-	}
-
-	// Hand back the resolved symlink; it will be "" if a symlink is not found.
-	return string(output), nil
-}
-
-// getSymbolicLink goes through each part of the path and continues resolving symlinks as they appear.
-// It returns the full target path that "path" resolves to.
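-// For example (inside the chroot), resolving "/a/b" where "/a" is a symlink
-// to "/x" continues at "/x/b"; if "/x/b" is not itself a symlink, the
-// function returns "/x/b".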
-func getSymbolicLink(rootdir, path string) (string, error) {
-	var (
-		symPath          string
-		symLinksResolved int
-	)
-
-	// Split the path so that we can resolve one part of the path at a time.
-	splitPath := strings.Split(path, "/")
-	if splitPath[0] == "" {
-		splitPath = splitPath[1:]
-		symPath = "/"
-	}
-
-	for _, p := range splitPath {
-		// If we have resolved maxSymlinksResolved symlinks, something is terribly wrong;
-		// return an error and exit.
-		if symLinksResolved >= maxSymlinksResolved {
-			return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
-		}
-
-		symPath = filepath.Join(symPath, p)
-		isSymlink, resolvedPath, err := hasSymlink(symPath)
-		if err != nil {
-			return "", errors.Wrapf(err, "error checking symlink for %q", symPath)
-		}
-		// If isSymlink is true, check if resolvedPath is potentially another symlink.
-		// Keep doing this until resolvedPath is not a symlink and isSymlink is false.
-		for isSymlink {
-			// Need to keep track of the number of symlinks resolved.
-			// Will also return an error if the symlink points to itself, as that will exceed maxSymlinksResolved.
-			if symLinksResolved >= maxSymlinksResolved {
-				return "", errors.Errorf("have resolved %d symlinks, something is terribly wrong!", maxSymlinksResolved)
-			}
-			isSymlink, resolvedPath, err = hasSymlink(resolvedPath)
-			if err != nil {
-				return "", errors.Wrapf(err, "error checking symlink for %q", resolvedPath)
-			}
-			symLinksResolved++
-		}
-		// Assign resolvedPath to symPath. The next part of the loop will append the next part of the original path
-		// and continue resolving.
-		symPath = resolvedPath
-		symLinksResolved++
-	}
-	return symPath, nil
-}
-
-// hasSymlink returns true and the target if path is a symlink;
-// otherwise it returns false and path.
-func hasSymlink(path string) (bool, string, error) {
-	info, err := os.Lstat(path)
-	if os.IsNotExist(err) {
-		if err = os.MkdirAll(path, 0755); err != nil {
-			return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
-		}
-		info, err = os.Lstat(path)
-		if err != nil {
-			return false, "", errors.Wrapf(err, "error running lstat on %q", path)
-		}
-	}
-	// Return false and path, as path is not a symlink.
-	if info.Mode()&os.ModeSymlink != os.ModeSymlink {
-		return false, path, nil
-	}
-
-	// Read the symlink to get what it points to.
-	targetDir, err := os.Readlink(path)
-	if err != nil {
-		return false, "", errors.Wrapf(err, "error reading link %q", path)
-	}
-	// If the symlink points to a relative path, prepend the path so far to the resolved path.
-	if !filepath.IsAbs(targetDir) {
-		targetDir = filepath.Join(path, targetDir)
-	}
-	// Run filepath.Clean to remove the ".." from relative paths.
-	return true, filepath.Clean(targetDir), nil
-}
diff --git a/vendor/github.com/projectatomic/buildah/imagebuildah/util.go b/vendor/github.com/projectatomic/buildah/imagebuildah/util.go
deleted file mode 100644
index b437ea1cb..000000000
--- a/vendor/github.com/projectatomic/buildah/imagebuildah/util.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package imagebuildah
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"strings"
-
-	"github.com/containers/storage/pkg/chrootarchive"
-	"github.com/pkg/errors"
-	"github.com/projectatomic/buildah"
-	"github.com/sirupsen/logrus"
-)
-
-func cloneToDirectory(url, dir string) error {
-	if !strings.HasPrefix(url, "git://") {
-		url = "git://" + url
-	}
-	logrus.Debugf("cloning %q to %q", url, dir)
-	cmd := exec.Command("git", "clone", url, dir)
-	return cmd.Run()
-}
-
-func downloadToDirectory(url, dir string) error {
-	logrus.Debugf("extracting %q to %q", url, dir)
-	resp, err := http.Get(url)
-	if err != nil {
-		return errors.Wrapf(err, "error getting %q", url)
-	}
-	defer resp.Body.Close()
-	if resp.ContentLength == 0 {
-		return errors.Errorf("no contents in %q", url)
-	}
-	if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
-		resp1, err := http.Get(url)
-		if err != nil {
-			return errors.Wrapf(err, "error getting %q", url)
-		}
-		defer resp1.Body.Close()
-		body, err := ioutil.ReadAll(resp1.Body)
-		if err != nil {
-			return errors.Wrapf(err, "failed to read %q", url)
-		}
-		// The content wasn't an archive; assume it is a Dockerfile.
-		dockerfile := filepath.Join(dir, "Dockerfile")
-		if err := ioutil.WriteFile(dockerfile, body, 0600); err != nil {
-			return errors.Wrapf(err, "failed to write %q to %q", url, dockerfile)
-		}
-	}
-	return nil
-}
-
-// TempDirForURL checks if the passed-in string looks like a URL.  If it is,
-// TempDirForURL creates a temporary directory, arranges for its contents to be
-// the contents of that URL, and returns the temporary directory's path, along
-// with the name of a subdirectory which should be used as the build context
-// (which may be empty or ".").  Removal of the temporary directory is the
-// responsibility of the caller.  If the string doesn't look like a URL,
-// TempDirForURL returns empty strings and a nil error.
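-// For example, "github.com/projectatomic/buildah" is rewritten to
-// "https://github.com/projectatomic/buildah/archive/master.tar.gz" and the
-// returned subdirectory is "buildah-master".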
-func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) { - if !strings.HasPrefix(url, "http://") && - !strings.HasPrefix(url, "https://") && - !strings.HasPrefix(url, "git://") && - !strings.HasPrefix(url, "github.com/") { - return "", "", nil - } - name, err = ioutil.TempDir(dir, prefix) - if err != nil { - return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url) - } - if strings.HasPrefix(url, "git://") { - err = cloneToDirectory(url, name) - if err != nil { - if err2 := os.Remove(name); err2 != nil { - logrus.Debugf("error removing temporary directory %q: %v", name, err2) - } - return "", "", err - } - return name, "", nil - } - if strings.HasPrefix(url, "github.com/") { - ghurl := url - url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl) - logrus.Debugf("resolving url %q to %q", ghurl, url) - subdir = path.Base(ghurl) + "-master" - } - if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { - err = downloadToDirectory(url, name) - if err != nil { - if err2 := os.Remove(name); err2 != nil { - logrus.Debugf("error removing temporary directory %q: %v", name, err2) - } - return "", subdir, err - } - return name, subdir, nil - } - logrus.Debugf("don't know how to retrieve %q", url) - if err2 := os.Remove(name); err2 != nil { - logrus.Debugf("error removing temporary directory %q: %v", name, err2) - } - return "", "", errors.Errorf("unreachable code reached") -} - -// InitReexec is a wrapper for buildah.InitReexec(). It should be called at -// the start of main(), and if it returns true, main() should return -// immediately. -func InitReexec() bool { - return buildah.InitReexec() -} diff --git a/vendor/github.com/projectatomic/buildah/import.go b/vendor/github.com/projectatomic/buildah/import.go deleted file mode 100644 index 31288334a..000000000 --- a/vendor/github.com/projectatomic/buildah/import.go +++ /dev/null @@ -1,131 +0,0 @@ -package buildah - -import ( - "context" - - is "github.com/containers/image/storage" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/docker" - "github.com/projectatomic/buildah/util" -) - -func importBuilderDataFromImage(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageID, containerName, containerID string) (*Builder, error) { - if imageID == "" { - return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage") - } - - uidmap, gidmap := convertStorageIDMaps(storage.DefaultStoreOptions.UIDMap, storage.DefaultStoreOptions.GIDMap) - - ref, err := is.Transport.ParseStoreReference(store, imageID) - if err != nil { - return nil, errors.Wrapf(err, "no such image %q", imageID) - } - src, err2 := ref.NewImage(ctx, systemContext) - if err2 != nil { - return nil, errors.Wrapf(err2, "error instantiating image") - } - defer src.Close() - - imageName := "" - if img, err3 := store.Image(imageID); err3 == nil { - if len(img.Names) > 0 { - imageName = img.Names[0] - } - if img.TopLayer != "" { - layer, err4 := store.Layer(img.TopLayer) - if err4 != nil { - return nil, errors.Wrapf(err4, "error reading information about image's top layer") - } - uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap) - } - } - - defaultNamespaceOptions, err := DefaultNamespaceOptions() - if err != nil { - return nil, err - } - - builder := &Builder{ - store: store, - Type: containerType, - FromImage: imageName, - 
FromImageID:      imageID,
-		Container:        containerName,
-		ContainerID:      containerID,
-		ImageAnnotations: map[string]string{},
-		ImageCreatedBy:   "",
-		NamespaceOptions: defaultNamespaceOptions,
-		IDMappingOptions: IDMappingOptions{
-			HostUIDMapping: len(uidmap) == 0,
-			HostGIDMapping: len(gidmap) == 0,
-			UIDMap:         uidmap,
-			GIDMap:         gidmap,
-		},
-	}
-
-	if err := builder.initConfig(ctx, src); err != nil {
-		return nil, errors.Wrapf(err, "error preparing image configuration")
-	}
-
-	return builder, nil
-}
-
-func importBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
-	if options.Container == "" {
-		return nil, errors.Errorf("container name must be specified")
-	}
-
-	c, err := store.Container(options.Container)
-	if err != nil {
-		return nil, err
-	}
-
-	systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
-
-	builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
-	if err != nil {
-		return nil, err
-	}
-
-	if builder.FromImageID != "" {
-		if d, err2 := digest.Parse(builder.FromImageID); err2 == nil {
-			builder.Docker.Parent = docker.ID(d)
-		} else {
-			builder.Docker.Parent = docker.ID(digest.NewDigestFromHex(digest.Canonical.String(), builder.FromImageID))
-		}
-	}
-	if builder.FromImage != "" {
-		builder.Docker.ContainerConfig.Image = builder.FromImage
-	}
-	builder.IDMappingOptions.UIDMap, builder.IDMappingOptions.GIDMap = convertStorageIDMaps(c.UIDMap, c.GIDMap)
-
-	err = builder.Save()
-	if err != nil {
-		return nil, errors.Wrapf(err, "error saving builder state")
-	}
-
-	return builder, nil
-}
-
-func importBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
-	if options.Image == "" {
-		return nil, errors.Errorf("image name must be specified")
-	}
-
-	systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
-
-	_, img, err := util.FindImage(store, "", systemContext, options.Image)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
-	}
-
-	builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
-	if err != nil {
-		return nil, errors.Wrapf(err, "error importing build settings from image %q", options.Image)
-	}
-
-	return builder, nil
-}
diff --git a/vendor/github.com/projectatomic/buildah/mount.go b/vendor/github.com/projectatomic/buildah/mount.go
deleted file mode 100644
index 4f1ae3c6e..000000000
--- a/vendor/github.com/projectatomic/buildah/mount.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package buildah
-
-// Mount mounts a container's root filesystem in a location which can be
-// accessed from the host, and returns the location.
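-// The label parameter is the SELinux mount label to apply; the resulting
-// mount point is also recorded in b.MountPoint and persisted via b.Save().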
-func (b *Builder) Mount(label string) (string, error) {
-	mountpoint, err := b.store.Mount(b.ContainerID, label)
-	if err != nil {
-		return "", err
-	}
-	b.MountPoint = mountpoint
-
-	err = b.Save()
-	if err != nil {
-		return "", err
-	}
-	return mountpoint, nil
-}
diff --git a/vendor/github.com/projectatomic/buildah/new.go b/vendor/github.com/projectatomic/buildah/new.go
deleted file mode 100644
index 0eb8d8e42..000000000
--- a/vendor/github.com/projectatomic/buildah/new.go
+++ /dev/null
@@ -1,370 +0,0 @@
-package buildah
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/containers/image/pkg/sysregistries"
-	is "github.com/containers/image/storage"
-	"github.com/containers/image/transports"
-	"github.com/containers/image/transports/alltransports"
-	"github.com/containers/image/types"
-	"github.com/containers/storage"
-	multierror "github.com/hashicorp/go-multierror"
-	"github.com/opencontainers/selinux/go-selinux"
-	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/openshift/imagebuilder"
-	"github.com/pkg/errors"
-	"github.com/projectatomic/buildah/util"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	// BaseImageFakeName is the "name" of a source image which we interpret
-	// as "no image".
-	BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
-
-	// DefaultTransport is a prefix that we apply to an image name if we
-	// can't find one in the local Store, in order to generate a source
-	// reference for the image that we can then copy to the local Store.
-	DefaultTransport = "docker://"
-
-	// minimumTruncatedIDLength is the minimum length of an identifier that
-	// we'll accept as possibly being a truncated image ID.
-	minimumTruncatedIDLength = 3
-)
-
-func reserveSELinuxLabels(store storage.Store, id string) error {
-	if selinux.GetEnabled() {
-		containers, err := store.Containers()
-		if err != nil {
-			return err
-		}
-
-		for _, c := range containers {
-			if id == c.ID {
-				continue
-			}
-			b, err := OpenBuilder(store, c.ID)
-			if err != nil {
-				if os.IsNotExist(err) {
-					// Ignore "not exist" errors, since the containers were probably created by another tool.
-					// TODO: we need to read the other containers' json data to reserve their SELinux labels.
-					continue
-				}
-				return err
-			}
-			// Prevent different containers from using the same MCS label.
-			if err := label.ReserveLabel(b.ProcessLabel); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func pullAndFindImage(ctx context.Context, store storage.Store, imageName string, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
-	pullOptions := PullOptions{
-		ReportWriter:  options.ReportWriter,
-		Store:         store,
-		SystemContext: options.SystemContext,
-		Transport:     options.Transport,
-	}
-	ref, err := pullImage(ctx, store, imageName, pullOptions, sc)
-	if err != nil {
-		logrus.Debugf("error pulling image %q: %v", imageName, err)
-		return nil, nil, err
-	}
-	img, err := is.Transport.GetStoreImage(store, ref)
-	if err != nil {
-		logrus.Debugf("error reading pulled image %q: %v", imageName, err)
-		return nil, nil, err
-	}
-	return img, ref, nil
-}
-
-func getImageName(name string, img *storage.Image) string {
-	imageName := name
-	if len(img.Names) > 0 {
-		imageName = img.Names[0]
-		// When the image used by the container is a tagged image,
-		// the container name might be set to the original image instead of
-		// the image given in the "from" command line.
-		// This loop is supposed to fix this.
- for _, n := range img.Names { - if strings.Contains(n, name) { - imageName = n - break - } - } - } - return imageName -} - -func imageNamePrefix(imageName string) string { - prefix := imageName - s := strings.Split(imageName, "/") - if len(s) > 0 { - prefix = s[len(s)-1] - } - s = strings.Split(prefix, ":") - if len(s) > 0 { - prefix = s[0] - } - s = strings.Split(prefix, "@") - if len(s) > 0 { - prefix = s[0] - } - return prefix -} - -func newContainerIDMappingOptions(idmapOptions *IDMappingOptions) storage.IDMappingOptions { - var options storage.IDMappingOptions - if idmapOptions != nil { - options.HostUIDMapping = idmapOptions.HostUIDMapping - options.HostGIDMapping = idmapOptions.HostGIDMapping - uidmap, gidmap := convertRuntimeIDMaps(idmapOptions.UIDMap, idmapOptions.GIDMap) - if len(uidmap) > 0 && len(gidmap) > 0 { - options.UIDMap = uidmap - options.GIDMap = gidmap - } else { - options.HostUIDMapping = true - options.HostGIDMapping = true - } - } - return options -} - -func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, *storage.Image, error) { - var ref types.ImageReference - var img *storage.Image - images, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store) - if err != nil { - return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", options.FromImage) - } - var pullErrors *multierror.Error - for _, image := range images { - var err error - if len(image) >= minimumTruncatedIDLength { - if img, err = store.Image(image); err == nil && img != nil && strings.HasPrefix(img.ID, image) { - if ref, err = is.Transport.ParseStoreReference(store, img.ID); err != nil { - return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID) - } - break - } - } - - if options.PullPolicy == PullAlways { - pulledImg, pulledReference, err := pullAndFindImage(ctx, store, image, options, systemContext) - if err != nil { - pullErrors = multierror.Append(pullErrors, err) - logrus.Debugf("unable to pull and read image %q: %v", image, err) - continue - } - ref = pulledReference - img = pulledImg - break - } - - srcRef, err := alltransports.ParseImageName(image) - if err != nil { - if options.Transport == "" { - pullErrors = multierror.Append(pullErrors, err) - logrus.Debugf("error parsing image name %q: %v", image, err) - continue - } - transport := options.Transport - if transport != DefaultTransport { - transport = transport + ":" - } - srcRef2, err := alltransports.ParseImageName(transport + image) - if err != nil { - pullErrors = multierror.Append(pullErrors, err) - logrus.Debugf("error parsing image name %q: %v", image, err) - continue - } - srcRef = srcRef2 - } - - destImage, err := localImageNameForReference(ctx, store, srcRef, options.FromImage) - if err != nil { - return nil, nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef)) - } - if destImage == "" { - return nil, nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef)) - } - - ref, err = is.Transport.ParseStoreReference(store, destImage) - if err != nil { - return nil, nil, errors.Wrapf(err, "error parsing reference to image %q", destImage) - } - img, err = is.Transport.GetStoreImage(store, ref) - if err != nil { - if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy != PullIfMissing { - pullErrors = multierror.Append(pullErrors, err) - logrus.Debugf("no such image %q: %v", 
transports.ImageName(ref), err) - continue - } - pulledImg, pulledReference, err := pullAndFindImage(ctx, store, image, options, systemContext) - if err != nil { - pullErrors = multierror.Append(pullErrors, err) - logrus.Debugf("unable to pull and read image %q: %v", image, err) - continue - } - ref = pulledReference - img = pulledImg - } - break - } - - if img == nil && pullErrors != nil { - return nil, nil, pullErrors - } - - return ref, img, nil -} - -func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { - var ref types.ImageReference - var img *storage.Image - var err error - - if options.FromImage == BaseImageFakeName { - options.FromImage = "" - } - if options.Transport == "" { - options.Transport = DefaultTransport - } - - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) - - if options.FromImage != "scratch" { - ref, img, err = resolveImage(ctx, systemContext, store, options) - if err != nil { - return nil, err - } - if options.FromImage != "" && (ref == nil || img == nil) { - // If options.FromImage is set but we ended up - // with nil in ref or in img then there was an error that - // we should return. - return nil, errors.Wrapf(storage.ErrImageUnknown, "image %q not found in %s registries", options.FromImage, sysregistries.RegistriesConfPath(systemContext)) - } - } - image := options.FromImage - imageID := "" - topLayer := "" - if img != nil { - image = getImageName(imageNamePrefix(image), img) - imageID = img.ID - topLayer = img.TopLayer - } - var src types.ImageCloser - if ref != nil { - src, err = ref.NewImage(ctx, systemContext) - if err != nil { - return nil, errors.Wrapf(err, "error instantiating image for %q", transports.ImageName(ref)) - } - defer src.Close() - } - - name := "working-container" - if options.Container != "" { - name = options.Container - } else { - if image != "" { - name = imageNamePrefix(image) + "-" + name - } - } - - coptions := storage.ContainerOptions{} - coptions.IDMappingOptions = newContainerIDMappingOptions(options.IDMappingOptions) - - container, err := store.CreateContainer("", []string{name}, imageID, "", "", &coptions) - suffix := 1 - for err != nil && errors.Cause(err) == storage.ErrDuplicateName && options.Container == "" { - suffix++ - tmpName := fmt.Sprintf("%s-%d", name, suffix) - if container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions); err == nil { - name = tmpName - } - } - if err != nil { - return nil, errors.Wrapf(err, "error creating container") - } - - defer func() { - if err != nil { - if err2 := store.DeleteContainer(container.ID); err != nil { - logrus.Errorf("error deleting container %q: %v", container.ID, err2) - } - } - }() - - if err = reserveSELinuxLabels(store, container.ID); err != nil { - return nil, err - } - processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts) - if err != nil { - return nil, err - } - uidmap, gidmap := convertStorageIDMaps(container.UIDMap, container.GIDMap) - - defaultNamespaceOptions, err := DefaultNamespaceOptions() - if err != nil { - return nil, err - } - - namespaceOptions := defaultNamespaceOptions - namespaceOptions.AddOrReplace(options.NamespaceOptions...) 
- - builder := &Builder{ - store: store, - Type: containerType, - FromImage: image, - FromImageID: imageID, - Container: name, - ContainerID: container.ID, - ImageAnnotations: map[string]string{}, - ImageCreatedBy: "", - ProcessLabel: processLabel, - MountLabel: mountLabel, - DefaultMountsFilePath: options.DefaultMountsFilePath, - Isolation: options.Isolation, - NamespaceOptions: namespaceOptions, - ConfigureNetwork: options.ConfigureNetwork, - CNIPluginPath: options.CNIPluginPath, - CNIConfigDir: options.CNIConfigDir, - IDMappingOptions: IDMappingOptions{ - HostUIDMapping: len(uidmap) == 0, - HostGIDMapping: len(uidmap) == 0, - UIDMap: uidmap, - GIDMap: gidmap, - }, - AddCapabilities: copyStringSlice(options.AddCapabilities), - DropCapabilities: copyStringSlice(options.DropCapabilities), - CommonBuildOpts: options.CommonBuildOpts, - TopLayer: topLayer, - Args: options.Args, - Format: options.Format, - } - - if options.Mount { - _, err = builder.Mount(mountLabel) - if err != nil { - return nil, errors.Wrapf(err, "error mounting build container") - } - } - - if err := builder.initConfig(ctx, src); err != nil { - return nil, errors.Wrapf(err, "error preparing image configuration") - } - err = builder.Save() - if err != nil { - return nil, errors.Wrapf(err, "error saving builder state") - } - - return builder, nil -} diff --git a/vendor/github.com/projectatomic/buildah/pkg/cli/common.go b/vendor/github.com/projectatomic/buildah/pkg/cli/common.go deleted file mode 100644 index a438daf6a..000000000 --- a/vendor/github.com/projectatomic/buildah/pkg/cli/common.go +++ /dev/null @@ -1,295 +0,0 @@ -package cli - -// the cli package contains urfave/cli related structs that help make up -// the command line for buildah commands. it resides here so other projects -// that vendor in this code can use them too. 
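Because these flag sets are exported precisely so that vendoring projects can reuse them, wiring them into another urfave/cli application is a one-liner per command. A minimal sketch (the "mybud" app and its no-op action are hypothetical; the import path shown is the pre-rename one, which this patch changes to github.com/containers/buildah/pkg/cli):

    package main

    import (
    	"os"

    	bcli "github.com/projectatomic/buildah/pkg/cli"
    	"github.com/urfave/cli"
    )

    func main() {
    	app := cli.NewApp()
    	app.Name = "mybud"
    	app.Commands = []cli.Command{{
    		Name:  "bud",
    		Usage: "build an image from a Dockerfile",
    		// BudFlags plus FromAndBudFlags is the same combination
    		// buildah's own bud command uses.
    		Flags: append(bcli.BudFlags, bcli.FromAndBudFlags...),
    		Action: func(c *cli.Context) error {
    			// a real action would hand c to parse.CommonBuildOptions etc.
    			return nil
    		},
    	}}
    	if err := app.Run(os.Args); err != nil {
    		os.Exit(1)
    	}
    }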
- -import ( - "fmt" - "os" - "strings" - - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/projectatomic/buildah" - "github.com/projectatomic/buildah/util" - "github.com/urfave/cli" -) - -var ( - usernsFlags = []cli.Flag{ - cli.StringFlag{ - Name: "userns", - Usage: "'container', `path` of user namespace to join, or 'host'", - }, - cli.StringSliceFlag{ - Name: "userns-uid-map", - Usage: "`containerID:hostID:length` UID mapping to use in user namespace", - }, - cli.StringSliceFlag{ - Name: "userns-gid-map", - Usage: "`containerID:hostID:length` GID mapping to use in user namespace", - }, - cli.StringFlag{ - Name: "userns-uid-map-user", - Usage: "`name` of entries from /etc/subuid to use to set user namespace UID mapping", - }, - cli.StringFlag{ - Name: "userns-gid-map-group", - Usage: "`name` of entries from /etc/subgid to use to set user namespace GID mapping", - }, - } - - NamespaceFlags = []cli.Flag{ - cli.StringFlag{ - Name: string(specs.IPCNamespace), - Usage: "'container', `path` of IPC namespace to join, or 'host'", - }, - cli.StringFlag{ - Name: string(specs.NetworkNamespace) + ", net", - Usage: "'container', `path` of network namespace to join, or 'host'", - }, - cli.StringFlag{ - Name: "cni-config-dir", - Usage: "`directory` of CNI configuration files", - Value: util.DefaultCNIConfigDir, - }, - cli.StringFlag{ - Name: "cni-plugin-path", - Usage: "`path` of CNI network plugins", - Value: util.DefaultCNIPluginPath, - }, - cli.StringFlag{ - Name: string(specs.PIDNamespace), - Usage: "'container', `path` of PID namespace to join, or 'host'", - }, - cli.StringFlag{ - Name: string(specs.UTSNamespace), - Usage: "'container', `path` of UTS namespace to join, or 'host'", - }, - } - - LayerFlags = []cli.Flag{ - cli.BoolFlag{ - Name: "layers", - Usage: fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override. (default %t)", UseLayers()), - }, - } - - BudFlags = []cli.Flag{ - cli.StringSliceFlag{ - Name: "annotation", - Usage: "Set metadata for an image (default [])", - }, - cli.StringFlag{ - Name: "authfile", - Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", - }, - cli.StringSliceFlag{ - Name: "build-arg", - Usage: "`argument=value` to supply to the builder", - }, - cli.StringFlag{ - Name: "cache-from", - Usage: "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.", - }, - cli.StringFlag{ - Name: "cert-dir", - Value: "", - Usage: "use certificates at the specified path to access the registry", - }, - cli.BoolFlag{ - Name: "compress", - Usage: "This is legacy option, which has no effect on the image", - }, - cli.StringFlag{ - Name: "creds", - Value: "", - Usage: "use `[username[:password]]` for accessing the registry", - }, - cli.BoolFlag{ - Name: "disable-content-trust", - Usage: "This is a Docker specific option and is a NOOP", - }, - cli.StringSliceFlag{ - Name: "file, f", - Usage: "`pathname or URL` of a Dockerfile", - }, - cli.BoolFlag{ - Name: "force-rm", - Usage: "Always remove intermediate containers after a build, even if the build is unsuccessful.", - }, - cli.StringFlag{ - Name: "format", - Usage: "`format` of the built image's manifest and metadata. 
Use BUILDAH_FORMAT environment variable to override.", - Value: DefaultFormat(), - }, - cli.StringFlag{ - Name: "iidfile", - Usage: "`file` to write the image ID to", - }, - cli.StringFlag{ - Name: "isolation", - Usage: "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.", - Value: DefaultIsolation(), - }, - cli.StringSliceFlag{ - Name: "label", - Usage: "Set metadata for an image (default [])", - }, - cli.BoolFlag{ - Name: "no-cache", - Usage: "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.", - }, - cli.StringFlag{ - Name: "logfile", - Usage: "log to `file` instead of stdout/stderr", - }, - cli.IntFlag{ - Name: "loglevel", - Usage: "adjust logging level (range from -2 to 3)", - }, - cli.BoolTFlag{ - Name: "pull", - Usage: "pull the image if not present", - }, - cli.BoolFlag{ - Name: "pull-always", - Usage: "pull the image, even if a version is present", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "refrain from announcing build instructions and image read/write progress", - }, - cli.BoolTFlag{ - Name: "rm", - Usage: "Remove intermediate containers after a successful build (default true)", - }, - cli.StringFlag{ - Name: "runtime", - Usage: "`path` to an alternate runtime. Use BUILDAH_RUNTIME environment variable to override.", - Value: util.Runtime(), - }, - cli.StringSliceFlag{ - Name: "runtime-flag", - Usage: "add global flags for the container runtime", - }, - cli.StringFlag{ - Name: "signature-policy", - Usage: "`pathname` of signature policy file (not usually used)", - }, - cli.BoolFlag{ - Name: "squash", - Usage: "Squash newly built layers into a single new layer. The build process does not currently support caching so this is a NOOP.", - }, - cli.StringSliceFlag{ - Name: "tag, t", - Usage: "tagged `name` to apply to the built image", - }, - cli.BoolTFlag{ - Name: "tls-verify", - Usage: "require HTTPS and verify certificates when accessing the registry", - }, - } - - FromAndBudFlags = append(append([]cli.Flag{ - cli.StringSliceFlag{ - Name: "add-host", - Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])", - }, - cli.StringSliceFlag{ - Name: "cap-add", - Usage: "add the specified capability when running (default [])", - }, - cli.StringSliceFlag{ - Name: "cap-drop", - Usage: "drop the specified capability when running (default [])", - }, - cli.StringFlag{ - Name: "cgroup-parent", - Usage: "optional parent cgroup for the container", - }, - cli.Uint64Flag{ - Name: "cpu-period", - Usage: "limit the CPU CFS (Completely Fair Scheduler) period", - }, - cli.Int64Flag{ - Name: "cpu-quota", - Usage: "limit the CPU CFS (Completely Fair Scheduler) quota", - }, - cli.Uint64Flag{ - Name: "cpu-shares, c", - Usage: "CPU shares (relative weight)", - }, - cli.StringFlag{ - Name: "cpuset-cpus", - Usage: "CPUs in which to allow execution (0-3, 0,1)", - }, - cli.StringFlag{ - Name: "cpuset-mems", - Usage: "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.", - }, - cli.StringFlag{ - Name: "memory, m", - Usage: "memory limit (format: [], where unit = b, k, m or g)", - }, - cli.StringFlag{ - Name: "memory-swap", - Usage: "swap limit equal to memory plus swap: '-1' to enable unlimited swap", - }, - cli.StringSliceFlag{ - Name: "security-opt", - Usage: "security options (default [])", - }, - cli.StringFlag{ - Name: "shm-size", - Usage: "size of '/dev/shm'. 
The format is ``.", - Value: "65536k", - }, - cli.StringSliceFlag{ - Name: "ulimit", - Usage: "ulimit options (default [])", - }, - cli.StringSliceFlag{ - Name: "volume, v", - Usage: "bind mount a volume into the container (default [])", - }, - }, usernsFlags...), NamespaceFlags...) -) - -// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true" -// otherwise it returns false -func UseLayers() bool { - layers := os.Getenv("BUILDAH_LAYERS") - if strings.ToLower(layers) == "true" || layers == "1" { - return true - } - return false -} - -// DefaultFormat returns the default image format -func DefaultFormat() string { - format := os.Getenv("BUILDAH_FORMAT") - if format != "" { - return format - } - return buildah.OCI -} - -// DefaultIsolation returns the default image format -func DefaultIsolation() string { - isolation := os.Getenv("BUILDAH_ISOLATION") - if isolation != "" { - return isolation - } - return buildah.OCI -} - -func VerifyFlagsArgsOrder(args []string) error { - for _, arg := range args { - if strings.HasPrefix(arg, "-") { - return errors.Errorf("No options (%s) can be specified after the image or container name", arg) - } - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go b/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go deleted file mode 100644 index 2dff18818..000000000 --- a/vendor/github.com/projectatomic/buildah/pkg/parse/parse.go +++ /dev/null @@ -1,572 +0,0 @@ -package parse - -// this package should contain functions that parse and validate -// user input and is shared either amongst buildah subcommands or -// would be useful to projects vendoring buildah - -import ( - "fmt" - "net" - "os" - "path/filepath" - "reflect" - "regexp" - "strconv" - "strings" - "unicode" - - "github.com/containers/image/types" - "github.com/containers/storage/pkg/idtools" - "github.com/docker/go-units" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/projectatomic/buildah" - "github.com/sirupsen/logrus" - "github.com/urfave/cli" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sys/unix" -) - -const ( - // SeccompDefaultPath defines the default seccomp path - SeccompDefaultPath = "/usr/share/containers/seccomp.json" - // SeccompOverridePath if this exists it overrides the default seccomp path - SeccompOverridePath = "/etc/crio/seccomp.json" -) - -// CommonBuildOptions parses the build options from the bud cli -func CommonBuildOptions(c *cli.Context) (*buildah.CommonBuildOptions, error) { - var ( - memoryLimit int64 - memorySwap int64 - err error - ) - rlim := unix.Rlimit{Cur: 1048576, Max: 1048576} - defaultLimits := []string{} - if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { - defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max)) - } - if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { - defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max)) - } - if c.String("memory") != "" { - memoryLimit, err = units.RAMInBytes(c.String("memory")) - if err != nil { - return nil, errors.Wrapf(err, "invalid value for memory") - } - } - if c.String("memory-swap") != "" { - memorySwap, err = units.RAMInBytes(c.String("memory-swap")) - if err != nil { - return nil, errors.Wrapf(err, "invalid value for memory-swap") - } - } - if len(c.StringSlice("add-host")) > 0 { - for _, host := range c.StringSlice("add-host") { - if err := validateExtraHost(host); err != nil { - return nil, errors.Wrapf(err, "invalid value 
for add-host") - } - } - } - if _, err := units.FromHumanSize(c.String("shm-size")); err != nil { - return nil, errors.Wrapf(err, "invalid --shm-size") - } - if err := ParseVolumes(c.StringSlice("volume")); err != nil { - return nil, err - } - - commonOpts := &buildah.CommonBuildOptions{ - AddHost: c.StringSlice("add-host"), - CgroupParent: c.String("cgroup-parent"), - CPUPeriod: c.Uint64("cpu-period"), - CPUQuota: c.Int64("cpu-quota"), - CPUSetCPUs: c.String("cpuset-cpus"), - CPUSetMems: c.String("cpuset-mems"), - CPUShares: c.Uint64("cpu-shares"), - Memory: memoryLimit, - MemorySwap: memorySwap, - ShmSize: c.String("shm-size"), - Ulimit: append(defaultLimits, c.StringSlice("ulimit")...), - Volumes: c.StringSlice("volume"), - } - if err := parseSecurityOpts(c.StringSlice("security-opt"), commonOpts); err != nil { - return nil, err - } - return commonOpts, nil -} - -func parseSecurityOpts(securityOpts []string, commonOpts *buildah.CommonBuildOptions) error { - for _, opt := range securityOpts { - if opt == "no-new-privileges" { - return errors.Errorf("no-new-privileges is not supported") - } - con := strings.SplitN(opt, "=", 2) - if len(con) != 2 { - return errors.Errorf("Invalid --security-opt name=value pair: %q", opt) - } - - switch con[0] { - case "label": - commonOpts.LabelOpts = append(commonOpts.LabelOpts, con[1]) - case "apparmor": - commonOpts.ApparmorProfile = con[1] - case "seccomp": - commonOpts.SeccompProfilePath = con[1] - default: - return errors.Errorf("Invalid --security-opt 2: %q", opt) - } - - } - - if commonOpts.SeccompProfilePath == "" { - if _, err := os.Stat(SeccompOverridePath); err == nil { - commonOpts.SeccompProfilePath = SeccompOverridePath - } else { - if !os.IsNotExist(err) { - return errors.Wrapf(err, "can't check if %q exists", SeccompOverridePath) - } - if _, err := os.Stat(SeccompDefaultPath); err != nil { - if !os.IsNotExist(err) { - return errors.Wrapf(err, "can't check if %q exists", SeccompDefaultPath) - } - } else { - commonOpts.SeccompProfilePath = SeccompDefaultPath - } - } - } - return nil -} - -// ParseVolumes validates the host and container paths passed in to the --volume flag -func ParseVolumes(volumes []string) error { - if len(volumes) == 0 { - return nil - } - for _, volume := range volumes { - arr := strings.SplitN(volume, ":", 3) - if len(arr) < 2 { - return errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) - } - if err := validateVolumeHostDir(arr[0]); err != nil { - return err - } - if err := validateVolumeCtrDir(arr[1]); err != nil { - return err - } - if len(arr) > 2 { - if err := validateVolumeOpts(arr[2]); err != nil { - return err - } - } - } - return nil -} - -func validateVolumeHostDir(hostDir string) error { - if !filepath.IsAbs(hostDir) { - return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) - } - if _, err := os.Stat(hostDir); err != nil { - return errors.Wrapf(err, "error checking path %q", hostDir) - } - return nil -} - -func validateVolumeCtrDir(ctrDir string) error { - if !filepath.IsAbs(ctrDir) { - return errors.Errorf("invalid container path, must be an absolute path %q", ctrDir) - } - return nil -} - -func validateVolumeOpts(option string) error { - var foundRootPropagation, foundRWRO, foundLabelChange int - options := strings.Split(option, ",") - for _, opt := range options { - switch opt { - case "rw", "ro": - if foundRWRO > 1 { - return errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", option) - } - foundRWRO++ - case "z", 
"Z": - if foundLabelChange > 1 { - return errors.Errorf("invalid options %q, can only specify 1 'z' or 'Z' option", option) - } - foundLabelChange++ - case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable": - if foundRootPropagation > 1 { - return errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", option) - } - foundRootPropagation++ - default: - return errors.Errorf("invalid option type %q", option) - } - } - return nil -} - -// validateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). -// for add-host flag -func validateExtraHost(val string) error { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := validateIPAddress(arr[1]); err != nil { - return fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return nil -} - -// validateIPAddress validates an Ip address. -// for dns, ip, and ip6 flags also -func validateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateFlags searches for StringFlags or StringSlice flags that never had -// a value set. This commonly occurs when the CLI mistakenly takes the next -// option and uses it as a value. -func ValidateFlags(c *cli.Context, flags []cli.Flag) error { - re, err := regexp.Compile("^-.+") - if err != nil { - return errors.Wrap(err, "compiling regex failed") - } - - // The --cmd flag can have a following command i.e. --cmd="--help". - // Let's skip this check just for the --cmd flag. - for _, flag := range flags { - switch reflect.TypeOf(flag).String() { - case "cli.StringSliceFlag": - { - f := flag.(cli.StringSliceFlag) - name := strings.Split(f.Name, ",") - if f.Name == "cmd" { - continue - } - val := c.StringSlice(name[0]) - for _, v := range val { - if ok := re.MatchString(v); ok { - return errors.Errorf("option --%s requires a value", name[0]) - } - } - } - case "cli.StringFlag": - { - f := flag.(cli.StringFlag) - name := strings.Split(f.Name, ",") - if f.Name == "cmd" { - continue - } - val := c.String(name[0]) - if ok := re.MatchString(val); ok { - return errors.Errorf("option --%s requires a value", name[0]) - } - } - } - } - return nil -} - -// SystemContextFromOptions returns a SystemContext populated with values -// per the input parameters provided by the caller for the use in authentication. 
-func SystemContextFromOptions(c *cli.Context) (*types.SystemContext, error) { - ctx := &types.SystemContext{ - DockerCertPath: c.String("cert-dir"), - } - if c.IsSet("tls-verify") { - ctx.DockerInsecureSkipTLSVerify = !c.BoolT("tls-verify") - } - if c.IsSet("creds") { - var err error - ctx.DockerAuthConfig, err = getDockerAuth(c.String("creds")) - if err != nil { - return nil, err - } - } - if c.IsSet("signature-policy") { - ctx.SignaturePolicyPath = c.String("signature-policy") - } - if c.IsSet("authfile") { - ctx.AuthFilePath = c.String("authfile") - } - if c.GlobalIsSet("registries-conf") { - ctx.SystemRegistriesConfPath = c.GlobalString("registries-conf") - } - if c.GlobalIsSet("registries-conf-dir") { - ctx.RegistriesDirPath = c.GlobalString("registries-conf-dir") - } - ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", buildah.Version) - return ctx, nil -} - -func parseCreds(creds string) (string, string) { - if creds == "" { - return "", "" - } - up := strings.SplitN(creds, ":", 2) - if len(up) == 1 { - return up[0], "" - } - if up[0] == "" { - return "", up[1] - } - return up[0], up[1] -} - -func getDockerAuth(creds string) (*types.DockerAuthConfig, error) { - username, password := parseCreds(creds) - if username == "" { - fmt.Print("Username: ") - fmt.Scanln(&username) - } - if password == "" { - fmt.Print("Password: ") - termPassword, err := terminal.ReadPassword(0) - if err != nil { - return nil, errors.Wrapf(err, "could not read password from terminal") - } - password = string(termPassword) - } - - return &types.DockerAuthConfig{ - Username: username, - Password: password, - }, nil -} - -// IDMappingOptions parses the build options related to user namespaces and ID mapping. -func IDMappingOptions(c *cli.Context) (usernsOptions buildah.NamespaceOptions, idmapOptions *buildah.IDMappingOptions, err error) { - user := c.String("userns-uid-map-user") - group := c.String("userns-gid-map-group") - // If only the user or group was specified, use the same value for the - // other, since we need both in order to initialize the maps using the - // names. - if user == "" && group != "" { - user = group - } - if group == "" && user != "" { - group = user - } - // Either start with empty maps or the name-based maps. - mappings := idtools.NewIDMappingsFromMaps(nil, nil) - if user != "" && group != "" { - submappings, err := idtools.NewIDMappings(user, group) - if err != nil { - return nil, nil, err - } - mappings = submappings - } - // We'll parse the UID and GID mapping options the same way. - buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) { - outmap := make([]specs.LinuxIDMapping, 0, len(basemap)) - // Start with the name-based map entries. - for _, m := range basemap { - outmap = append(outmap, specs.LinuxIDMapping{ - ContainerID: uint32(m.ContainerID), - HostID: uint32(m.HostID), - Size: uint32(m.Size), - }) - } - // Parse the flag's value as one or more triples (if it's even - // been set), and append them. 
- var spec []string - if c.GlobalIsSet(option) { - spec = c.GlobalStringSlice(option) - } - if c.IsSet(option) { - spec = c.StringSlice(option) - } - idmap, err := parseIDMap(spec) - if err != nil { - return nil, err - } - for _, m := range idmap { - outmap = append(outmap, specs.LinuxIDMapping{ - ContainerID: m[0], - HostID: m[1], - Size: m[2], - }) - } - return outmap, nil - } - uidmap, err := buildIDMap(mappings.UIDs(), "userns-uid-map") - if err != nil { - return nil, nil, err - } - gidmap, err := buildIDMap(mappings.GIDs(), "userns-gid-map") - if err != nil { - return nil, nil, err - } - // If we only have one map or the other populated at this point, then - // use the same mapping for both, since we know that no user or group - // name was specified, but a specific mapping was for one or the other. - if len(uidmap) == 0 && len(gidmap) != 0 { - uidmap = gidmap - } - if len(gidmap) == 0 && len(uidmap) != 0 { - gidmap = uidmap - } - // By default, having mappings configured means we use a user - // namespace. Otherwise, we don't. - usernsOption := buildah.NamespaceOption{ - Name: string(specs.UserNamespace), - Host: len(uidmap) == 0 && len(gidmap) == 0, - } - // If the user specifically requested that we either use or don't use - // user namespaces, override that default. - if c.IsSet("userns") { - how := c.String("userns") - switch how { - case "", "container": - usernsOption.Host = false - case "host": - usernsOption.Host = true - default: - if _, err := os.Stat(how); err != nil { - return nil, nil, errors.Wrapf(err, "error checking for %s namespace at %q", string(specs.UserNamespace), how) - } - logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how) - usernsOption.Path = how - } - } - usernsOptions = buildah.NamespaceOptions{usernsOption} - if !c.IsSet("net") { - usernsOptions = append(usernsOptions, buildah.NamespaceOption{ - Name: string(specs.NetworkNamespace), - Host: usernsOption.Host, - }) - } - // If the user requested that we use the host namespace, but also that - // we use mappings, that's not going to work. - if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { - return nil, nil, errors.Errorf("can not specify ID mappings while using host's user namespace") - } - return usernsOptions, &buildah.IDMappingOptions{ - HostUIDMapping: usernsOption.Host, - HostGIDMapping: usernsOption.Host, - UIDMap: uidmap, - GIDMap: gidmap, - }, nil -} - -func parseIDMap(spec []string) (m [][3]uint32, err error) { - for _, s := range spec { - args := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsDigit(r) }) - if len(args)%3 != 0 { - return nil, fmt.Errorf("mapping %q is not in the form containerid:hostid:size[,...]", s) - } - for len(args) >= 3 { - cid, err := strconv.ParseUint(args[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("error parsing container ID %q from mapping %q as a number: %v", args[0], s, err) - } - hostid, err := strconv.ParseUint(args[1], 10, 32) - if err != nil { - return nil, fmt.Errorf("error parsing host ID %q from mapping %q as a number: %v", args[1], s, err) - } - size, err := strconv.ParseUint(args[2], 10, 32) - if err != nil { - return nil, fmt.Errorf("error parsing %q from mapping %q as a number: %v", args[2], s, err) - } - m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)}) - args = args[3:] - } - } - return m, nil -} - -// NamespaceOptions parses the build options for all namespaces except for user namespace. 
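The grammar parseIDMap accepts above is the familiar containerID:hostID:size triple also used by newuidmap and newgidmap. A simplified restatement of the happy path (the real parser splits on any non-digit run and accepts several triples per argument):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // parseTriple handles a single well-formed containerID:hostID:size triple.
    func parseTriple(s string) ([3]uint32, error) {
    	parts := strings.Split(s, ":")
    	if len(parts) != 3 {
    		return [3]uint32{}, fmt.Errorf("mapping %q is not containerID:hostID:size", s)
    	}
    	var out [3]uint32
    	for i, p := range parts {
    		v, err := strconv.ParseUint(p, 10, 32)
    		if err != nil {
    			return [3]uint32{}, err
    		}
    		out[i] = uint32(v)
    	}
    	return out, nil
    }

    func main() {
    	m, _ := parseTriple("0:1000:65536")
    	fmt.Println(m) // [0 1000 65536]: container IDs 0-65535 map to host IDs 1000-66535
    }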
-func NamespaceOptions(c *cli.Context) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) { - options := make(buildah.NamespaceOptions, 0, 7) - policy := buildah.NetworkDefault - for _, what := range []string{string(specs.IPCNamespace), "net", string(specs.PIDNamespace), string(specs.UTSNamespace)} { - if c.IsSet(what) { - how := c.String(what) - switch what { - case "net", "network": - what = string(specs.NetworkNamespace) - } - switch how { - case "", "container": - logrus.Debugf("setting %q namespace to %q", what, "") - options.AddOrReplace(buildah.NamespaceOption{ - Name: what, - }) - case "host": - logrus.Debugf("setting %q namespace to host", what) - options.AddOrReplace(buildah.NamespaceOption{ - Name: what, - Host: true, - }) - default: - if what == specs.NetworkNamespace { - if how == "none" { - options.AddOrReplace(buildah.NamespaceOption{ - Name: what, - }) - policy = buildah.NetworkDisabled - logrus.Debugf("setting network to disabled") - break - } - if !filepath.IsAbs(how) { - options.AddOrReplace(buildah.NamespaceOption{ - Name: what, - Path: how, - }) - policy = buildah.NetworkEnabled - logrus.Debugf("setting network configuration to %q", how) - break - } - } - if _, err := os.Stat(how); err != nil { - return nil, buildah.NetworkDefault, errors.Wrapf(err, "error checking for %s namespace at %q", what, how) - } - logrus.Debugf("setting %q namespace to %q", what, how) - options.AddOrReplace(buildah.NamespaceOption{ - Name: what, - Path: how, - }) - } - } - } - return options, policy, nil -} - -func defaultIsolation() (buildah.Isolation, error) { - isolation, isSet := os.LookupEnv("BUILDAH_ISOLATION") - if isSet { - if strings.HasPrefix(strings.ToLower(isolation), "oci") { - return buildah.IsolationOCI, nil - } else if strings.HasPrefix(strings.ToLower(isolation), "rootless") { - return buildah.IsolationOCIRootless, nil - } else if strings.HasPrefix(strings.ToLower(isolation), "chroot") { - return buildah.IsolationChroot, nil - } - return 0, errors.Errorf("unrecognized $BUILDAH_ISOLATION value %q", isolation) - } - return buildah.IsolationDefault, nil -} - -// IsolationOption parses the --isolation flag. 
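defaultIsolation above and IsolationOption below implement a simple precedence: an explicit, non-empty --isolation value short-circuits the BUILDAH_ISOLATION lookup, and in both cases matching is by case-insensitive prefix. A simplified sketch of the decision (the error path for unrecognized values is dropped here):

    package main

    import (
    	"fmt"
    	"os"
    	"strings"
    )

    // pickIsolation approximates the flag-over-environment precedence.
    func pickIsolation(flagValue string) string {
    	v := flagValue
    	if v == "" {
    		v = os.Getenv("BUILDAH_ISOLATION")
    	}
    	switch {
    	case strings.HasPrefix(strings.ToLower(v), "oci"):
    		return "IsolationOCI"
    	case strings.HasPrefix(strings.ToLower(v), "rootless"):
    		return "IsolationOCIRootless"
    	case strings.HasPrefix(strings.ToLower(v), "chroot"):
    		return "IsolationChroot"
    	}
    	return "IsolationDefault"
    }

    func main() {
    	os.Setenv("BUILDAH_ISOLATION", "chroot")
    	fmt.Println(pickIsolation(""))         // IsolationChroot, from the environment
    	fmt.Println(pickIsolation("rootless")) // IsolationOCIRootless, the flag wins
    }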
-func IsolationOption(c *cli.Context) (buildah.Isolation, error) { - if c.String("isolation") != "" { - if strings.HasPrefix(strings.ToLower(c.String("isolation")), "oci") { - return buildah.IsolationOCI, nil - } else if strings.HasPrefix(strings.ToLower(c.String("isolation")), "rootless") { - return buildah.IsolationOCIRootless, nil - } else if strings.HasPrefix(strings.ToLower(c.String("isolation")), "chroot") { - return buildah.IsolationChroot, nil - } else { - return buildah.IsolationDefault, errors.Errorf("unrecognized isolation type %q", c.String("isolation")) - } - } - return defaultIsolation() -} diff --git a/vendor/github.com/projectatomic/buildah/pull.go b/vendor/github.com/projectatomic/buildah/pull.go deleted file mode 100644 index 1d2bb7f87..000000000 --- a/vendor/github.com/projectatomic/buildah/pull.go +++ /dev/null @@ -1,228 +0,0 @@ -package buildah - -import ( - "context" - "io" - "strings" - - cp "github.com/containers/image/copy" - "github.com/containers/image/docker/reference" - tarfile "github.com/containers/image/docker/tarfile" - ociarchive "github.com/containers/image/oci/archive" - "github.com/containers/image/pkg/sysregistries" - "github.com/containers/image/signature" - is "github.com/containers/image/storage" - "github.com/containers/image/transports" - "github.com/containers/image/transports/alltransports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" -) - -// PullOptions can be used to alter how an image is copied in from somewhere. -type PullOptions struct { - // SignaturePolicyPath specifies an override location for the signature - // policy which should be used for verifying the new image as it is - // being written. Except in specific circumstances, no value should be - // specified, indicating that the shared, system-wide default policy - // should be used. - SignaturePolicyPath string - // ReportWriter is an io.Writer which will be used to log the writing - // of the new image. - ReportWriter io.Writer - // Store is the local storage store which holds the source image. - Store storage.Store - // github.com/containers/image/types SystemContext to hold credentials - // and other authentication/authorization information. - SystemContext *types.SystemContext - // Transport is a value which is prepended to the image's name, if the - // image name alone can not be resolved to a reference to a source - // image. No separator is implicitly added. 
- Transport string -} - -func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) { - if srcRef == nil { - return "", errors.Errorf("reference to image is empty") - } - split := strings.SplitN(spec, ":", 2) - file := split[len(split)-1] - var name string - switch srcRef.Transport().Name() { - case util.DockerArchive: - tarSource, err := tarfile.NewSourceFromFile(file) - if err != nil { - return "", err - } - manifest, err := tarSource.LoadTarManifest() - if err != nil { - return "", errors.Errorf("error retrieving manifest.json: %v", err) - } - // to pull the first image stored in the tar file - if len(manifest) == 0 { - // use the hex of the digest if no manifest is found - name, err = getImageDigest(ctx, srcRef, nil) - if err != nil { - return "", err - } - } else { - if len(manifest[0].RepoTags) > 0 { - name = manifest[0].RepoTags[0] - } else { - // If the input image has no repotags, we need to feed it a dest anyways - name, err = getImageDigest(ctx, srcRef, nil) - if err != nil { - return "", err - } - } - } - case util.OCIArchive: - // retrieve the manifest from index.json to access the image name - manifest, err := ociarchive.LoadManifestDescriptor(srcRef) - if err != nil { - return "", errors.Wrapf(err, "error loading manifest for %q", srcRef) - } - // if index.json has no reference name, compute the image digest instead - if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" { - name, err = getImageDigest(ctx, srcRef, nil) - if err != nil { - return "", err - } - } else { - name = manifest.Annotations["org.opencontainers.image.ref.name"] - } - case util.DirTransport: - // supports pull from a directory - name = split[1] - // remove leading "/" - if name[:1] == "/" { - name = name[1:] - } - default: - ref := srcRef.DockerReference() - if ref == nil { - name = srcRef.StringWithinTransport() - _, err := is.Transport.ParseStoreReference(store, name) - if err == nil { - return name, nil - } - if strings.LastIndex(name, "/") != -1 { - name = name[strings.LastIndex(name, "/")+1:] - _, err = is.Transport.ParseStoreReference(store, name) - if err == nil { - return name, nil - } - } - return "", errors.Errorf("reference to image %q is not a named reference", transports.ImageName(srcRef)) - } - - if named, ok := ref.(reference.Named); ok { - name = named.Name() - if namedTagged, ok := ref.(reference.NamedTagged); ok { - name = name + ":" + namedTagged.Tag() - } - if canonical, ok := ref.(reference.Canonical); ok { - name = name + "@" + canonical.Digest().String() - } - } - } - - if _, err := is.Transport.ParseStoreReference(store, name); err != nil { - return "", errors.Wrapf(err, "error parsing computed local image name %q", name) - } - return name, nil -} - -// Pull copies the contents of the image from somewhere else. 
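A minimal sketch of driving Pull from a vendoring project (error handling is abbreviated; storage.DefaultStoreOptions is the default-options variable shipped with the containers/storage vintage vendored here, and the bare name "alpine" relies on Transport being prepended because it parses with no transport of its own):

    package main

    import (
    	"context"
    	"os"

    	"github.com/containers/storage"
    	"github.com/projectatomic/buildah"
    )

    func main() {
    	store, err := storage.GetStore(storage.DefaultStoreOptions)
    	if err != nil {
    		os.Exit(1)
    	}
    	ref, err := buildah.Pull(context.TODO(), "alpine", buildah.PullOptions{
    		Store:        store,
    		ReportWriter: os.Stderr,
    		Transport:    buildah.DefaultTransport, // "docker://"
    	})
    	if err != nil {
    		os.Exit(1)
    	}
    	_ = ref // a types.ImageReference naming the now-local copy
    }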
-func Pull(ctx context.Context, imageName string, options PullOptions) (types.ImageReference, error) { - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) - return pullImage(ctx, options.Store, imageName, options, systemContext) -} - -func pullImage(ctx context.Context, store storage.Store, imageName string, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) { - spec := imageName - srcRef, err := alltransports.ParseImageName(spec) - if err != nil { - if options.Transport == "" { - return nil, errors.Wrapf(err, "error parsing image name %q", spec) - } - transport := options.Transport - if transport != DefaultTransport { - transport = transport + ":" - } - spec = transport + spec - srcRef2, err2 := alltransports.ParseImageName(spec) - if err2 != nil { - return nil, errors.Wrapf(err2, "error parsing image name %q", spec) - } - srcRef = srcRef2 - } - - destName, err := localImageNameForReference(ctx, store, srcRef, spec) - if err != nil { - return nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef)) - } - if destName == "" { - return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef)) - } - - destRef, err := is.Transport.ParseStoreReference(store, destName) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image name %q", destName) - } - - policy, err := signature.DefaultPolicy(sc) - if err != nil { - return nil, errors.Wrapf(err, "error obtaining default signature policy") - } - - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return nil, errors.Wrapf(err, "error creating new signature policy context") - } - - defer func() { - if err2 := policyContext.Destroy(); err2 != nil { - logrus.Debugf("error destroying signature policy context: %v", err2) - } - }() - - logrus.Debugf("copying %q to %q", spec, destName) - pullError := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, sc, nil, "")) - if pullError == nil { - return destRef, nil - } - - // If no image was found, we should handle. Lets be nicer to the user and see if we can figure out why. 
- registryPath := sysregistries.RegistriesConfPath(sc) - searchRegistries, err := getRegistries(sc) - if err != nil { - return nil, err - } - hasRegistryInName, err := hasRegistry(imageName) - if err != nil { - return nil, err - } - if !hasRegistryInName && len(searchRegistries) == 0 { - return nil, errors.Errorf("image name provided is a short name and no search registries are defined in %s: %s", registryPath, pullError) - } - return nil, pullError -} - -// getImageDigest creates an image object and uses the hex value of the digest as the image ID -// for parsing the store reference -func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.SystemContext) (string, error) { - newImg, err := src.NewImage(ctx, sc) - if err != nil { - return "", err - } - defer newImg.Close() - - digest := newImg.ConfigInfo().Digest - if err = digest.Validate(); err != nil { - return "", errors.Wrapf(err, "error getting config info") - } - return "@" + digest.Hex(), nil -} diff --git a/vendor/github.com/projectatomic/buildah/run.go b/vendor/github.com/projectatomic/buildah/run.go deleted file mode 100644 index 2ce5add39..000000000 --- a/vendor/github.com/projectatomic/buildah/run.go +++ /dev/null @@ -1,1995 +0,0 @@ -package buildah - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/containernetworking/cni/libcni" - "github.com/containers/libpod/pkg/secrets" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/reexec" - units "github.com/docker/go-units" - digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/runtime-tools/generate" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/bind" - "github.com/projectatomic/buildah/chroot" - "github.com/projectatomic/buildah/util" - "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh/terminal" - "golang.org/x/sys/unix" -) - -const ( - // DefaultWorkingDir is used if none was specified. - DefaultWorkingDir = "/" - // runUsingRuntimeCommand is a command we use as a key for reexec - runUsingRuntimeCommand = Package + "-oci-runtime" -) - -// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal. -type TerminalPolicy int - -const ( - // DefaultTerminal indicates that this Run invocation should be - // connected to a pseudoterminal if we're connected to a terminal. - DefaultTerminal TerminalPolicy = iota - // WithoutTerminal indicates that this Run invocation should NOT be - // connected to a pseudoterminal. - WithoutTerminal - // WithTerminal indicates that this Run invocation should be connected - // to a pseudoterminal. - WithTerminal -) - -// String converts a TerminalPoliicy into a string. -func (t TerminalPolicy) String() string { - switch t { - case DefaultTerminal: - return "DefaultTerminal" - case WithoutTerminal: - return "WithoutTerminal" - case WithTerminal: - return "WithTerminal" - } - return fmt.Sprintf("unrecognized terminal setting %d", t) -} - -// NamespaceOption controls how we set up a namespace when launching processes. -type NamespaceOption struct { - // Name specifies the type of namespace, typically matching one of the - // ...Namespace constants defined in - // github.com/opencontainers/runtime-spec/specs-go. 
- Name string - // Host is used to force our processes to use the host's namespace of - // this type. - Host bool - // Path is the path of the namespace to attach our process to, if Host - // is not set. If Host is not set and Path is also empty, a new - // namespace will be created for the process that we're starting. - // If Name is specs.NetworkNamespace, if Path doesn't look like an - // absolute path, it is treated as a comma-separated list of CNI - // configuration names which will be selected from among all of the CNI - // network configurations which we find. - Path string -} - -// NamespaceOptions provides some helper methods for a slice of NamespaceOption -// structs. -type NamespaceOptions []NamespaceOption - -// IDMappingOptions controls how we set up UID/GID mapping when we set up a -// user namespace. -type IDMappingOptions struct { - HostUIDMapping bool - HostGIDMapping bool - UIDMap []specs.LinuxIDMapping - GIDMap []specs.LinuxIDMapping -} - -// Isolation provides a way to specify whether we're supposed to use a proper -// OCI runtime, or some other method for running commands. -type Isolation int - -const ( - // IsolationDefault is whatever we think will work best. - IsolationDefault Isolation = iota - // IsolationOCI is a proper OCI runtime. - IsolationOCI - // IsolationChroot is a more chroot-like environment: less isolation, - // but with fewer requirements. - IsolationChroot - // IsolationOCIRootless is a proper OCI runtime in rootless mode. - IsolationOCIRootless -) - -// String converts a Isolation into a string. -func (i Isolation) String() string { - switch i { - case IsolationDefault: - return "IsolationDefault" - case IsolationOCI: - return "IsolationOCI" - case IsolationChroot: - return "IsolationChroot" - case IsolationOCIRootless: - return "IsolationOCIRootless" - } - return fmt.Sprintf("unrecognized isolation type %d", i) -} - -// RunOptions can be used to alter how a command is run in the container. -type RunOptions struct { - // Hostname is the hostname we set for the running container. - Hostname string - // Isolation is either IsolationDefault, IsolationOCI, IsolationChroot, or IsolationOCIRootless. - Isolation Isolation - // Runtime is the name of the runtime to run. It should accept the - // same arguments that runc does, and produce similar output. - Runtime string - // Args adds global arguments for the runtime. - Args []string - // Mounts are additional mount points which we want to provide. - Mounts []specs.Mount - // Env is additional environment variables to set. - Env []string - // User is the user as whom to run the command. - User string - // WorkingDir is an override for the working directory. - WorkingDir string - // Shell is default shell to run in a container. - Shell string - // Cmd is an override for the configured default command. - Cmd []string - // Entrypoint is an override for the configured entry point. - Entrypoint []string - // NamespaceOptions controls how we set up the namespaces for the process. - NamespaceOptions NamespaceOptions - // ConfigureNetwork controls whether or not network interfaces and - // routing are configured for a new network namespace (i.e., when not - // joining another's namespace and not just using the host's - // namespace), effectively deciding whether or not the process has a - // usable network. - ConfigureNetwork NetworkConfigurationPolicy - // CNIPluginPath is the location of CNI plugin helpers, if they should be - // run from a location other than the default location. 
- CNIPluginPath string - // CNIConfigDir is the location of CNI configuration files, if the files in - // the default configuration directory shouldn't be used. - CNIConfigDir string - // Terminal provides a way to specify whether or not the command should - // be run with a pseudoterminal. By default (DefaultTerminal), a - // terminal is used if os.Stdout is connected to a terminal, but that - // decision can be overridden by specifying either WithTerminal or - // WithoutTerminal. - Terminal TerminalPolicy - // TerminalSize provides a way to set the number of rows and columns in - // a pseudo-terminal, if we create one, and Stdin/Stdout/Stderr aren't - // connected to a terminal. - TerminalSize *specs.Box - // The stdin/stdout/stderr descriptors to use. If set to nil, the - // corresponding files in the "os" package are used as defaults. - Stdin io.Reader `json:"-"` - Stdout io.Writer `json:"-"` - Stderr io.Writer `json:"-"` - // Quiet tells the run to turn off output to stdout. - Quiet bool - // AddCapabilities is a list of capabilities to add to the default set. - AddCapabilities []string - // DropCapabilities is a list of capabilities to remove from the default set, - // after processing the AddCapabilities set. If a capability appears in both - // lists, it will be dropped. - DropCapabilities []string -} - -// DefaultNamespaceOptions returns the default namespace settings from the -// runtime-tools generator library. -func DefaultNamespaceOptions() (NamespaceOptions, error) { - options := NamespaceOptions{ - {Name: string(specs.CgroupNamespace), Host: true}, - {Name: string(specs.IPCNamespace), Host: true}, - {Name: string(specs.MountNamespace), Host: true}, - {Name: string(specs.NetworkNamespace), Host: true}, - {Name: string(specs.PIDNamespace), Host: true}, - {Name: string(specs.UserNamespace), Host: true}, - {Name: string(specs.UTSNamespace), Host: true}, - } - g, err := generate.New("linux") - if err != nil { - return options, err - } - spec := g.Config - if spec.Linux != nil { - for _, ns := range spec.Linux.Namespaces { - options.AddOrReplace(NamespaceOption{ - Name: string(ns.Type), - Path: ns.Path, - }) - } - } - return options, nil -} - -// Find the configuration for the namespace of the given type. If there are -// duplicates, find the _last_ one of the type, since we assume it was appended -// more recently. -func (n *NamespaceOptions) Find(namespace string) *NamespaceOption { - for i := range *n { - j := len(*n) - 1 - i - if (*n)[j].Name == namespace { - return &((*n)[j]) - } - } - return nil -} - -// AddOrReplace either adds or replaces the configuration for a given namespace. 
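Find scans from the end of the slice, so the most recently added entry for a name wins, and AddOrReplace (below) overwrites that entry in place rather than appending a duplicate. A small sketch of the two together (the netns path is hypothetical):

    package main

    import (
    	"fmt"

    	"github.com/opencontainers/runtime-spec/specs-go"
    	"github.com/projectatomic/buildah"
    )

    func main() {
    	opts, err := buildah.DefaultNamespaceOptions()
    	if err != nil {
    		panic(err)
    	}
    	// Replace the default host-network entry with a named netns path.
    	opts.AddOrReplace(buildah.NamespaceOption{
    		Name: string(specs.NetworkNamespace),
    		Path: "/var/run/netns/build",
    	})
    	net := opts.Find(string(specs.NetworkNamespace))
    	fmt.Println(net.Host, net.Path) // false /var/run/netns/build
    }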
-func (n *NamespaceOptions) AddOrReplace(options ...NamespaceOption) { -nextOption: - for _, option := range options { - for i := range *n { - j := len(*n) - 1 - i - if (*n)[j].Name == option.Name { - (*n)[j] = option - continue nextOption - } - } - *n = append(*n, option) - } -} - -func addRlimits(ulimit []string, g *generate.Generator) error { - var ( - ul *units.Ulimit - err error - ) - - for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { - return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) - } - - g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) - } - return nil -} - -func addHosts(hosts []string, w io.Writer) error { - buf := bufio.NewWriter(w) - for _, host := range hosts { - values := strings.SplitN(host, ":", 2) - if len(values) != 2 { - return errors.Errorf("unable to parse host entry %q: incorrect format", host) - } - if values[0] == "" { - return errors.Errorf("hostname in host entry %q is empty", host) - } - if values[1] == "" { - return errors.Errorf("IP address in host entry %q is empty", host) - } - fmt.Fprintf(buf, "%s\t%s\n", values[1], values[0]) - } - return buf.Flush() -} - -func addHostsToFile(hosts []string, filename string) error { - if len(hosts) == 0 { - return nil - } - file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, os.ModeAppend) - if err != nil { - return err - } - defer file.Close() - return addHosts(hosts, file) -} - -func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error { - // Resources - CPU - if commonOpts.CPUPeriod != 0 { - g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) - } - if commonOpts.CPUQuota != 0 { - g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota) - } - if commonOpts.CPUShares != 0 { - g.SetLinuxResourcesCPUShares(commonOpts.CPUShares) - } - if commonOpts.CPUSetCPUs != "" { - g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs) - } - if commonOpts.CPUSetMems != "" { - g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) - } - - // Resources - Memory - if commonOpts.Memory != 0 { - g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) - } - if commonOpts.MemorySwap != 0 { - g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) - } - - // cgroup membership - if commonOpts.CgroupParent != "" { - g.SetLinuxCgroupsPath(commonOpts.CgroupParent) - } - - // Other process resource limits - if err := addRlimits(commonOpts.Ulimit, g); err != nil { - return err - } - - logrus.Debugf("Resources: %#v", commonOpts) - return nil -} - -func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions NamespaceOptions) error { - // Start building a new list of mounts. - var mounts []specs.Mount - haveMount := func(destination string) bool { - for _, mount := range mounts { - if mount.Destination == destination { - // Already have something to mount there. - return true - } - } - return false - } - - ipc := namespaceOptions.Find(string(specs.IPCNamespace)) - hostIPC := ipc == nil || ipc.Host - net := namespaceOptions.Find(string(specs.NetworkNamespace)) - hostNetwork := net == nil || net.Host - user := namespaceOptions.Find(string(specs.UserNamespace)) - hostUser := user == nil || user.Host - - // Copy mounts from the generated list. 
- mountCgroups := true - specMounts := []specs.Mount{} - for _, specMount := range spec.Mounts { - // Override some of the mounts from the generated list if we're doing different things with namespaces. - if specMount.Destination == "/dev/shm" { - specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777", "size=" + shmSize} - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) { - continue - } - specMount = specs.Mount{ - Source: "/dev/shm", - Type: "bind", - Destination: "/dev/shm", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/dev/mqueue" { - if hostIPC && !hostUser { - if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) { - continue - } - specMount = specs.Mount{ - Source: "/dev/mqueue", - Type: "bind", - Destination: "/dev/mqueue", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"}, - } - } - } - if specMount.Destination == "/sys" { - if hostNetwork && !hostUser { - mountCgroups = false - if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) { - continue - } - specMount = specs.Mount{ - Source: "/sys", - Type: "bind", - Destination: "/sys", - Options: []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"}, - } - } - } - specMounts = append(specMounts, specMount) - } - - // Add a mount for the cgroups filesystem, unless we're already - // recursively bind mounting all of /sys, in which case we shouldn't - // bother with it. - sysfsMount := []specs.Mount{} - if mountCgroups { - sysfsMount = []specs.Mount{{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"}, - }} - } - - // Get the list of files we need to bind into the container. - bindFileMounts, err := runSetupBoundFiles(bundlePath, bindFiles) - if err != nil { - return err - } - - // After this point we need to know the per-container persistent storage directory. - cdir, err := b.store.ContainerDirectory(b.ContainerID) - if err != nil { - return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID) - } - - // Figure out which UID and GID to tell the secrets package to use - // for files that it creates. - rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err - } - - // Get the list of secrets mounts. - secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID)) - - // Add temporary copies of the contents of volume locations at the - // volume locations, unless we already have something there. - copyWithTar := b.copyWithTar(nil, nil) - builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes) - if err != nil { - return err - } - - // Get the list of explicitly-specified volume mounts. - volumes, err := runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts) - if err != nil { - return err - } - - // Add them all, in the preferred order, except where they conflict with something that was previously added. - for _, mount := range append(append(append(append(append(volumes, builtins...), secretMounts...), bindFileMounts...), specMounts...), sysfsMount...) { - if haveMount(mount.Destination) { - // Already mounting something there, no need to bother with this one. - continue - } - // Add the mount. 
- mounts = append(mounts, mount) - } - - // Set the list in the spec. - spec.Mounts = mounts - return nil -} - -func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount, err error) { - for dest, src := range bindFiles { - options := []string{"rbind"} - if strings.HasPrefix(src, bundlePath) { - options = append(options, bind.NoBindOption) - } - mounts = append(mounts, specs.Mount{ - Source: src, - Destination: dest, - Type: "bind", - Options: options, - }) - } - return mounts, nil -} - -func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string) ([]specs.Mount, error) { - var mounts []specs.Mount - // Add temporary copies of the contents of volume locations at the - // volume locations, unless we already have something there. - for _, volume := range builtinVolumes { - subdir := digest.Canonical.FromString(volume).Hex() - volumePath := filepath.Join(containerDir, "buildah-volumes", subdir) - // If we need to, initialize the volume path's initial contents. - if _, err := os.Stat(volumePath); os.IsNotExist(err) { - if err = os.MkdirAll(volumePath, 0755); err != nil { - return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume) - } - if err = label.Relabel(volumePath, mountLabel, false); err != nil { - return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume) - } - srcPath := filepath.Join(mountPoint, volume) - if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath) - } - - } - // Add the bind mount. - mounts = append(mounts, specs.Mount{ - Source: volumePath, - Destination: volume, - Type: "bind", - Options: []string{"bind"}, - }) - } - return mounts, nil -} - -func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount) ([]specs.Mount, error) { - var mounts []specs.Mount - - parseMount := func(host, container string, options []string) (specs.Mount, error) { - var foundrw, foundro, foundz, foundZ bool - var rootProp string - for _, opt := range options { - switch opt { - case "rw": - foundrw = true - case "ro": - foundro = true - case "z": - foundz = true - case "Z": - foundZ = true - case "private", "rprivate", "slave", "rslave", "shared", "rshared": - rootProp = opt - } - } - if !foundrw && !foundro { - options = append(options, "rw") - } - if foundz { - if err := label.Relabel(host, mountLabel, true); err != nil { - return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) - } - } - if foundZ { - if err := label.Relabel(host, mountLabel, false); err != nil { - return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host) - } - } - if rootProp == "" { - options = append(options, "private") - } - return specs.Mount{ - Destination: container, - Type: "bind", - Source: host, - Options: options, - }, nil - } - // Bind mount volumes specified for this particular Run() invocation - for _, i := range optionMounts { - mount, err := parseMount(i.Source, i.Destination, append(i.Options, "rbind")) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - // Bind mount volumes given by the user when the container was created - for _, i := range volumeMounts { - var options []string - spliti := strings.Split(i, ":") - if len(spliti) > 2 { - options = strings.Split(spliti[2], ",") - } 
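-		// Each entry is expected to look like HOST-DIR:CONTAINER-DIR[:OPTS];
-		// e.g. "/srv/data:/data:ro,z" mounts host /srv/data at /data with
-		// options ["ro", "z", "rbind"] once "rbind" is appended below.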
- options = append(options, "rbind") - mount, err := parseMount(spliti[0], spliti[1], options) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - return mounts, nil -} - -// addNetworkConfig copies files from host and sets them up to bind mount into container -func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair) (string, error) { - copyFileWithTar := b.copyFileWithTar(chownOpts, nil) - - cfile := filepath.Join(rdir, filepath.Base(hostPath)) - - if err := copyFileWithTar(hostPath, cfile); err != nil { - return "", errors.Wrapf(err, "error copying %q for container %q", cfile, b.ContainerID) - } - - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { - return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID) - } - - return cfile, nil -} - -func setupMaskedPaths(g *generate.Generator) { - for _, mp := range []string{ - "/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - } { - g.AddLinuxMaskedPaths(mp) - } -} - -func setupReadOnlyPaths(g *generate.Generator) { - for _, rp := range []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - } { - g.AddLinuxReadonlyPaths(rp) - } -} - -func setupCapAdd(g *generate.Generator, caps ...string) error { - for _, cap := range caps { - if err := g.AddProcessCapabilityBounding(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the bounding capability set", cap) - } - if err := g.AddProcessCapabilityEffective(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the effective capability set", cap) - } - if err := g.AddProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap) - } - if err := g.AddProcessCapabilityPermitted(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) - } - if err := g.AddProcessCapabilityAmbient(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the ambient capability set", cap) - } - } - return nil -} - -func setupCapDrop(g *generate.Generator, caps ...string) error { - for _, cap := range caps { - if err := g.DropProcessCapabilityBounding(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the bounding capability set", cap) - } - if err := g.DropProcessCapabilityEffective(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the effective capability set", cap) - } - if err := g.DropProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap) - } - if err := g.DropProcessCapabilityPermitted(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) - } - if err := g.DropProcessCapabilityAmbient(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the ambient capability set", cap) - } - } - return nil -} - -func setupCapabilities(g *generate.Generator, firstAdds, firstDrops, secondAdds, secondDrops []string) error { - g.ClearProcessCapabilities() - if err := setupCapAdd(g, util.DefaultCapabilities...); err != nil { - return err - } - if err := setupCapAdd(g, firstAdds...); err != nil { - return err - } - if err := setupCapDrop(g, firstDrops...); err != nil { - return err - } - if err := setupCapAdd(g, secondAdds...); 
err != nil { - return err - } - if err := setupCapDrop(g, secondDrops...); err != nil { - return err - } - return nil -} - -func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, terminalSize *specs.Box) { - switch terminalPolicy { - case DefaultTerminal: - onTerminal := terminal.IsTerminal(unix.Stdin) && terminal.IsTerminal(unix.Stdout) && terminal.IsTerminal(unix.Stderr) - if onTerminal { - logrus.Debugf("stdio is a terminal, defaulting to using a terminal") - } else { - logrus.Debugf("stdio is not a terminal, defaulting to not using a terminal") - } - g.SetProcessTerminal(onTerminal) - case WithTerminal: - g.SetProcessTerminal(true) - case WithoutTerminal: - g.SetProcessTerminal(false) - } - if terminalSize != nil { - g.SetProcessConsoleSize(terminalSize.Width, terminalSize.Height) - } -} - -func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, idmapOptions IDMappingOptions, policy NetworkConfigurationPolicy) (configureNetwork bool, configureNetworks []string, configureUTS bool, err error) { - // Set namespace options in the container configuration. - configureUserns := false - specifiedNetwork := false - for _, namespaceOption := range namespaceOptions { - switch namespaceOption.Name { - case string(specs.UserNamespace): - configureUserns = false - if !namespaceOption.Host && namespaceOption.Path == "" { - configureUserns = true - } - case string(specs.NetworkNamespace): - specifiedNetwork = true - configureNetwork = false - if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) { - if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) { - configureNetworks = strings.Split(namespaceOption.Path, ",") - namespaceOption.Path = "" - } - configureNetwork = (policy != NetworkDisabled) - } - case string(specs.UTSNamespace): - configureUTS = false - if !namespaceOption.Host && namespaceOption.Path == "" { - configureUTS = true - } - } - if namespaceOption.Host { - if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil { - return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", namespaceOption.Name) - } - } else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil { - if namespaceOption.Path == "" { - return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", namespaceOption.Name) - } - return false, nil, false, errors.Wrapf(err, "error adding %q namespace %q for run", namespaceOption.Name, namespaceOption.Path) - } - } - - // If we've got mappings, we're going to have to create a user namespace. 
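-	// Each ID mapping is a (container ID, host ID, length) triple.  As a
-	// sketch, a rootless invocation by host UID 1000 might map container
-	// UID 0 to host UID 1000, and container UIDs 1 and up onto a
-	// subordinate range such as one taken from /etc/subuid.  If no
-	// explicit mappings were supplied, we fall back below to identity
-	// mappings derived from the host's own ID mappings.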
-	if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
-		if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
-			return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace))
-		}
-		hostUidmap, hostGidmap, err := util.GetHostIDMappings("")
-		if err != nil {
-			return false, nil, false, err
-		}
-		for _, m := range idmapOptions.UIDMap {
-			g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size)
-		}
-		if len(idmapOptions.UIDMap) == 0 {
-			for _, m := range hostUidmap {
-				g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size)
-			}
-		}
-		for _, m := range idmapOptions.GIDMap {
-			g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size)
-		}
-		if len(idmapOptions.GIDMap) == 0 {
-			for _, m := range hostGidmap {
-				g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size)
-			}
-		}
-		if !specifiedNetwork {
-			if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
-				return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.NetworkNamespace))
-			}
-			configureNetwork = (policy != NetworkDisabled)
-		}
-	} else {
-		if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
-			return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.UserNamespace))
-		}
-		if !specifiedNetwork {
-			if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
-				return false, nil, false, errors.Wrapf(err, "error removing %q namespace for run", string(specs.NetworkNamespace))
-			}
-		}
-	}
-	if configureNetwork {
-		for name, val := range util.DefaultNetworkSysctl {
-			g.AddLinuxSysctl(name, val)
-		}
-	}
-	return configureNetwork, configureNetworks, configureUTS, nil
-}
-
-// Search for a command that isn't given as an absolute path using the $PATH
-// under the rootfs. We can't resolve absolute symbolic links without
-// chroot()ing, which we may not be able to do, so just accept a link as a
-// valid resolution.
-func runLookupPath(g *generate.Generator, command []string) []string {
-	// Look for the configured $PATH.
-	spec := g.Spec()
-	envPath := ""
-	for i := range spec.Process.Env {
-		if strings.HasPrefix(spec.Process.Env[i], "PATH=") {
-			envPath = spec.Process.Env[i]
-		}
-	}
-	// If there is no configured $PATH, supply one.
-	if envPath == "" {
-		defaultPath := "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin"
-		envPath = "PATH=" + defaultPath
-		g.AddProcessEnv("PATH", defaultPath)
-	}
-	// No command, nothing to do.
-	if len(command) == 0 {
-		return command
-	}
-	// Command is already an absolute path, use it as-is.
-	if filepath.IsAbs(command[0]) {
-		return command
-	}
-	// For each element in the PATH,
-	for _, pathEntry := range filepath.SplitList(envPath[5:]) {
-		// if it's the empty string, it's ".", which is the Cwd,
-		if pathEntry == "" {
-			pathEntry = spec.Process.Cwd
-		}
-		// build the absolute path which it might be,
-		candidate := filepath.Join(pathEntry, command[0])
-		// check if it's there,
-		if fi, err := os.Lstat(filepath.Join(spec.Root.Path, candidate)); fi != nil && err == nil {
-			// and if it's not a directory, and either a symlink or executable,
-			if !fi.IsDir() && ((fi.Mode()&os.ModeSymlink != 0) || (fi.Mode()&0111 != 0)) {
-				// use that.
-				return append([]string{candidate}, command[1:]...)
- } - } - } - return command -} - -func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, options RunOptions) error { - // Set the user UID/GID/supplemental group list/capabilities lists. - user, err := b.user(mountPoint, options.User) - if err != nil { - return err - } - if err := setupCapabilities(g, b.AddCapabilities, b.DropCapabilities, options.AddCapabilities, options.DropCapabilities); err != nil { - return err - } - g.SetProcessUID(user.UID) - g.SetProcessGID(user.GID) - for _, gid := range user.AdditionalGids { - g.AddProcessAdditionalGid(gid) - } - - // Remove capabilities if not running as root except Bounding set - if user.UID != 0 { - bounding := g.Config.Process.Capabilities.Bounding - g.ClearProcessCapabilities() - g.Config.Process.Capabilities.Bounding = bounding - } - - return nil -} - -func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) { - g.ClearProcessEnv() - for _, envSpec := range append(b.Env(), options.Env...) { - env := strings.SplitN(envSpec, "=", 2) - if len(env) > 1 { - g.AddProcessEnv(env[0], env[1]) - } - } - - for src, dest := range b.Args { - g.AddProcessEnv(src, dest) - } -} - -func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) { - defaultNamespaceOptions, err := DefaultNamespaceOptions() - if err != nil { - return false, nil, err - } - - namespaceOptions := defaultNamespaceOptions - namespaceOptions.AddOrReplace(b.NamespaceOptions...) - namespaceOptions.AddOrReplace(options.NamespaceOptions...) - - networkPolicy := options.ConfigureNetwork - if networkPolicy == NetworkDefault { - networkPolicy = b.ConfigureNetwork - } - - configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(g, namespaceOptions, b.IDMappingOptions, networkPolicy) - if err != nil { - return false, nil, err - } - - if configureUTS { - if options.Hostname != "" { - g.SetHostname(options.Hostname) - } else if b.Hostname() != "" { - g.SetHostname(b.Hostname()) - } - } else { - g.SetHostname("") - } - return configureNetwork, configureNetworks, nil -} - -// Run runs the specified command in the container's root filesystem. -func (b *Builder) Run(command []string, options RunOptions) error { - p, err := ioutil.TempDir("", Package) - if err != nil { - return err - } - // On some hosts like AH, /tmp is a symlink and we need an - // absolute path. 
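-	// For example, if /tmp is a symlink to /var/tmp, a bundle directory
-	// created as /tmp/buildah123456789 must be resolved to
-	// /var/tmp/buildah123456789 before we hand it to the runtime.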
- path, err := filepath.EvalSymlinks(p) - if err != nil { - return err - } - logrus.Debugf("using %q to hold bundle data", path) - defer func() { - if err2 := os.RemoveAll(path); err2 != nil { - logrus.Errorf("error removing %q: %v", path, err2) - } - }() - - gp, err := generate.New("linux") - if err != nil { - return err - } - g := &gp - - isolation := options.Isolation - if isolation == IsolationDefault { - isolation = b.Isolation - if isolation == IsolationDefault { - isolation = IsolationOCI - } - } - if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil { - return err - } - - b.configureEnvironment(g, options) - - if b.CommonBuildOpts == nil { - return errors.Errorf("Invalid format on container you must recreate the container") - } - - if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil { - return err - } - - if options.WorkingDir != "" { - g.SetProcessCwd(options.WorkingDir) - } else if b.WorkDir() != "" { - g.SetProcessCwd(b.WorkDir()) - } - setupSelinux(g, b.ProcessLabel, b.MountLabel) - mountPoint, err := b.Mount(b.MountLabel) - if err != nil { - return err - } - defer func() { - if err := b.Unmount(); err != nil { - logrus.Errorf("error unmounting container: %v", err) - } - }() - g.SetRootPath(mountPoint) - if len(command) > 0 { - command = runLookupPath(g, command) - g.SetProcessArgs(command) - } else { - g.SetProcessArgs(nil) - } - - setupMaskedPaths(g) - setupReadOnlyPaths(g) - - setupTerminal(g, options.Terminal, options.TerminalSize) - - configureNetwork, configureNetworks, err := b.configureNamespaces(g, options) - if err != nil { - return err - } - - if err := b.configureUIDGID(g, mountPoint, options); err != nil { - return err - } - - g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile) - - // Now grab the spec from the generator. Set the generator to nil so that future contributors - // will quickly be able to tell that they're supposed to be modifying the spec directly from here. - spec := g.Spec() - g = nil - - // Set the working directory, creating it if we must. - if spec.Process.Cwd == "" { - spec.Process.Cwd = DefaultWorkingDir - } - logrus.Debugf("ensuring working directory %q exists", filepath.Join(mountPoint, spec.Process.Cwd)) - if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil { - return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd) - } - - // Set the seccomp configuration using the specified profile name. Some syscalls are - // allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot), - // so we sorted out the capabilities lists first. - if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil { - return err - } - - // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. 
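-	// When a user namespace is in play, container UID/GID 0 is backed by
-	// host IDs taken from the spec's mappings: with a mapping of
-	// (container 0, host 1000, size 65536), for instance, files that
-	// should appear root-owned inside the container must be created as
-	// host UID/GID 1000.  Without mappings this is simply 0/0.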
- rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err - } - rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} - - hostFile, err := b.addNetworkConfig(path, "/etc/hosts", rootIDPair) - if err != nil { - return err - } - resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair) - if err != nil { - return err - } - - if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil { - return err - } - - bindFiles := map[string]string{ - "/etc/hosts": hostFile, - "/etc/resolv.conf": resolvFile, - } - err = b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, append(b.NamespaceOptions, options.NamespaceOptions...)) - if err != nil { - return errors.Wrapf(err, "error resolving mountpoints for container") - } - - if options.CNIConfigDir == "" { - options.CNIConfigDir = b.CNIConfigDir - if b.CNIConfigDir == "" { - options.CNIConfigDir = util.DefaultCNIConfigDir - } - } - if options.CNIPluginPath == "" { - options.CNIPluginPath = b.CNIPluginPath - if b.CNIPluginPath == "" { - options.CNIPluginPath = util.DefaultCNIPluginPath - } - } - - switch isolation { - case IsolationOCI: - // The default is --rootless=auto, which makes troubleshooting a bit harder. - // rootlessFlag := []string{"--rootless=false"} - // for _, arg := range options.Args { - // if strings.HasPrefix(arg, "--rootless") { - // rootlessFlag = nil - // } - // } - // options.Args = append(options.Args, rootlessFlag...) - err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, nil, spec, mountPoint, path, Package+"-"+filepath.Base(path)) - case IsolationChroot: - err = chroot.RunUsingChroot(spec, path, options.Stdin, options.Stdout, options.Stderr) - case IsolationOCIRootless: - if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil { - return err - } - rootlessFlag := []string{"--rootless=true"} - for _, arg := range options.Args { - if strings.HasPrefix(arg, "--rootless") { - rootlessFlag = nil - } - } - options.Args = append(options.Args, rootlessFlag...) 
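-		// runUsingRuntimeSubproc re-executes the current binary via the
-		// reexec package and hands the marshalled options and spec to the
-		// child over a pipe, so process-wide tweaks (such as becoming a
-		// child subreaper) stay out of the caller's process.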
- err = b.runUsingRuntimeSubproc(options, configureNetwork, configureNetworks, []string{"--no-new-keyring"}, spec, mountPoint, path, Package+"-"+filepath.Base(path)) - default: - err = errors.Errorf("don't know how to run this command") - } - return err -} - -func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions) error { - switch isolation { - case IsolationOCIRootless: - if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of an IPC namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.IPCNamespace)}) - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host { - logrus.Debugf("Disabling network namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.NetworkNamespace), Host: true}) - if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of a PID namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.PIDNamespace), Host: false}) - if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host { - logrus.Debugf("Forcing use of a user namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UserNamespace)}) - if ns := options.NamespaceOptions.Find(string(specs.UTSNamespace)); ns != nil && !ns.Host { - logrus.Debugf("Disabling UTS namespace.") - } - options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UTSNamespace), Host: true}) - case IsolationOCI: - pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace)) - userns := options.NamespaceOptions.Find(string(specs.UserNamespace)) - if (pidns == nil || pidns.Host) && (userns != nil && !userns.Host) { - return fmt.Errorf("not allowed to mix host PID namespace with container user namespace") - } - } - return nil -} - -func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootGID uint32) error { - spec.Hostname = "" - spec.Process.User.AdditionalGids = nil - spec.Linux.Resources = nil - - emptyDir := filepath.Join(bundleDir, "empty") - if err := os.Mkdir(emptyDir, 0); err != nil { - return errors.Wrapf(err, "error creating %q", emptyDir) - } - - // Replace /sys with a read-only bind mount. - mounts := []specs.Mount{ - { - Source: "/dev", - Destination: "/dev", - Type: "tmpfs", - Options: []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"}, - }, - { - Source: "mqueue", - Destination: "/dev/mqueue", - Type: "mqueue", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "pts", - Destination: "/dev/pts", - Type: "devpts", - Options: []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"}, - }, - { - Source: "shm", - Destination: "/dev/shm", - Type: "tmpfs", - Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=65536k"}, - }, - { - Source: "/proc", - Destination: "/proc", - Type: "proc", - Options: []string{"private", "nodev", "noexec", "nosuid"}, - }, - { - Source: "/sys", - Destination: "/sys", - Type: "bind", - Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, - }, - } - // Cover up /sys/fs/cgroup and /sys/fs/selinux, if they exist in our source for /sys. 
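-	// Masking a path makes the runtime hide it from the container (for
-	// directories, typically by covering it with an empty read-only
-	// tmpfs), which suits paths a rootless container could not use anyway.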
- if _, err := os.Stat("/sys/fs/cgroup"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") - } - if _, err := os.Stat("/sys/fs/selinux"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/selinux") - } - // Keep anything that isn't under /dev, /proc, or /sys. - for i := range spec.Mounts { - if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") || - spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") || - spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") { - continue - } - mounts = append(mounts, spec.Mounts[i]) - } - spec.Mounts = mounts - return nil -} - -type runUsingRuntimeSubprocOptions struct { - Options RunOptions - Spec *specs.Spec - RootPath string - BundlePath string - ConfigureNetwork bool - ConfigureNetworks []string - MoreCreateArgs []string - ContainerName string -} - -func (b *Builder) runUsingRuntimeSubproc(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { - var confwg sync.WaitGroup - config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ - Options: options, - Spec: spec, - RootPath: rootPath, - BundlePath: bundlePath, - ConfigureNetwork: configureNetwork, - ConfigureNetworks: configureNetworks, - MoreCreateArgs: moreCreateArgs, - ContainerName: containerName, - }) - if conferr != nil { - return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) - } - cmd := reexec.Command(runUsingRuntimeCommand) - cmd.Dir = bundlePath - cmd.Stdin = options.Stdin - if cmd.Stdin == nil { - cmd.Stdin = os.Stdin - } - cmd.Stdout = options.Stdout - if cmd.Stdout == nil { - cmd.Stdout = os.Stdout - } - cmd.Stderr = options.Stderr - if cmd.Stderr == nil { - cmd.Stderr = os.Stderr - } - cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())) - preader, pwriter, err := os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating configuration pipe") - } - confwg.Add(1) - go func() { - _, conferr = io.Copy(pwriter, bytes.NewReader(config)) - confwg.Done() - }() - cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) - defer preader.Close() - defer pwriter.Close() - err = cmd.Run() - confwg.Wait() - if err == nil { - return conferr - } - return err -} - -func init() { - reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain) -} - -func runUsingRuntimeMain() { - var options runUsingRuntimeSubprocOptions - // Set logging. - if level := os.Getenv("LOGLEVEL"); level != "" { - if ll, err := strconv.Atoi(level); err == nil { - logrus.SetLevel(logrus.Level(ll)) - } - } - // Unpack our configuration. - confPipe := os.NewFile(3, "confpipe") - if confPipe == nil { - fmt.Fprintf(os.Stderr, "error reading options pipe\n") - os.Exit(1) - } - defer confPipe.Close() - if err := json.NewDecoder(confPipe).Decode(&options); err != nil { - fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err) - os.Exit(1) - } - // Set ourselves up to read the container's exit status. We're doing this in a child process - // so that we won't mess with the setting in a caller of the library. This stubs to OS specific - // calls - if err := setChildProcess(); err != nil { - os.Exit(1) - } - // Run the container, start to finish. 
- status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, options.Spec, options.RootPath, options.BundlePath, options.ContainerName) - if err != nil { - fmt.Fprintf(os.Stderr, "error running container: %v\n", err) - os.Exit(1) - } - // Pass the container's exit status back to the caller by exiting with the same status. - if status.Exited() { - os.Exit(status.ExitStatus()) - } else if status.Signaled() { - fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal()) - os.Exit(1) - } - os.Exit(1) -} - -func runUsingRuntime(options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { - // Lock the caller to a single OS-level thread. - runtime.LockOSThread() - - // Set up bind mounts for things that a namespaced user might not be able to get to directly. - unmountAll, err := bind.SetupIntermediateMountNamespace(spec, bundlePath) - if unmountAll != nil { - defer func() { - if err := unmountAll(); err != nil { - logrus.Error(err) - } - }() - } - if err != nil { - return 1, err - } - - // Write the runtime configuration. - specbytes, err := json.Marshal(spec) - if err != nil { - return 1, err - } - if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil { - return 1, errors.Wrapf(err, "error storing runtime configuration") - } - - logrus.Debugf("config = %v", string(specbytes)) - - // Decide which runtime to use. - runtime := options.Runtime - if runtime == "" { - runtime = util.Runtime() - } - - // Default to just passing down our stdio. - getCreateStdio := func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - return os.Stdin, os.Stdout, os.Stderr - } - - // Figure out how we're doing stdio handling, and create pipes and sockets. - var stdio sync.WaitGroup - var consoleListener *net.UnixListener - var errorFds, closeBeforeReadingErrorFds []int - stdioPipe := make([][]int, 3) - copyConsole := false - copyPipes := false - finishCopy := make([]int, 2) - if err = unix.Pipe(finishCopy); err != nil { - return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio") - } - finishedCopy := make(chan struct{}) - if spec.Process != nil { - if spec.Process.Terminal { - copyConsole = true - // Create a listening socket for accepting the container's terminal's PTY master. - socketPath := filepath.Join(bundlePath, "console.sock") - consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"}) - if err != nil { - return 1, errors.Wrapf(err, "error creating socket to receive terminal descriptor") - } - // Add console socket arguments. - moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath) - } else { - copyPipes = true - // Figure out who should own the pipes. - uid, gid, err := util.GetHostRootIDs(spec) - if err != nil { - return 1, err - } - // Create stdio pipes. - if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { - return 1, err - } - errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} - closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} - // Set stdio to our pipes. 
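-			// For each pipe, [0] is the read end and [1] the write end:
-			// the container gets stdin's read end and stdout/stderr's
-			// write ends, while we keep the opposite ends for relaying
-			// in runCopyStdio.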
- getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin") - stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout") - stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr") - return stdin, stdout, stderr - } - } - } else { - if options.Quiet { - // Discard stdout. - getCreateStdio = func() (io.ReadCloser, io.WriteCloser, io.WriteCloser) { - return os.Stdin, nil, os.Stderr - } - } - } - - // Build the commands that we'll execute. - pidFile := filepath.Join(bundlePath, "pid") - args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) - create := exec.Command(runtime, args...) - create.Dir = bundlePath - stdin, stdout, stderr := getCreateStdio() - create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr - if create.SysProcAttr == nil { - create.SysProcAttr = &syscall.SysProcAttr{} - } - - args = append(options.Args, "start", containerName) - start := exec.Command(runtime, args...) - start.Dir = bundlePath - start.Stderr = os.Stderr - - args = append(options.Args, "kill", containerName) - kill := exec.Command(runtime, args...) - kill.Dir = bundlePath - kill.Stderr = os.Stderr - - args = append(options.Args, "delete", containerName) - del := exec.Command(runtime, args...) - del.Dir = bundlePath - del.Stderr = os.Stderr - - // Actually create the container. - logrus.Debugf("Running %q", create.Args) - err = create.Run() - if err != nil { - return 1, errors.Wrapf(err, "error creating container for %v: %s", spec.Process.Args, runCollectOutput(errorFds, closeBeforeReadingErrorFds)) - } - defer func() { - err2 := del.Run() - if err2 != nil { - if err == nil { - err = errors.Wrapf(err2, "error deleting container") - } else { - logrus.Infof("error deleting container: %v", err2) - } - } - }() - - // Make sure we read the container's exit status when it exits. - pidValue, err := ioutil.ReadFile(pidFile) - if err != nil { - return 1, errors.Wrapf(err, "error reading pid from %q", pidFile) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) - if err != nil { - return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) - } - var reaping sync.WaitGroup - reaping.Add(1) - go func() { - defer reaping.Done() - var err error - _, err = unix.Wait4(pid, &wstatus, 0, nil) - if err != nil { - wstatus = 0 - logrus.Errorf("error waiting for container child process %d: %v\n", pid, err) - } - }() - - if configureNetwork { - teardown, err := runConfigureNetwork(options, configureNetworks, pid, containerName, spec.Process.Args) - if teardown != nil { - defer teardown() - } - if err != nil { - return 1, err - } - } - - if copyPipes { - // We don't need the ends of the pipes that belong to the container. - stdin.Close() - if stdout != nil { - stdout.Close() - } - stderr.Close() - } - - // Handle stdio for the container in the background. - stdio.Add(1) - go runCopyStdio(&stdio, copyPipes, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy, spec) - - // Start the container. 
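-	// What follows is the usual OCI runtime lifecycle.  With runc as the
-	// runtime (shown only as an example; options.Runtime decides), it is
-	// roughly equivalent to:
-	//
-	//	runc create --bundle $bundlePath --pid-file $pidFile $containerName
-	//	runc start $containerName
-	//	runc state $containerName    # polled until it reports "stopped"
-	//	runc delete $containerName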
- logrus.Debugf("Running %q", start.Args) - err = start.Run() - if err != nil { - return 1, errors.Wrapf(err, "error starting container") - } - stopped := false - defer func() { - if !stopped { - err2 := kill.Run() - if err2 != nil { - if err == nil { - err = errors.Wrapf(err2, "error stopping container") - } else { - logrus.Infof("error stopping container: %v", err2) - } - } - } - }() - - // Wait for the container to exit. - for { - now := time.Now() - var state specs.State - args = append(options.Args, "state", containerName) - stat := exec.Command(runtime, args...) - stat.Dir = bundlePath - stat.Stderr = os.Stderr - stateOutput, stateErr := stat.Output() - if stateErr != nil { - return 1, errors.Wrapf(stateErr, "error reading container state") - } - if err = json.Unmarshal(stateOutput, &state); err != nil { - return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput)) - } - switch state.Status { - case "running": - case "stopped": - stopped = true - default: - return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) - } - if stopped { - break - } - select { - case <-finishedCopy: - stopped = true - case <-time.After(time.Until(now.Add(100 * time.Millisecond))): - continue - } - if stopped { - break - } - } - - // Close the writing end of the stop-handling-stdio notification pipe. - unix.Close(finishCopy[1]) - // Wait for the stdio copy goroutine to flush. - stdio.Wait() - // Wait until we finish reading the exit status. - reaping.Wait() - - return wstatus, nil -} - -func runCollectOutput(fds, closeBeforeReadingFds []int) string { - for _, fd := range closeBeforeReadingFds { - unix.Close(fd) - } - var b bytes.Buffer - buf := make([]byte, 8192) - for _, fd := range fds { - nread, err := unix.Read(fd, buf) - if err != nil { - if errno, isErrno := err.(syscall.Errno); isErrno { - switch errno { - default: - logrus.Errorf("error reading from pipe %d: %v", fd, err) - case syscall.EINTR, syscall.EAGAIN: - } - } else { - logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err) - } - continue - } - for nread > 0 { - r := buf[:nread] - if nwritten, err := b.Write(r); err != nil || nwritten != len(r) { - if nwritten != len(r) { - logrus.Errorf("error buffering data from pipe %d: %v", fd, err) - break - } - } - nread, err = unix.Read(fd, buf) - if err != nil { - if errno, isErrno := err.(syscall.Errno); isErrno { - switch errno { - default: - logrus.Errorf("error reading from pipe %d: %v", fd, err) - case syscall.EINTR, syscall.EAGAIN: - } - } else { - logrus.Errorf("unable to wait for data from pipe %d: %v", fd, err) - } - break - } - } - } - return b.String() -} - -func runConfigureNetwork(options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) { - var netconf, undo []*libcni.NetworkConfigList - // Scan for CNI configuration files. - confdir := options.CNIConfigDir - files, err := libcni.ConfFiles(confdir, []string{".conf"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir) - } - lists, err := libcni.ConfFiles(confdir, []string{".conflist"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir) - } - logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...)) - // Read the CNI configuration files. 
- for _, file := range files { - nc, err := libcni.ConfFromFile(file) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command) - } - if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) { - if nc.Network.Name == "" { - logrus.Debugf("configuration in %q has no name, skipping it", file) - } else { - logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name) - } - continue - } - cl, err := libcni.ConfListFromConf(nc) - if err != nil { - return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command) - } - logrus.Debugf("using network configuration from %q", file) - netconf = append(netconf, cl) - } - for _, list := range lists { - cl, err := libcni.ConfListFromFile(list) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command) - } - if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) { - if cl.Name == "" { - logrus.Debugf("configuration list in %q has no name, skipping it", list) - } else { - logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name) - } - continue - } - logrus.Debugf("using network configuration list from %q", list) - netconf = append(netconf, cl) - } - // Make sure we can access the container's network namespace, - // even after it exits, to successfully tear down the - // interfaces. Ensure this by opening a handle to the network - // namespace, and using our copy to both configure and - // deconfigure it. - netns := fmt.Sprintf("/proc/%d/ns/net", pid) - netFD, err := unix.Open(netns, unix.O_RDONLY, 0) - if err != nil { - return nil, errors.Wrapf(err, "error opening network namespace for %v", command) - } - mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) - // Build our search path for the plugins. - pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator)) - cni := libcni.CNIConfig{Path: pluginPaths} - // Configure the interfaces. - rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) - teardown = func() { - for _, nc := range undo { - if err = cni.DelNetworkList(nc, rtconf[nc]); err != nil { - logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) - } - } - unix.Close(netFD) - } - for i, nc := range netconf { - // Build the runtime config for use with this network configuration. - rtconf[nc] = &libcni.RuntimeConf{ - ContainerID: containerName, - NetNS: mynetns, - IfName: fmt.Sprintf("if%d", i), - Args: [][2]string{}, - CapabilityArgs: map[string]interface{}{}, - } - // Bring it up. - _, err := cni.AddNetworkList(nc, rtconf[nc]) - if err != nil { - return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) - } - // Add it to the list of networks to take down when the container process exits. - undo = append([]*libcni.NetworkConfigList{nc}, undo...) 
- } - return teardown, nil -} - -func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { - defer func() { - unix.Close(finishCopy[0]) - if copyPipes { - unix.Close(stdioPipe[unix.Stdin][1]) - unix.Close(stdioPipe[unix.Stdout][0]) - unix.Close(stdioPipe[unix.Stderr][0]) - } - stdio.Done() - finishedCopy <- struct{}{} - }() - // Map describing where data on an incoming descriptor should go. - relayMap := make(map[int]int) - // Map describing incoming and outgoing descriptors. - readDesc := make(map[int]string) - writeDesc := make(map[int]string) - // Buffers. - relayBuffer := make(map[int]*bytes.Buffer) - // Set up the terminal descriptor or pipes for polling. - if copyConsole { - // Accept a connection over our listening socket. - fd, err := runAcceptTerminal(consoleListener, spec.Process.ConsoleSize) - if err != nil { - logrus.Errorf("%v", err) - return - } - terminalFD := fd - // Input from our stdin, output from the terminal descriptor. - relayMap[unix.Stdin] = terminalFD - readDesc[unix.Stdin] = "stdin" - relayBuffer[terminalFD] = new(bytes.Buffer) - writeDesc[terminalFD] = "container terminal input" - relayMap[terminalFD] = unix.Stdout - readDesc[terminalFD] = "container terminal output" - relayBuffer[unix.Stdout] = new(bytes.Buffer) - writeDesc[unix.Stdout] = "output" - // Set our terminal's mode to raw, to pass handling of special - // terminal input to the terminal in the container. - if terminal.IsTerminal(unix.Stdin) { - if state, err := terminal.MakeRaw(unix.Stdin); err != nil { - logrus.Warnf("error setting terminal state: %v", err) - } else { - defer func() { - if err = terminal.Restore(unix.Stdin, state); err != nil { - logrus.Errorf("unable to restore terminal state: %v", err) - } - }() - } - } - } - if copyPipes { - // Input from our stdin, output from the stdout and stderr pipes. - relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1] - readDesc[unix.Stdin] = "stdin" - relayBuffer[stdioPipe[unix.Stdin][1]] = new(bytes.Buffer) - writeDesc[stdioPipe[unix.Stdin][1]] = "container stdin" - relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout - readDesc[stdioPipe[unix.Stdout][0]] = "container stdout" - relayBuffer[unix.Stdout] = new(bytes.Buffer) - writeDesc[unix.Stdout] = "stdout" - relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr - readDesc[stdioPipe[unix.Stderr][0]] = "container stderr" - relayBuffer[unix.Stderr] = new(bytes.Buffer) - writeDesc[unix.Stderr] = "stderr" - } - // Set our reading descriptors to non-blocking. - for rfd, wfd := range relayMap { - if err := unix.SetNonblock(rfd, true); err != nil { - logrus.Errorf("error setting %s to nonblocking: %v", readDesc[rfd], err) - return - } - if err := unix.SetNonblock(wfd, false); err != nil { - logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err) - } - } - // A helper that returns false if err is an error that would cause us - // to give up. - logIfNotRetryable := func(err error, what string) (retry bool) { - if err == nil { - return true - } - if errno, isErrno := err.(syscall.Errno); isErrno { - switch errno { - case syscall.EINTR, syscall.EAGAIN: - return true - } - } - logrus.Error(what) - return false - } - // Pass data back and forth. - pollTimeout := -1 - for len(relayMap) > 0 { - // Start building the list of descriptors to poll. - pollFds := make([]unix.PollFd, 0, len(relayMap)+1) - // Poll for a notification that we should stop handling stdio. 
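-		// finishCopy is the stop-relaying notification pipe: the parent
-		// closes its write end once the container exits, and the resulting
-		// POLLHUP on the read end tells us to drain our buffers and return.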
- pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP}) - // Poll on our reading descriptors. - for rfd := range relayMap { - pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP}) - } - buf := make([]byte, 8192) - // Wait for new data from any input descriptor, or a notification that we're done. - _, err := unix.Poll(pollFds, pollTimeout) - if !logIfNotRetryable(err, fmt.Sprintf("error waiting for stdio/terminal data to relay: %v", err)) { - return - } - removes := make(map[int]struct{}) - for _, pollFd := range pollFds { - // If this descriptor's just been closed from the other end, mark it for - // removal from the set that we're checking for. - if pollFd.Revents&unix.POLLHUP == unix.POLLHUP { - removes[int(pollFd.Fd)] = struct{}{} - } - // If the descriptor was closed elsewhere, remove it from our list. - if pollFd.Revents&unix.POLLNVAL != 0 { - logrus.Debugf("error polling descriptor %s: closed?", readDesc[int(pollFd.Fd)]) - removes[int(pollFd.Fd)] = struct{}{} - } - // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. - if pollFd.Revents&unix.POLLIN == 0 { - // If we're using pipes and it's our stdin and it's closed, close the writing - // end of the corresponding pipe. - if copyPipes && int(pollFd.Fd) == unix.Stdin && pollFd.Revents&unix.POLLHUP != 0 { - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } - continue - } - // Read whatever there is to be read. - readFD := int(pollFd.Fd) - writeFD, needToRelay := relayMap[readFD] - if needToRelay { - n, err := unix.Read(readFD, buf) - if !logIfNotRetryable(err, fmt.Sprintf("unable to read %s data: %v", readDesc[readFD], err)) { - return - } - // If it's zero-length on our stdin and we're - // using pipes, it's an EOF, so close the stdin - // pipe's writing end. - if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin { - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } - if n > 0 { - // Buffer the data in case we get blocked on where they need to go. - nwritten, err := relayBuffer[writeFD].Write(buf[:n]) - if err != nil { - logrus.Debugf("buffer: %v", err) - continue - } - if nwritten != n { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", n, nwritten) - continue - } - // If this is the last of the data we'll be able to read from this - // descriptor, read all that there is to read. - for pollFd.Revents&unix.POLLHUP == unix.POLLHUP { - nr, err := unix.Read(readFD, buf) - logIfNotRetryable(err, fmt.Sprintf("read %s: %v", readDesc[readFD], err)) - if nr <= 0 { - break - } - nwritten, err := relayBuffer[writeFD].Write(buf[:nr]) - if err != nil { - logrus.Debugf("buffer: %v", err) - break - } - if nwritten != nr { - logrus.Debugf("buffer: expected to buffer %d bytes, wrote %d", nr, nwritten) - break - } - } - } - } - } - // Try to drain the output buffers. Set the default timeout - // for the next poll() to 100ms if we still have data to write. - pollTimeout = -1 - for writeFD := range relayBuffer { - if relayBuffer[writeFD].Len() > 0 { - n, err := unix.Write(writeFD, relayBuffer[writeFD].Bytes()) - if !logIfNotRetryable(err, fmt.Sprintf("unable to write %s data: %v", writeDesc[writeFD], err)) { - return - } - if n > 0 { - relayBuffer[writeFD].Next(n) - } - } - if relayBuffer[writeFD].Len() > 0 { - pollTimeout = 100 - } - } - // Remove any descriptors which we don't need to poll any more from the poll descriptor list. 
-		for remove := range removes {
-			delete(relayMap, remove)
-		}
-		// If the we-can-return pipe had anything for us, we're done.
-		for _, pollFd := range pollFds {
-			if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 {
-				// The pipe is closed, indicating that we can stop now.
-				return
-			}
-		}
-	}
-}
-
-func runAcceptTerminal(consoleListener *net.UnixListener, terminalSize *specs.Box) (int, error) {
-	defer consoleListener.Close()
-	c, err := consoleListener.AcceptUnix()
-	if err != nil {
-		return -1, errors.Wrapf(err, "error accepting socket descriptor connection")
-	}
-	defer c.Close()
-	// Expect a control message over our new connection.
-	b := make([]byte, 8192)
-	oob := make([]byte, 8192)
-	n, oobn, _, _, err := c.ReadMsgUnix(b, oob)
-	if err != nil {
-		return -1, errors.Wrapf(err, "error reading socket descriptor")
-	}
-	if n > 0 {
-		logrus.Debugf("socket descriptor is for %q", string(b[:n]))
-	}
-	if oobn > len(oob) {
-		return -1, errors.Errorf("too much out-of-band data (%d bytes)", oobn)
-	}
-	// Parse the control message.
-	scm, err := unix.ParseSocketControlMessage(oob[:oobn])
-	if err != nil {
-		return -1, errors.Wrapf(err, "error parsing out-of-band data as a socket control message")
-	}
-	logrus.Debugf("control messages: %v", scm)
-	// Expect to get a descriptor.
-	terminalFD := -1
-	for i := range scm {
-		fds, err := unix.ParseUnixRights(&scm[i])
-		if err != nil {
-			return -1, errors.Wrapf(err, "error parsing unix rights control message")
-		}
-		logrus.Debugf("fds: %v", fds)
-		if len(fds) == 0 {
-			continue
-		}
-		terminalFD = fds[0]
-		break
-	}
-	if terminalFD == -1 {
-		return -1, errors.Errorf("unable to read terminal descriptor")
-	}
-	// Set the pseudoterminal's size to the configured size, or our own.
-	winsize := &unix.Winsize{}
-	if terminalSize != nil {
-		// Use configured sizes.
-		winsize.Row = uint16(terminalSize.Height)
-		winsize.Col = uint16(terminalSize.Width)
-	} else {
-		if terminal.IsTerminal(unix.Stdin) {
-			// Use the size of our terminal.
-			if winsize, err = unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ); err != nil {
-				logrus.Warnf("error reading size of controlling terminal: %v", err)
-				winsize.Row = 0
-				winsize.Col = 0
-			}
-		}
-	}
-	if winsize.Row != 0 && winsize.Col != 0 {
-		if err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize); err != nil {
-			logrus.Warnf("error setting size of container pseudoterminal: %v", err)
-		}
-		// FIXME - if we're connected to a terminal, we should
-		// be passing the updated terminal size down when we
-		// receive a SIGWINCH.
-	}
-	return terminalFD, nil
-}
-
-// Create pipes to use for relaying stdio.
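-// The ends that the container will use are chown()ed to the IDs that act as
-// root inside it, so a user-namespaced child can actually read its stdin and
-// write its stdout and stderr.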
-func runMakeStdioPipe(uid, gid int) ([][]int, error) { - stdioPipe := make([][]int, 3) - for i := range stdioPipe { - stdioPipe[i] = make([]int, 2) - if err := unix.Pipe(stdioPipe[i]); err != nil { - return nil, errors.Wrapf(err, "error creating pipe for container FD %d", i) - } - } - if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stdin pipe descriptor") - } - if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stdout pipe descriptor") - } - if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil { - return nil, errors.Wrapf(err, "error setting owner of stderr pipe descriptor") - } - return stdioPipe, nil -} diff --git a/vendor/github.com/projectatomic/buildah/run_linux.go b/vendor/github.com/projectatomic/buildah/run_linux.go deleted file mode 100644 index a7519a092..000000000 --- a/vendor/github.com/projectatomic/buildah/run_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux - -package buildah - -import ( - "fmt" - "golang.org/x/sys/unix" - "os" -) - -func setChildProcess() error { - if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil { - fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err) - return err - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/run_unsupport.go b/vendor/github.com/projectatomic/buildah/run_unsupport.go deleted file mode 100644 index 4824a0c4e..000000000 --- a/vendor/github.com/projectatomic/buildah/run_unsupport.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux - -package buildah - -import ( - "github.com/pkg/errors" -) - -func setChildProcess() error { - return errors.New("function not supported on non-linux systems") -} diff --git a/vendor/github.com/projectatomic/buildah/seccomp.go b/vendor/github.com/projectatomic/buildah/seccomp.go deleted file mode 100644 index a435b5f71..000000000 --- a/vendor/github.com/projectatomic/buildah/seccomp.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build seccomp,linux - -package buildah - -import ( - "io/ioutil" - - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - seccomp "github.com/seccomp/containers-golang" -) - -func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { - switch seccompProfilePath { - case "unconfined": - spec.Linux.Seccomp = nil - case "": - seccompConfig, err := seccomp.GetDefaultProfile(spec) - if err != nil { - return errors.Wrapf(err, "loading default seccomp profile failed") - } - spec.Linux.Seccomp = seccompConfig - default: - seccompProfile, err := ioutil.ReadFile(seccompProfilePath) - if err != nil { - return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) - } - seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) - if err != nil { - return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) - } - spec.Linux.Seccomp = seccompConfig - } - return nil -} diff --git a/vendor/github.com/projectatomic/buildah/seccomp_unsupported.go b/vendor/github.com/projectatomic/buildah/seccomp_unsupported.go deleted file mode 100644 index cba8390c5..000000000 --- a/vendor/github.com/projectatomic/buildah/seccomp_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !seccomp !linux - -package buildah - -import ( - "github.com/opencontainers/runtime-spec/specs-go" -) - -func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { - if spec.Linux != 
nil {
-		// runtime-tools may have supplied us with a default filter
-		spec.Linux.Seccomp = nil
-	}
-	return nil
-}
diff --git a/vendor/github.com/projectatomic/buildah/selinux.go b/vendor/github.com/projectatomic/buildah/selinux.go
deleted file mode 100644
index 2b850cf9f..000000000
--- a/vendor/github.com/projectatomic/buildah/selinux.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build selinux,linux
-
-package buildah
-
-import (
-	"github.com/opencontainers/runtime-tools/generate"
-)
-
-func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
-	g.SetProcessSelinuxLabel(processLabel)
-	g.SetLinuxMountLabel(mountLabel)
-}
diff --git a/vendor/github.com/projectatomic/buildah/selinux_unsupported.go b/vendor/github.com/projectatomic/buildah/selinux_unsupported.go
deleted file mode 100644
index 0aa7c46e4..000000000
--- a/vendor/github.com/projectatomic/buildah/selinux_unsupported.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !selinux !linux
-
-package buildah
-
-import (
-	"github.com/opencontainers/runtime-tools/generate"
-)
-
-func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
-}
diff --git a/vendor/github.com/projectatomic/buildah/unmount.go b/vendor/github.com/projectatomic/buildah/unmount.go
deleted file mode 100644
index cdb511170..000000000
--- a/vendor/github.com/projectatomic/buildah/unmount.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package buildah
-
-// Unmount unmounts a build container.
-func (b *Builder) Unmount() error {
-	_, err := b.store.Unmount(b.ContainerID, false)
-	if err == nil {
-		b.MountPoint = ""
-		err = b.Save()
-	}
-	return err
-}
diff --git a/vendor/github.com/projectatomic/buildah/unshare/unshare.c b/vendor/github.com/projectatomic/buildah/unshare/unshare.c
deleted file mode 100644
index 83864359b..000000000
--- a/vendor/github.com/projectatomic/buildah/unshare/unshare.c
+++ /dev/null
@@ -1,110 +0,0 @@
-#define _GNU_SOURCE
-#include <sys/ioctl.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <grp.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <termios.h>
-#include <unistd.h>
-
-static int _buildah_unshare_parse_envint(const char *envname) {
-	char *p, *q;
-	long l;
-
-	p = getenv(envname);
-	if (p == NULL) {
-		return -1;
-	}
-	q = NULL;
-	l = strtol(p, &q, 10);
-	if ((q == NULL) || (*q != '\0')) {
-		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
-		_exit(1);
-	}
-	unsetenv(envname);
-	return l;
-}
-
-void _buildah_unshare(void)
-{
-	int flags, pidfd, continuefd, n, pgrp, sid, ctty, allow_setgroups;
-	char buf[2048];
-
-	flags = _buildah_unshare_parse_envint("_Buildah-unshare");
-	if (flags == -1) {
-		return;
-	}
-	if ((flags & CLONE_NEWUSER) != 0) {
-		if (unshare(CLONE_NEWUSER) == -1) {
-			fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
-			_exit(1);
-		}
-	}
-	pidfd = _buildah_unshare_parse_envint("_Buildah-pid-pipe");
-	if (pidfd != -1) {
-		snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
-		if (write(pidfd, buf, strlen(buf)) != strlen(buf)) {
-			fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
-			_exit(1);
-		}
-		close(pidfd);
-	}
-	continuefd = _buildah_unshare_parse_envint("_Buildah-continue-pipe");
-	if (continuefd != -1) {
-		n = read(continuefd, buf, sizeof(buf));
-		if (n > 0) {
-			fprintf(stderr, "Error: %.*s\n", n, buf);
-			_exit(1);
-		}
-		close(continuefd);
-	}
-	sid = _buildah_unshare_parse_envint("_Buildah-setsid");
-	if (sid == 1) {
-		if (setsid() == -1) {
-			fprintf(stderr, "Error during setsid: %m\n");
-			_exit(1);
-		}
-	}
-	pgrp = _buildah_unshare_parse_envint("_Buildah-setpgrp");
-	if (pgrp == 1) {
-		if
(setpgrp() == -1) { - fprintf(stderr, "Error during setpgrp: %m\n"); - _exit(1); - } - } - ctty = _buildah_unshare_parse_envint("_Buildah-ctty"); - if (ctty != -1) { - if (ioctl(ctty, TIOCSCTTY, 0) == -1) { - fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); - _exit(1); - } - } - allow_setgroups = _buildah_unshare_parse_envint("_Buildah-allow-setgroups"); - if ((flags & CLONE_NEWUSER) != 0) { - if (allow_setgroups == 1) { - if (setgroups(0, NULL) != 0) { - fprintf(stderr, "Error during setgroups(0, NULL): %m\n"); - _exit(1); - } - } - if (setresgid(0, 0, 0) != 0) { - fprintf(stderr, "Error during setresgid(0): %m\n"); - _exit(1); - } - if (setresuid(0, 0, 0) != 0) { - fprintf(stderr, "Error during setresuid(0): %m\n"); - _exit(1); - } - } - if ((flags & ~CLONE_NEWUSER) != 0) { - if (unshare(flags & ~CLONE_NEWUSER) == -1) { - fprintf(stderr, "Error during unshare(...): %m\n"); - _exit(1); - } - } - return; -} diff --git a/vendor/github.com/projectatomic/buildah/unshare/unshare.go b/vendor/github.com/projectatomic/buildah/unshare/unshare.go deleted file mode 100644 index 4eea74956..000000000 --- a/vendor/github.com/projectatomic/buildah/unshare/unshare.go +++ /dev/null @@ -1,273 +0,0 @@ -// +build linux - -package unshare - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "strconv" - "strings" - "syscall" - - "github.com/containers/storage/pkg/reexec" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/projectatomic/buildah/util" -) - -// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and -// handles setting ID maps and other related settings by triggering -// initialization code in the child. -type Cmd struct { - *exec.Cmd - UnshareFlags int - UseNewuidmap bool - UidMappings []specs.LinuxIDMapping - UseNewgidmap bool - GidMappings []specs.LinuxIDMapping - GidMappingsEnableSetgroups bool - Setsid bool - Setpgrp bool - Ctty *os.File - OOMScoreAdj *int - Hook func(pid int) error -} - -// Command creates a new Cmd which can be customized. -func Command(args ...string) *Cmd { - cmd := reexec.Command(args...) - return &Cmd{ - Cmd: cmd, - } -} - -func (c *Cmd) Start() error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // Set an environment variable to tell the child to synchronize its startup. - if c.Env == nil { - c.Env = os.Environ() - } - c.Env = append(c.Env, fmt.Sprintf("_Buildah-unshare=%d", c.UnshareFlags)) - - // Create the pipe for reading the child's PID. - pidRead, pidWrite, err := os.Pipe() - if err != nil { - return errors.Wrapf(err, "error creating pid pipe") - } - c.Env = append(c.Env, fmt.Sprintf("_Buildah-pid-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, pidWrite) - - // Create the pipe for letting the child know to proceed. - continueRead, continueWrite, err := os.Pipe() - if err != nil { - pidRead.Close() - pidWrite.Close() - return errors.Wrapf(err, "error creating pid pipe") - } - c.Env = append(c.Env, fmt.Sprintf("_Buildah-continue-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, continueRead) - - // Pass along other instructions. 
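// Annotation (not part of the original file): the "+3" in the
// _Buildah-pid-pipe and _Buildah-continue-pipe values above, and in the
// _Buildah-ctty value below, reflects how exec.Cmd numbers descriptors in the
// child: ExtraFiles[i] becomes file descriptor 3+i, because descriptors 0, 1,
// and 2 are reserved for stdin, stdout, and stderr.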
-	if c.Setsid {
-		c.Env = append(c.Env, "_Buildah-setsid=1")
-	}
-	if c.Setpgrp {
-		c.Env = append(c.Env, "_Buildah-setpgrp=1")
-	}
-	if c.Ctty != nil {
-		c.Env = append(c.Env, fmt.Sprintf("_Buildah-ctty=%d", len(c.ExtraFiles)+3))
-		c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
-	}
-	if c.GidMappingsEnableSetgroups {
-		c.Env = append(c.Env, "_Buildah-allow-setgroups=1")
-	} else {
-		c.Env = append(c.Env, "_Buildah-allow-setgroups=0")
-	}
-
-	// Make sure we clean up our pipes.
-	defer func() {
-		if pidRead != nil {
-			pidRead.Close()
-		}
-		if pidWrite != nil {
-			pidWrite.Close()
-		}
-		if continueRead != nil {
-			continueRead.Close()
-		}
-		if continueWrite != nil {
-			continueWrite.Close()
-		}
-	}()
-
-	// Start the new process.
-	err = c.Cmd.Start()
-	if err != nil {
-		return err
-	}
-
-	// Close the ends of the pipes that the parent doesn't need.
-	continueRead.Close()
-	continueRead = nil
-	pidWrite.Close()
-	pidWrite = nil
-
-	// Read the child's PID from the pipe.
-	pidString := ""
-	b := new(bytes.Buffer)
-	io.Copy(b, pidRead)
-	pidString = b.String()
-	pid, err := strconv.Atoi(pidString)
-	if err != nil {
-		fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
-		return errors.Wrapf(err, "error parsing PID %q", pidString)
-	}
-	pidString = fmt.Sprintf("%d", pid)
-
-	// If we created a new user namespace, set any specified mappings.
-	if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
-		// Always set "setgroups".
-		setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
-		if err != nil {
-			fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
-			return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
-		}
-		defer setgroups.Close()
-		if c.GidMappingsEnableSetgroups {
-			if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
-				fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
-				return errors.Wrapf(err, "error writing \"allow\" to /proc/%s/setgroups", pidString)
-			}
-		} else {
-			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
-				fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
-				return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
-			}
-		}
-
-		if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
-			uidmap, gidmap, err := util.GetHostIDMappings("")
-			if err != nil {
-				fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
-				return errors.Wrapf(err, "error reading ID mappings in parent")
-			}
-			if len(c.UidMappings) == 0 {
-				c.UidMappings = uidmap
-				for i := range c.UidMappings {
-					c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
-				}
-			}
-			if len(c.GidMappings) == 0 {
-				c.GidMappings = gidmap
-				for i := range c.GidMappings {
-					c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
				}
-			}
-		}
-
-		if len(c.GidMappings) > 0 {
-			// Build the GID map, since writing to the proc file has to be done all at once.
-			g := new(bytes.Buffer)
-			for _, m := range c.GidMappings {
-				fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
-			}
-			// Set the GID map.
-			if c.UseNewgidmap {
-				cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
-				g.Reset()
-				cmd.Stdout = g
-				cmd.Stderr = g
-				err := cmd.Run()
-				if err != nil {
-					fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String())
-					return errors.Wrapf(err, "error running newgidmap: %s", g.String())
-				}
-			} else {
-				gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
-				if err != nil {
-					fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
-					return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
-				}
-				defer gidmap.Close()
-				if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
-					fmt.Fprintf(continueWrite, "error writing /proc/%s/gid_map: %v", pidString, err)
-					return errors.Wrapf(err, "error writing /proc/%s/gid_map", pidString)
-				}
-			}
-		}
-
-		if len(c.UidMappings) > 0 {
-			// Build the UID map, since writing to the proc file has to be done all at once.
-			u := new(bytes.Buffer)
-			for _, m := range c.UidMappings {
-				fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
-			}
-			// Set the UID map.
-			if c.UseNewuidmap {
-				cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
-				u.Reset()
-				cmd.Stdout = u
-				cmd.Stderr = u
-				err := cmd.Run()
-				if err != nil {
-					fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String())
-					return errors.Wrapf(err, "error running newuidmap: %s", u.String())
-				}
-			} else {
-				uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
-				if err != nil {
-					fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
-					return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
-				}
-				defer uidmap.Close()
-				if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
-					fmt.Fprintf(continueWrite, "error writing /proc/%s/uid_map: %v", pidString, err)
-					return errors.Wrapf(err, "error writing /proc/%s/uid_map", pidString)
-				}
-			}
-		}
-	}
-
-	if c.OOMScoreAdj != nil {
-		oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
-		if err != nil {
-			fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
-			return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
-		}
-		defer oomScoreAdj.Close()
-		if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
-			fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", *c.OOMScoreAdj, err)
-			return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", *c.OOMScoreAdj, pidString)
-		}
-	}
-	// Run any additional setup that we want to do before the child starts running proper.
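// An illustrative use of the Hook callback (hypothetical, not from the
// original file): the parent can perform per-PID setup here, and returning a
// non-nil error keeps the child blocked on the continue pipe:
//
//	cmd := unshare.Command("do-something")
//	cmd.Hook = func(pid int) error {
//		// e.g. move the child into a cgroup before letting it proceed;
//		// joinCgroup is a made-up helper for this sketch.
//		return joinCgroup(pid)
//	}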
- if c.Hook != nil { - if err = c.Hook(pid); err != nil { - fmt.Fprintf(continueWrite, "hook error: %v", err) - return err - } - } - - return nil -} - -func (c *Cmd) Run() error { - if err := c.Start(); err != nil { - return err - } - return c.Wait() -} - -func (c *Cmd) CombinedOutput() ([]byte, error) { - return nil, errors.New("unshare: CombinedOutput() not implemented") -} - -func (c *Cmd) Output() ([]byte, error) { - return nil, errors.New("unshare: Output() not implemented") -} diff --git a/vendor/github.com/projectatomic/buildah/unshare/unshare_cgo.go b/vendor/github.com/projectatomic/buildah/unshare/unshare_cgo.go deleted file mode 100644 index 26a0b2c20..000000000 --- a/vendor/github.com/projectatomic/buildah/unshare/unshare_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux,cgo,!gccgo - -package unshare - -// #cgo CFLAGS: -Wall -// extern void _buildah_unshare(void); -// void __attribute__((constructor)) init(void) { -// _buildah_unshare(); -// } -import "C" diff --git a/vendor/github.com/projectatomic/buildah/unshare/unshare_gccgo.go b/vendor/github.com/projectatomic/buildah/unshare/unshare_gccgo.go deleted file mode 100644 index c4811782a..000000000 --- a/vendor/github.com/projectatomic/buildah/unshare/unshare_gccgo.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,cgo,gccgo - -package unshare - -// #cgo CFLAGS: -Wall -Wextra -// extern void _buildah_unshare(void); -// void __attribute__((constructor)) init(void) { -// _buildah_unshare(); -// } -import "C" - -// This next bit is straight out of libcontainer. - -// AlwaysFalse is here to stay false -// (and be exported so the compiler doesn't optimize out its reference) -var AlwaysFalse bool - -func init() { - if AlwaysFalse { - // by referencing this C init() in a noop test, it will ensure the compiler - // links in the C function. - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 - C.init() - } -} diff --git a/vendor/github.com/projectatomic/buildah/unshare/unshare_unsupported.go b/vendor/github.com/projectatomic/buildah/unshare/unshare_unsupported.go deleted file mode 100644 index feeceae66..000000000 --- a/vendor/github.com/projectatomic/buildah/unshare/unshare_unsupported.go +++ /dev/null @@ -1 +0,0 @@ -package unshare diff --git a/vendor/github.com/projectatomic/buildah/util.go b/vendor/github.com/projectatomic/buildah/util.go deleted file mode 100644 index ef9be87fb..000000000 --- a/vendor/github.com/projectatomic/buildah/util.go +++ /dev/null @@ -1,196 +0,0 @@ -package buildah - -import ( - "archive/tar" - "io" - "os" - "sync" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/sysregistriesv2" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/chrootarchive" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/reexec" - rspec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// InitReexec is a wrapper for reexec.Init(). It should be called at -// the start of main(), and if it returns true, main() should return -// immediately. 
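// A minimal usage sketch for InitReexec (an assumption following the comment
// above; not part of this file):
//
//	func main() {
//		// If this process was re-exec'd to run a registered helper, the
//		// helper has already run and the normal main logic must be skipped.
//		if buildah.InitReexec() {
//			return
//		}
//		// ... normal startup ...
//	}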
-func InitReexec() bool { - return reexec.Init() -} - -func copyStringStringMap(m map[string]string) map[string]string { - n := map[string]string{} - for k, v := range m { - n[k] = v - } - return n -} - -func copyStringSlice(s []string) []string { - t := make([]string, len(s)) - copy(t, s) - return t -} - -func convertStorageIDMaps(UIDMap, GIDMap []idtools.IDMap) ([]rspec.LinuxIDMapping, []rspec.LinuxIDMapping) { - uidmap := make([]rspec.LinuxIDMapping, 0, len(UIDMap)) - gidmap := make([]rspec.LinuxIDMapping, 0, len(GIDMap)) - for _, m := range UIDMap { - uidmap = append(uidmap, rspec.LinuxIDMapping{ - HostID: uint32(m.HostID), - ContainerID: uint32(m.ContainerID), - Size: uint32(m.Size), - }) - } - for _, m := range GIDMap { - gidmap = append(gidmap, rspec.LinuxIDMapping{ - HostID: uint32(m.HostID), - ContainerID: uint32(m.ContainerID), - Size: uint32(m.Size), - }) - } - return uidmap, gidmap -} - -func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMap, []idtools.IDMap) { - uidmap := make([]idtools.IDMap, 0, len(UIDMap)) - gidmap := make([]idtools.IDMap, 0, len(GIDMap)) - for _, m := range UIDMap { - uidmap = append(uidmap, idtools.IDMap{ - HostID: int(m.HostID), - ContainerID: int(m.ContainerID), - Size: int(m.Size), - }) - } - for _, m := range GIDMap { - gidmap = append(gidmap, idtools.IDMap{ - HostID: int(m.HostID), - ContainerID: int(m.ContainerID), - Size: int(m.Size), - }) - } - return uidmap, gidmap -} - -// copyFileWithTar returns a function which copies a single file from outside -// of any container into our working container, mapping permissions using the -// container's ID maps, possibly overridden using the passed-in chownOpts -func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { - convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) - untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) - archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings) - if hasher != nil { - originalUntar := archiver.Untar - archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - contentReader, contentWriter, err := os.Pipe() - if err != nil { - return err - } - defer contentReader.Close() - defer contentWriter.Close() - var hashError error - var hashWorker sync.WaitGroup - hashWorker.Add(1) - go func() { - t := tar.NewReader(contentReader) - _, err := t.Next() - if err != nil { - hashError = err - } - if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { - hashError = err - } - hashWorker.Done() - }() - err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options) - hashWorker.Wait() - if err == nil { - err = hashError - } - return err - } - } - return archiver.CopyFileWithTar -} - -// copyWithTar returns a function which copies a directory tree from outside of -// any container into our working container, mapping permissions using the -// container's ID maps, possibly overridden using the passed-in chownOpts -func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { - convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) - untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) - archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings) - if hasher != nil { - originalUntar := archiver.Untar - 
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-			return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
-		}
-	}
-	return archiver.CopyWithTar
-}
-
-// untarPath returns a function which extracts an archive at a specified
-// location into our working container, mapping permissions using the
-// container's ID maps, possibly overridden using the passed-in chownOpts
-func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error {
-	convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
-	untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
-	archiver := chrootarchive.NewArchiverWithChown(nil, chownOpts, untarMappings)
-	if hasher != nil {
-		originalUntar := archiver.Untar
-		archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-			return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
-		}
-	}
-	return archiver.UntarPath
-}
-
-// tarPath returns a function which creates an archive of a specified
-// location in the container's filesystem, mapping permissions using the
-// container's ID maps
-func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) {
-	convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
-	tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
-	return func(path string) (io.ReadCloser, error) {
-		return archive.TarWithOptions(path, &archive.TarOptions{
-			Compression: archive.Uncompressed,
-			UIDMaps:     tarMappings.UIDs(),
-			GIDMaps:     tarMappings.GIDs(),
-		})
-	}
-}
-
-// getRegistries obtains the list of search registries defined in the global registries file.
-func getRegistries(sc *types.SystemContext) ([]string, error) {
-	var searchRegistries []string
-	registries, err := sysregistriesv2.GetRegistries(sc)
-	if err != nil {
-		return nil, errors.Wrapf(err, "unable to parse the registries.conf file")
-	}
-	for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(registries) {
-		if !registry.Blocked {
-			searchRegistries = append(searchRegistries, registry.URL)
-		}
-	}
-	return searchRegistries, nil
-}
-
-// hasRegistry reports whether the image name includes a registry (domain) in
-// its name; it returns an error if the name cannot be parsed.
-func hasRegistry(imageName string) (bool, error) {
-	imgRef, err := reference.Parse(imageName)
-	if err != nil {
-		return false, err
-	}
-	registry := reference.Domain(imgRef.(reference.Named))
-	if registry != "" {
-		return true, nil
-	}
-	return false, nil
-}
diff --git a/vendor/github.com/projectatomic/buildah/util/types.go b/vendor/github.com/projectatomic/buildah/util/types.go
deleted file mode 100644
index dc5f4b6c8..000000000
--- a/vendor/github.com/projectatomic/buildah/util/types.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package util
-
-const (
-	// DefaultRuntime is the default command to use to run the container.
-	DefaultRuntime = "runc"
-	// DefaultCNIPluginPath is the default location of CNI plugin helpers.
-	DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin"
-	// DefaultCNIConfigDir is the default location of CNI configuration files.
-	DefaultCNIConfigDir = "/etc/cni/net.d"
-)
-
-var (
-	// DefaultCapabilities is the list of capabilities which we grant by
-	// default to containers which are running under UID 0.
-	DefaultCapabilities = []string{
-		"CAP_AUDIT_WRITE",
-		"CAP_CHOWN",
-		"CAP_DAC_OVERRIDE",
-		"CAP_FOWNER",
-		"CAP_FSETID",
-		"CAP_KILL",
-		"CAP_MKNOD",
-		"CAP_NET_BIND_SERVICE",
-		"CAP_SETFCAP",
-		"CAP_SETGID",
-		"CAP_SETPCAP",
-		"CAP_SETUID",
-		"CAP_SYS_CHROOT",
-	}
-	// DefaultNetworkSysctl is the map of kernel parameters which we
-	// set by default for containers which are running under UID 0.
-	DefaultNetworkSysctl = map[string]string{
-		"net.ipv4.ping_group_range": "0 0",
-	}
-)
diff --git a/vendor/github.com/projectatomic/buildah/util/util.go b/vendor/github.com/projectatomic/buildah/util/util.go
deleted file mode 100644
index 93323232d..000000000
--- a/vendor/github.com/projectatomic/buildah/util/util.go
+++ /dev/null
@@ -1,494 +0,0 @@
-package util
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/containers/image/directory"
-	dockerarchive "github.com/containers/image/docker/archive"
-	"github.com/containers/image/docker/reference"
-	ociarchive "github.com/containers/image/oci/archive"
-	"github.com/containers/image/pkg/sysregistriesv2"
-	"github.com/containers/image/signature"
-	is "github.com/containers/image/storage"
-	"github.com/containers/image/tarball"
-	"github.com/containers/image/types"
-	"github.com/containers/storage"
-	"github.com/containers/storage/pkg/idtools"
-	"github.com/docker/distribution/registry/api/errcode"
-	"github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	minimumTruncatedIDLength = 3
-)
-
-var (
-	// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
-	// to prepend to image names that only contain a single path component.
-	RegistryDefaultPathPrefix = map[string]string{
-		"index.docker.io": "library",
-		"docker.io":       "library",
-	}
-	// Transports contains the possible transports used for images
-	Transports = map[string]string{
-		dockerarchive.Transport.Name(): "",
-		ociarchive.Transport.Name():    "",
-		directory.Transport.Name():     "",
-		tarball.Transport.Name():       "",
-	}
-	// DockerArchive is the transport we prepend to an image name
-	// when saving to docker-archive
-	DockerArchive = dockerarchive.Transport.Name()
-	// OCIArchive is the transport we prepend to an image name
-	// when saving to oci-archive
-	OCIArchive = ociarchive.Transport.Name()
-	// DirTransport is the transport for pushing and pulling
-	// images to and from a directory
-	DirTransport = directory.Transport.Name()
-	// TarballTransport is the transport for importing a tar archive
-	// and creating a filesystem image
-	TarballTransport = tarball.Transport.Name()
-)
-
-// ResolveName checks if name is a valid image name, and if that name doesn't
-// include a domain portion, returns a list of the names which it might
-// correspond to in the set of configured registries.
-func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, error) {
-	if name == "" {
-		return nil, nil
-	}
-
-	// Maybe it's a truncated image ID. Don't prepend a registry name, then.
-	if len(name) >= minimumTruncatedIDLength {
-		if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
-			// It's a truncated version of the ID of an image that's present in local storage;
-			// we need only expand the ID.
-			return []string{img.ID}, nil
-		}
-	}
-
-	// If the image includes a transport's name as a prefix, use it as-is.
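// For example (annotation, not from the original file): a name such as
// "docker-archive:/tmp/img.tar" splits into ["docker-archive", "/tmp/img.tar"]
// in the code below, and the part after the colon is returned as the only
// candidate, bypassing registry resolution entirely.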
- split := strings.SplitN(name, ":", 2) - if len(split) == 2 { - if _, ok := Transports[split[0]]; ok { - return []string{split[1]}, nil - } - } - - // If the image name already included a domain component, we're done. - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, errors.Wrapf(err, "error parsing image name %q", name) - } - if named.String() == name { - // Parsing produced the same result, so there was a domain name in there to begin with. - return []string{name}, nil - } - if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" { - // If this domain can cause us to insert something in the middle, check if that happened. - repoPath := reference.Path(named) - domain := reference.Domain(named) - tag := "" - if tagged, ok := named.(reference.Tagged); ok { - tag = ":" + tagged.Tag() - } - digest := "" - if digested, ok := named.(reference.Digested); ok { - digest = "@" + digested.Digest().String() - } - defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/" - if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name { - // Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with. - return []string{name}, nil - } - } - - // Figure out the list of registries. - var registries []string - allRegistries, err := sysregistriesv2.GetRegistries(sc) - if err != nil { - logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) - registries = []string{} - } - for _, registry := range sysregistriesv2.FindUnqualifiedSearchRegistries(allRegistries) { - if !registry.Blocked { - registries = append(registries, registry.URL) - } - } - - // Create all of the combinations. Some registries need an additional component added, so - // use our lookaside map to keep track of them. If there are no configured registries, we'll - // return a name using "localhost" as the registry name. - candidates := []string{} - initRegistries := []string{"localhost"} - if firstRegistry != "" && firstRegistry != "localhost" { - initRegistries = append([]string{firstRegistry}, initRegistries...) - } - for _, registry := range append(initRegistries, registries...) { - if registry == "" { - continue - } - middle := "" - if prefix, ok := RegistryDefaultPathPrefix[registry]; ok && strings.IndexRune(name, '/') == -1 { - middle = prefix - } - candidate := path.Join(registry, middle, name) - candidates = append(candidates, candidate) - } - return candidates, nil -} - -// ExpandNames takes unqualified names, parses them as image names, and returns -// the fully expanded result, including a tag. Names which don't include a registry -// name will be marked for the most-preferred registry (i.e., the first one in our -// configuration). 
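// A sketch of the expected result (an assumption based on the logic above and
// below, not captured output): with no search registries configured, and
// assuming "myimage" is not a truncated image ID already in local storage,
//
//	ExpandNames([]string{"myimage"}, "", sc, store)
//
// resolves via ResolveName to "localhost/myimage" and, after
// reference.TagNameOnly, comes back as "localhost/myimage:latest".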
-func ExpandNames(names []string, firstRegistry string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
-	expanded := make([]string, 0, len(names))
-	for _, n := range names {
-		var name reference.Named
-		nameList, err := ResolveName(n, firstRegistry, systemContext, store)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error parsing name %q", n)
-		}
-		if len(nameList) == 0 {
-			named, err := reference.ParseNormalizedNamed(n)
-			if err != nil {
-				return nil, errors.Wrapf(err, "error parsing name %q", n)
-			}
-			name = named
-		} else {
-			named, err := reference.ParseNormalizedNamed(nameList[0])
-			if err != nil {
-				return nil, errors.Wrapf(err, "error parsing name %q", nameList[0])
-			}
-			name = named
-		}
-		name = reference.TagNameOnly(name)
-		tag := ""
-		digest := ""
-		if tagged, ok := name.(reference.NamedTagged); ok {
-			tag = ":" + tagged.Tag()
-		}
-		if digested, ok := name.(reference.Digested); ok {
-			digest = "@" + digested.Digest().String()
-		}
-		expanded = append(expanded, name.Name()+tag+digest)
-	}
-	return expanded, nil
-}
-
-// FindImage locates the locally-stored image which corresponds to a given name.
-func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
-	var ref types.ImageReference
-	var img *storage.Image
-	var err error
-	names, err := ResolveName(image, firstRegistry, systemContext, store)
-	if err != nil {
-		return nil, nil, errors.Wrapf(err, "error parsing name %q", image)
-	}
-	for _, name := range names {
-		ref, err = is.Transport.ParseStoreReference(store, name)
-		if err != nil {
-			logrus.Debugf("error parsing reference to image %q: %v", name, err)
-			continue
-		}
-		img, err = is.Transport.GetStoreImage(store, ref)
-		if err != nil {
-			img2, err2 := store.Image(name)
-			if err2 != nil {
-				logrus.Debugf("error locating image %q: %v", name, err2)
-				continue
-			}
-			img = img2
-		}
-		break
-	}
-	if ref == nil || img == nil {
-		return nil, nil, errors.Wrapf(err, "error locating image with name %q", image)
-	}
-	return ref, img, nil
-}
-
-// AddImageNames adds the specified names to the specified image.
-func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
-	names, err := ExpandNames(addNames, firstRegistry, systemContext, store)
-	if err != nil {
-		return err
-	}
-	err = store.SetNames(image.ID, append(image.Names, names...))
-	if err != nil {
-		return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
-	}
-	return nil
-}
-
-// GetFailureCause checks the type of the error "err" and returns a new
-// error that reflects the reason for the failure.
-// If the type of err is not one we recognize, the error "defaultError" is returned.
-func GetFailureCause(err, defaultError error) error {
-	switch nErr := errors.Cause(err).(type) {
-	case errcode.Errors:
-		return err
-	case errcode.Error, *url.Error:
-		return nErr
-	default:
-		return defaultError
-	}
-}
-
-// WriteError writes `lastError` into `w` if it is not nil, and returns the next error `err`.
-func WriteError(w io.Writer, err error, lastError error) error {
-	if lastError != nil {
-		fmt.Fprintln(w, lastError)
-	}
-	return err
-}
-
-// Runtime returns the command to use to run containers, preferring $BUILDAH_RUNTIME over DefaultRuntime.
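// For example (a sketch of the behavior of the function below; the path is
// made up):
//
//	os.Setenv("BUILDAH_RUNTIME", "/usr/local/bin/crun")
//	util.Runtime() // "/usr/local/bin/crun"
//	os.Unsetenv("BUILDAH_RUNTIME")
//	util.Runtime() // DefaultRuntime, i.e. "runc"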
-func Runtime() string { - runtime := os.Getenv("BUILDAH_RUNTIME") - if runtime != "" { - return runtime - } - return DefaultRuntime -} - -// StringInSlice returns a boolean indicating if the exact value s is present -// in the slice slice. -func StringInSlice(s string, slice []string) bool { - for _, v := range slice { - if v == s { - return true - } - } - return false -} - -// GetHostIDs uses ID mappings to compute the host-level IDs that will -// correspond to a UID/GID pair in the container. -func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) { - uidMapped := true - for _, m := range uidmap { - uidMapped = false - if uid >= m.ContainerID && uid < m.ContainerID+m.Size { - uid = (uid - m.ContainerID) + m.HostID - uidMapped = true - break - } - } - if !uidMapped { - return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map UID %d", uid) - } - gidMapped := true - for _, m := range gidmap { - gidMapped = false - if gid >= m.ContainerID && gid < m.ContainerID+m.Size { - gid = (gid - m.ContainerID) + m.HostID - gidMapped = true - break - } - } - if !gidMapped { - return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map GID %d", gid) - } - return uid, gid, nil -} - -// GetHostRootIDs uses ID mappings in spec to compute the host-level IDs that will -// correspond to UID/GID 0/0 in the container. -func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) { - if spec.Linux == nil { - return 0, 0, nil - } - return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0) -} - -// getHostIDMappings reads mappings from the named node under /proc. -func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { - var mappings []specs.LinuxIDMapping - f, err := os.Open(path) - if err != nil { - return nil, errors.Wrapf(err, "error reading ID mappings from %q", path) - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - fields := strings.Fields(line) - if len(fields) != 3 { - return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) - } - cid, err := strconv.ParseUint(fields[0], 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path) - } - hid, err := strconv.ParseUint(fields[1], 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path) - } - size, err := strconv.ParseUint(fields[2], 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path) - } - mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) - } - return mappings, nil -} - -// GetHostIDMappings reads mappings for the specified process (or the current -// process if pid is "self" or an empty string) from the kernel. -func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - if pid == "" { - pid = "self" - } - uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) - if err != nil { - return nil, nil, err - } - gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) - if err != nil { - return nil, nil, err - } - return uidmap, gidmap, nil -} - -// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. 
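// Annotation for GetSubIDMappings below (an assumption about the vendored
// idtools behavior): /etc/subuid and /etc/subgid entries have the form
//
//	someuser:165536:65536
//
// and idtools.NewIDMappings turns each subordinate range into a mapping whose
// container IDs start at 0, e.g. {ContainerID: 0, HostID: 165536, Size: 65536}.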
-func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
-	mappings, err := idtools.NewIDMappings(user, group)
-	if err != nil {
-		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
-	}
-	var uidmap, gidmap []specs.LinuxIDMapping
-	for _, m := range mappings.UIDs() {
-		uidmap = append(uidmap, specs.LinuxIDMapping{
-			ContainerID: uint32(m.ContainerID),
-			HostID:      uint32(m.HostID),
-			Size:        uint32(m.Size),
-		})
-	}
-	for _, m := range mappings.GIDs() {
-		gidmap = append(gidmap, specs.LinuxIDMapping{
-			ContainerID: uint32(m.ContainerID),
-			HostID:      uint32(m.HostID),
-			Size:        uint32(m.Size),
-		})
-	}
-	return uidmap, gidmap, nil
-}
-
-// ParseIDMappings parses mapping triples.
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
-	nonDigitsToWhitespace := func(r rune) rune {
-		if strings.IndexRune("0123456789", r) == -1 {
-			return ' '
-		} else {
-			return r
-		}
-	}
-	parseTriple := func(spec []string) (container, host, size uint32, err error) {
-		cid, err := strconv.ParseUint(spec[0], 10, 32)
-		if err != nil {
-			return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err)
-		}
-		hid, err := strconv.ParseUint(spec[1], 10, 32)
-		if err != nil {
-			return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err)
-		}
-		sz, err := strconv.ParseUint(spec[2], 10, 32)
-		if err != nil {
-			return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err)
-		}
-		return uint32(cid), uint32(hid), uint32(sz), nil
-	}
-	parseIDMap := func(mapSpec []string, mapSetting string) (idmap []idtools.IDMap, err error) {
-		for _, idMapSpec := range mapSpec {
-			idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
-			if len(idSpec)%3 != 0 {
-				return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
-			}
-			for i := range idSpec {
-				if i%3 != 0 {
-					continue
-				}
-				cid, hid, size, err := parseTriple(idSpec[i : i+3])
-				if err != nil {
-					return nil, errors.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
-				}
-				mapping := idtools.IDMap{
-					ContainerID: int(cid),
-					HostID:      int(hid),
-					Size:        int(size),
-				}
-				idmap = append(idmap, mapping)
-			}
-		}
-		return idmap, nil
-	}
-	uid, err := parseIDMap(uidmap, "userns-uid-map")
-	if err != nil {
-		return nil, nil, err
-	}
-	gid, err := parseIDMap(gidmap, "userns-gid-map")
-	if err != nil {
-		return nil, nil, err
-	}
-	return uid, gid, nil
-}
-
-// UnsharedRootPath returns $XDG_DATA_HOME/containers/storage,
-// or $HOME/.local/share/containers/storage, or
-// (the user's home directory)/.local/share/containers/storage, or an error.
-func UnsharedRootPath(homedir string) (string, error) {
-	// If $XDG_DATA_HOME is defined...
-	if envDataHome, haveDataHome := os.LookupEnv("XDG_DATA_HOME"); haveDataHome {
-		return filepath.Join(envDataHome, "containers", "storage"), nil
-	}
-	// If $XDG_DATA_HOME is not defined, but $HOME is defined...
-	if envHomedir, haveHomedir := os.LookupEnv("HOME"); haveHomedir {
-		// Default to the user's $HOME/.local/share/containers/storage subdirectory.
-		return filepath.Join(envHomedir, ".local", "share", "containers", "storage"), nil
-	}
-	// If we know where our home directory is...
-	if homedir != "" {
-		// Default to the user's homedir/.local/share/containers/storage subdirectory.
- return filepath.Join(homedir, ".local", "share", "containers", "storage"), nil - } - return "", errors.New("unable to determine a --root location: neither $XDG_DATA_HOME nor $HOME is set") -} - -// UnsharedRunrootPath returns $XDG_RUNTIME_DIR/run, /var/run/user/(the user's UID)/run, or an error. -func UnsharedRunrootPath(uid string) (string, error) { - // If $XDG_RUNTIME_DIR is defined... - if envRuntimeDir, haveRuntimeDir := os.LookupEnv("XDG_RUNTIME_DIR"); haveRuntimeDir { - return filepath.Join(envRuntimeDir, "run"), nil - } - // If $XDG_RUNTIME_DIR is not defined, but we know our UID... - if uid != "" { - return filepath.Join("/var/run/user", uid, "run"), nil - } - return "", errors.New("unable to determine a --runroot location: $XDG_RUNTIME_DIR is not set, and we don't know our UID") -} - -// GetPolicyContext sets up, initializes and returns a new context for the specified policy -func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) { - policy, err := signature.DefaultPolicy(ctx) - if err != nil { - return nil, err - } - - policyContext, err := signature.NewPolicyContext(policy) - if err != nil { - return nil, err - } - return policyContext, nil -} diff --git a/vendor/github.com/projectatomic/buildah/vendor.conf b/vendor/github.com/projectatomic/buildah/vendor.conf deleted file mode 100644 index 0112a2d91..000000000 --- a/vendor/github.com/projectatomic/buildah/vendor.conf +++ /dev/null @@ -1,63 +0,0 @@ -github.com/Azure/go-ansiterm master -github.com/blang/semver master -github.com/BurntSushi/toml master -github.com/containerd/continuity master -github.com/containernetworking/cni v0.7.0-alpha1 -github.com/seccomp/containers-golang master -github.com/containers/image d8b5cf2b804a48489e5203d51254ef576794049d -github.com/containers/storage 243c4cd616afdf06b4a975f18c4db083d26b1641 -github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 -github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 -github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 -github.com/docker/engine-api master -github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d -github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 -github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 -github.com/fsouza/go-dockerclient master -github.com/ghodss/yaml master -github.com/gogo/protobuf master -github.com/golang/glog master -github.com/gorilla/context master -github.com/gorilla/mux master -github.com/hashicorp/errwrap master -github.com/hashicorp/go-cleanhttp master -github.com/hashicorp/go-multierror master -github.com/imdario/mergo master -github.com/mattn/go-runewidth master -github.com/mattn/go-shellwords master -github.com/Microsoft/go-winio master -github.com/Microsoft/hcsshim master -github.com/mistifyio/go-zfs master -github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c -github.com/mtrmac/gpgme master -github.com/Nvveen/Gotty master -github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc -github.com/opencontainers/image-spec v1.0.0 -github.com/opencontainers/runc master -github.com/opencontainers/runtime-spec v1.0.0 -github.com/opencontainers/runtime-tools master -github.com/opencontainers/selinux b6fa367ed7f534f9ba25391cc2d467085dbb445a -github.com/openshift/imagebuilder master -github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 -github.com/pborman/uuid master -github.com/pkg/errors master -github.com/pquerna/ffjson 
d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac -github.com/containers/libpod 2afadeec6696fefac468a49c8ba24b0bc275aa75 -github.com/sirupsen/logrus master -github.com/syndtr/gocapability master -github.com/tchap/go-patricia master -github.com/ulikunitz/xz v0.5.4 -github.com/urfave/cli 934abfb2f102315b5794e15ebc7949e4ca253920 -github.com/vbatts/tar-split v0.10.2 -github.com/xeipuuv/gojsonpointer master -github.com/xeipuuv/gojsonreference master -github.com/xeipuuv/gojsonschema master -golang.org/x/crypto master -golang.org/x/net master -golang.org/x/sys master -golang.org/x/text master -gopkg.in/cheggaaa/pb.v1 v1.0.13 -gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b -k8s.io/apimachinery master -k8s.io/client-go master -k8s.io/kubernetes master -- cgit v1.2.3-54-g00ecf
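For consumers of the vendored library, the visible effect of this patch is the import-path move. A minimal sketch of the corresponding change in a caller (illustrative only; the surrounding code is made up):

	package main

	import (
		// was: "github.com/projectatomic/buildah"
		"github.com/containers/buildah"
	)

	func main() {
		// The API is unchanged by the move; only the import path differs.
		if buildah.InitReexec() {
			return
		}
	}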