34 files changed, 658 insertions, 263 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 4344b6b0a..848dc2b6d 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -31,9 +31,9 @@ env: #### #### Cache-image names to test with ### - FEDORA_CACHE_IMAGE_NAME: "fedora-29-libpod-5157899144265728" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-28-libpod-5157899144265728" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-libpod-5157899144265728" + FEDORA_CACHE_IMAGE_NAME: "fedora-29-libpod-4844850202017792" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-28-libpod-4844850202017792" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-libpod-4844850202017792" #### #### Variables for composing new cache-images (used in PR testing) from @@ -43,7 +43,7 @@ env: # Git commits to use while building dependencies into cache-images FEDORA_CNI_COMMIT: "412b6d31280682bb4fab4446f113c22ff1886554" CNI_COMMIT: "7480240de9749f9a0a5c8614b17f1f03e0c06ab9" - CONMON_COMMIT: "f02c053eb37010fc76d1e2966de7f2cb9f969ef2" + CONMON_COMMIT: "8455ce1ef385120deb827d0f0588c04357bad4c4" CRIU_COMMIT: "c74b83cd49c00589c0c0468ba5fe685b67fdbd0a" # Special image w/ nested-libvirt + tools for creating new cache and base images IMAGE_BUILDER_CACHE_IMAGE_NAME: "image-builder-image-1541772081" diff --git a/.copr/prepare.sh b/.copr/prepare.sh index 57c380b02..d7c5083ca 100644 --- a/.copr/prepare.sh +++ b/.copr/prepare.sh @@ -29,4 +29,4 @@ fi mkdir build/ git archive --prefix "libpod-${COMMIT_SHORT}/" --format "tar.gz" HEAD -o "build/libpod-${COMMIT_SHORT}.tar.gz" git clone https://github.com/containers/conmon -cd conmon && git checkout 59952292a3b07ac125575024ae21956efe0ecdfb && git archive --prefix "conmon/" --format "tar.gz" HEAD -o "../build/conmon.tar.gz" +cd conmon && git checkout 8455ce1ef385120deb827d0f0588c04357bad4c4 && git archive --prefix "conmon/" --format "tar.gz" HEAD -o "../build/conmon.tar.gz" diff --git a/Dockerfile b/Dockerfile index 0f2249dc2..214fbeb34 100644 --- a/Dockerfile +++ b/Dockerfile @@ -56,7 +56,7 @@ RUN set -x \ && rm -rf "$GOPATH" # Install conmon -ENV CONMON_COMMIT 59952292a3b07ac125575024ae21956efe0ecdfb +ENV CONMON_COMMIT 8455ce1ef385120deb827d0f0588c04357bad4c4 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \ diff --git a/Dockerfile.centos b/Dockerfile.centos index 7f4cef963..72b926bff 100644 --- a/Dockerfile.centos +++ b/Dockerfile.centos @@ -50,7 +50,7 @@ RUN set -x \ && go get github.com/onsi/gomega/... # Install conmon -ENV CONMON_COMMIT 59952292a3b07ac125575024ae21956efe0ecdfb +ENV CONMON_COMMIT 8455ce1ef385120deb827d0f0588c04357bad4c4 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \ diff --git a/Dockerfile.fedora b/Dockerfile.fedora index 5ea82c967..c34d4bb16 100644 --- a/Dockerfile.fedora +++ b/Dockerfile.fedora @@ -54,7 +54,7 @@ RUN set -x \ && go get github.com/onsi/gomega/... 
# Install conmon -ENV CONMON_COMMIT 59952292a3b07ac125575024ae21956efe0ecdfb +ENV CONMON_COMMIT 8455ce1ef385120deb827d0f0588c04357bad4c4 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/containers/conmon.git "$GOPATH/src/github.com/containers/conmon.git" \ diff --git a/contrib/cirrus/lib.sh b/contrib/cirrus/lib.sh index 462fa332a..97901cfc7 100644 --- a/contrib/cirrus/lib.sh +++ b/contrib/cirrus/lib.sh @@ -211,6 +211,8 @@ setup_rootless() { # Works with older versions of bash printf "${_env_var_name}=%q\n" "$(printenv $_env_var_name)" >> "/home/$ROOTLESS_USER/.bashrc" done + echo "Ensure the systems ssh process is up and running" + systemctl --wait restart sshd # a regular 'start' could hang forever } # Helper/wrapper script to only show stderr/stdout on non-zero exit diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in index 8c2ccd4b0..d755fd1aa 100644 --- a/contrib/spec/podman.spec.in +++ b/contrib/spec/podman.spec.in @@ -35,7 +35,7 @@ # People want conmon packaged with the copr rpm %global import_path_conmon github.com/containers/conmon %global git_conmon https://%{import_path_conmon} -%global commit_conmon 59952292a3b07ac125575024ae21956efe0ecdfb +%global commit_conmon 8455ce1ef385120deb827d0f0588c04357bad4c4 %global shortcommit_conmon %(c=%{commit_conmon}; echo ${c:0:7}) Name: podman diff --git a/docs/podman-create.1.md b/docs/podman-create.1.md index 88f8fe8c7..a4eebef4c 100644 --- a/docs/podman-create.1.md +++ b/docs/podman-create.1.md @@ -458,6 +458,7 @@ Current supported mount TYPES are bind, and tmpfs. Options specific to bind: · bind-propagation: shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2). + . bind-nonrecursive: do not setup a recursive bind mount. By default it is recursive. Options specific to tmpfs: diff --git a/docs/podman-run.1.md b/docs/podman-run.1.md index 8facccd3c..eca98514b 100644 --- a/docs/podman-run.1.md +++ b/docs/podman-run.1.md @@ -471,6 +471,7 @@ Current supported mount TYPES are bind, and tmpfs. Options specific to bind: · bind-propagation: Z, z, shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2). + . bind-nonrecursive: do not setup a recursive bind mount. By default it is recursive. Options specific to tmpfs: diff --git a/pkg/registries/registries.go b/pkg/registries/registries.go index 5c4ecd020..de63dcbf1 100644 --- a/pkg/registries/registries.go +++ b/pkg/registries/registries.go @@ -44,17 +44,7 @@ func getRegistries() ([]sysregistriesv2.Registry, error) { // GetRegistries obtains the list of search registries defined in the global registries file. func GetRegistries() ([]string, error) { - var searchRegistries []string - registries, err := getRegistries() - if err != nil { - return nil, err - } - for _, reg := range registries { - if reg.Search { - searchRegistries = append(searchRegistries, reg.Location) - } - } - return searchRegistries, nil + return sysregistriesv2.UnqualifiedSearchRegistries(&types.SystemContext{SystemRegistriesConfPath: SystemRegistriesConfPath()}) } // GetBlockedRegistries obtains the list of blocked registries defined in the global registries file. 
@@ -66,7 +56,7 @@ func GetBlockedRegistries() ([]string, error) { } for _, reg := range registries { if reg.Blocked { - blockedRegistries = append(blockedRegistries, reg.Location) + blockedRegistries = append(blockedRegistries, reg.Prefix) } } return blockedRegistries, nil @@ -81,7 +71,7 @@ func GetInsecureRegistries() ([]string, error) { } for _, reg := range registries { if reg.Insecure { - insecureRegistries = append(insecureRegistries, reg.Location) + insecureRegistries = append(insecureRegistries, reg.Prefix) } } return insecureRegistries, nil diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go index e221b5cb5..283585ef8 100644 --- a/pkg/spec/storage.go +++ b/pkg/spec/storage.go @@ -384,7 +384,7 @@ func (config *CreateConfig) getMounts() (map[string]spec.Mount, map[string]*libp } finalNamedVolumes[volume.Dest] = volume default: - return nil, nil, errors.Errorf("invalid fylesystem type %q", kv[1]) + return nil, nil, errors.Errorf("invalid filesystem type %q", kv[1]) } } @@ -403,6 +403,8 @@ func getBindMount(args []string) (spec.Mount, error) { for _, val := range args { kv := strings.Split(val, "=") switch kv[0] { + case "bind-nonrecursive": + newMount.Options = append(newMount.Options, "bind") case "ro", "nosuid", "nodev", "noexec": // TODO: detect duplication of these options. // (Is this necessary?) @@ -574,7 +576,7 @@ func ValidateVolumeCtrDir(ctrDir string) error { // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) error { - var foundRootPropagation, foundRWRO, foundLabelChange int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType int for _, opt := range options { switch opt { case "rw", "ro": @@ -592,6 +594,11 @@ func ValidateVolumeOpts(options []string) error { if foundRootPropagation > 1 { return errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private' or '[r]slave' option", strings.Join(options, ", ")) } + case "bind", "rbind": + bindType++ + if bindType > 1 { + return errors.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", ")) + } default: return errors.Errorf("invalid option type %q", opt) } diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go index 59459807c..489e7eeef 100644 --- a/pkg/util/mountOpts.go +++ b/pkg/util/mountOpts.go @@ -17,10 +17,19 @@ var ( // sensible and follow convention. 
func ProcessOptions(options []string) []string { var ( - foundrw, foundro bool - rootProp string + foundbind, foundrw, foundro bool + rootProp string ) - options = append(options, "rbind") + for _, opt := range options { + switch opt { + case "bind", "rbind": + foundbind = true + break + } + } + if !foundbind { + options = append(options, "rbind") + } for _, opt := range options { switch opt { case "rw": diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go index 31720ea04..3ba3c2bb3 100644 --- a/test/e2e/run_test.go +++ b/test/e2e/run_test.go @@ -659,6 +659,14 @@ USER mail` Expect(isSharedOnly).Should(BeTrue()) }) + It("podman run --mount type=bind,bind-nonrecursive", func() { + SkipIfRootless() + session := podmanTest.Podman([]string{"run", "--mount", "type=bind,bind-nonrecursive,slave,src=/,target=/host", fedoraMinimal, "findmnt", "-nR", "/host"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + Expect(len(session.OutputToStringArray())).To(Equal(1)) + }) + It("podman run --pod automatically", func() { session := podmanTest.Podman([]string{"run", "--pod", "new:foobar", ALPINE, "ls"}) session.WaitWithDefaultTimeout() diff --git a/vendor.conf b/vendor.conf index b7cb584f0..9b9044b66 100644 --- a/vendor.conf +++ b/vendor.conf @@ -15,7 +15,7 @@ github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d github.com/containernetworking/cni v0.7.0-rc2 github.com/containernetworking/plugins v0.7.4 -github.com/containers/image 2c0349c99af7d90694b3faa0e9bde404d407b145 +github.com/containers/image v2.0.0 github.com/vbauerster/mpb v3.3.4 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 @@ -93,7 +93,7 @@ k8s.io/api kubernetes-1.10.13-beta.0 https://github.com/kubernetes/api k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apimachinery k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7 -github.com/containers/buildah v1.8.3 +github.com/containers/buildah v1.8.4 github.com/varlink/go 0f1d566d194b9d6d48e0d47c5e4d822628919066 # TODO: Gotty has not been updated since 2012. Can we find replacement? github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 11bfb6a12..b03aa65b2 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -34,10 +34,14 @@ type AddAndCopyOptions struct { // If the sources include directory trees, Hasher will be passed // tar-format archives of the directory trees. Hasher io.Writer - // Exludes contents in the .dockerignore file + // Excludes is the contents of the .dockerignore file Excludes []string - // current directory on host + // The base directory for Excludes and data to copy in ContextDir string + // ID mapping options to use when contents to be copied are part of + // another container, and need ownerships to be mapped from the host to + // that container's values before copying them into the container. + IDMappingOptions *IDMappingOptions } // addURL copies the contents of the source URL to the destination. 
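A note on the bind-nonrecursive hunks above (the man pages, pkg/spec/storage.go, and pkg/util/mountOpts.go): the net effect is that the default "rbind" is only appended when the caller did not already select "bind" or "rbind", and --mount type=bind,bind-nonrecursive injects a plain "bind" via getBindMount. A minimal, self-contained sketch of that rule follows; the helper name is illustrative, not the actual podman function.

package main

import "fmt"

// processMountOptions mirrors the new ProcessOptions behavior: append the
// default "rbind" only when neither "bind" nor "rbind" was requested, so a
// bind-nonrecursive mount stays non-recursive.
func processMountOptions(options []string) []string {
	foundBind := false
	for _, opt := range options {
		if opt == "bind" || opt == "rbind" {
			foundBind = true
		}
	}
	if !foundBind {
		options = append(options, "rbind")
	}
	return options
}

func main() {
	fmt.Println(processMountOptions([]string{"ro"}))         // [ro rbind]
	fmt.Println(processMountOptions([]string{"ro", "bind"})) // [ro bind]
}

This is what the new e2e test above relies on: with a non-recursive bind of /, findmnt -nR /host reports exactly one mount inside the container.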
This is @@ -116,6 +120,12 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)} dest := mountPoint if destination != "" && filepath.IsAbs(destination) { + dir := filepath.Dir(destination) + if dir != "." && dir != "/" { + if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil { + return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir)) + } + } dest = filepath.Join(dest, destination) } else { if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil { @@ -146,8 +156,8 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if len(source) > 1 && (destfi == nil || !destfi.IsDir()) { return errors.Errorf("destination %q is not a directory", dest) } - copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher) - copyWithTar := b.copyWithTar(&containerOwner, options.Hasher) + copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher) + copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher) untarPath := b.untarPath(nil, options.Hasher) err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...) if err != nil { diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 56c8f088f..b97e048cc 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -26,7 +26,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.8.3" + Version = "1.8.4" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. // This should only be changed when we make incompatible changes to diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go index d6e5a61ea..ae60d9bbe 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -220,7 +220,6 @@ func runUsingChrootMain() { var stdout io.Writer var stderr io.Writer fdDesc := make(map[int]string) - deferred := func() {} if options.Spec.Process.Terminal { // Create a pseudo-terminal -- open a copy of the master side. 
ptyMasterFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600) @@ -370,12 +369,16 @@ func runUsingChrootMain() { return } } + if err := unix.SetNonblock(relays[unix.Stdin], true); err != nil { + logrus.Errorf("error setting %d to nonblocking: %v", relays[unix.Stdin], err) + } go func() { buffers := make(map[int]*bytes.Buffer) for _, writeFd := range relays { buffers[writeFd] = new(bytes.Buffer) } pollTimeout := -1 + stdinClose := false for len(relays) > 0 { fds := make([]unix.PollFd, 0, len(relays)) for fd := range relays { @@ -395,6 +398,9 @@ func runUsingChrootMain() { removeFds[int(rfd.Fd)] = struct{}{} } if rfd.Revents&unix.POLLIN == 0 { + if stdinClose && stdinCopy == nil { + continue + } continue } b := make([]byte, 8192) @@ -449,8 +455,19 @@ func runUsingChrootMain() { if buffer.Len() > 0 { pollTimeout = 100 } + if wfd == relays[unix.Stdin] && stdinClose && buffer.Len() == 0 { + stdinCopy.Close() + delete(relays, unix.Stdin) + } } for rfd := range removeFds { + if rfd == unix.Stdin { + buffer, found := buffers[relays[unix.Stdin]] + if found && buffer.Len() > 0 { + stdinClose = true + continue + } + } if !options.Spec.Process.Terminal && rfd == unix.Stdin { stdinCopy.Close() } @@ -461,7 +478,6 @@ func runUsingChrootMain() { // Set up mounts and namespaces, and run the parent subprocess. status, err := runUsingChroot(options.Spec, options.BundlePath, ctty, stdin, stdout, stderr, closeOnceRunning) - deferred() if err != nil { fmt.Fprintf(os.Stderr, "error running subprocess: %v\n", err) os.Exit(1) diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 215920cc3..dc2d323d4 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -707,7 +707,7 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima exporting: exporting, squash: options.Squash, emptyLayer: options.EmptyLayer, - tarPath: b.tarPath(), + tarPath: b.tarPath(&b.IDMappingOptions), parent: parent, blobDirectory: options.BlobDirectory, preEmptyLayers: b.PrependedEmptyLayers, diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 3665251cd..20d6715f5 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -27,7 +27,7 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" - "github.com/cyphar/filepath-securejoin" + securejoin "github.com/cyphar/filepath-securejoin" docker "github.com/fsouza/go-dockerclient" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" @@ -210,7 +210,6 @@ type Executor struct { annotations []string onbuild []string layers bool - topLayers []string useCache bool removeIntermediateCtrs bool forceRmIntermediateCtrs bool @@ -515,26 +514,55 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err for _, src := range copy.Src { contextDir := s.executor.contextDir copyExcludes := excludes + var idMappingOptions *buildah.IDMappingOptions if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { sources = append(sources, src) } else if len(copy.From) > 0 { + var srcRoot string if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index { - sources = append(sources, filepath.Join(other.mountPoint, src)) + srcRoot = other.mountPoint 
contextDir = other.mountPoint + idMappingOptions = &other.builder.IDMappingOptions } else if builder, ok := s.executor.containerMap[copy.From]; ok { - sources = append(sources, filepath.Join(builder.MountPoint, src)) + srcRoot = builder.MountPoint contextDir = builder.MountPoint + idMappingOptions = &builder.IDMappingOptions } else { return errors.Errorf("the stage %q has not been built", copy.From) } + srcSecure, err := securejoin.SecureJoin(srcRoot, src) + if err != nil { + return err + } + // If destination is a folder, we need to take extra care to + // ensure that files are copied with correct names (since + // resolving a symlink may result in a different name). + if hadFinalPathSeparator { + _, srcName := filepath.Split(src) + _, srcNameSecure := filepath.Split(srcSecure) + if srcName != srcNameSecure { + options := buildah.AddAndCopyOptions{ + Chown: copy.Chown, + ContextDir: contextDir, + Excludes: copyExcludes, + } + if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil { + return err + } + continue + } + } + sources = append(sources, srcSecure) + } else { sources = append(sources, filepath.Join(s.executor.contextDir, src)) copyExcludes = append(s.executor.excludes, excludes...) } options := buildah.AddAndCopyOptions{ - Chown: copy.Chown, - ContextDir: contextDir, - Excludes: copyExcludes, + Chown: copy.Chown, + ContextDir: contextDir, + Excludes: copyExcludes, + IDMappingOptions: idMappingOptions, } if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil { return err @@ -860,9 +888,6 @@ func (s *StageExecutor) prepare(ctx context.Context, stage imagebuilder.Stage, f // Make this our "current" working container. s.mountPoint = mountPoint s.builder = builder - // Add the top layer of this image to b.topLayers so we can - // keep track of them when building with cached images. - s.executor.topLayers = append(s.executor.topLayers, builder.TopLayer) } logrus.Debugln("Container ID:", builder.ContainerID) return builder, nil @@ -967,7 +992,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b } logImageID := func(imgID string) { if s.executor.iidfile == "" { - fmt.Fprintf(s.executor.out, "--> %s\n", imgID) + fmt.Fprintf(s.executor.out, "%s\n", imgID) } } @@ -985,7 +1010,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // We don't need to squash the base image, so just // reuse the base image. 
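The COPY --from handling above now routes every source through securejoin.SecureJoin before passing it to Add, so a symlink inside the other stage (or named container) is resolved relative to that stage's mount point rather than the build host's root. A small illustration of the SecureJoin call; the paths are invented for the example.

package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// Resolve a COPY --from source inside the other stage's root. If
	// /stage-root/etc/alternatives/editor is a symlink to /usr/bin/vi,
	// the result is /stage-root/usr/bin/vi, never the host's /usr/bin/vi.
	resolved, err := securejoin.SecureJoin("/stage-root", "etc/alternatives/editor")
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved)
}

The hadFinalPathSeparator branch above exists because resolving such a symlink can change the basename, so the file has to be copied under the name the Dockerfile asked for.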
logCommit(s.output, -1) - if imgID, ref, err = s.copyExistingImage(ctx, s.builder.FromImageID, s.output); err != nil { + if imgID, ref, err = s.tagExistingImage(ctx, s.builder.FromImageID, s.output); err != nil { return "", nil, err } } @@ -1110,7 +1135,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b imgID = cacheID if commitName != "" { logCommit(commitName, i) - if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil { + if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil { return "", nil, err } logImageID(imgID) @@ -1179,8 +1204,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b return imgID, ref, nil } -// copyExistingImage creates a copy of an image already in the store -func (s *StageExecutor) copyExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) { +// tagExistingImage adds names to an image already in the store +func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) { // If we don't need to attach a name to the image, just return the cache ID. if output == "" { return cacheID, nil, nil @@ -1247,11 +1272,11 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node, return "", errors.Wrapf(err, "error getting top layer info") } } - // If the parent of the top layer of an image is equal to the last entry in b.topLayers + // If the parent of the top layer of an image is equal to the current build image's top layer, // it means that this image is potentially a cached intermediate image from a previous // build. Next we double check that the history of this image is equivalent to the previous // lines in the Dockerfile up till the point we are at in the build. - if imageTopLayer == nil || imageTopLayer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] || imageTopLayer.ID == s.executor.topLayers[len(s.executor.topLayers)-1] { + if imageTopLayer == nil || (s.builder.TopLayer != "" && (imageTopLayer.Parent == s.builder.TopLayer || imageTopLayer.ID == s.builder.TopLayer)) { history, err := s.executor.getImageHistory(ctx, image.ID) if err != nil { return "", errors.Wrapf(err, "error getting history of %q", image.ID) @@ -1340,26 +1365,8 @@ func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, return false } } - instruction := child.Original - switch strings.ToUpper(child.Value) { - case "RUN": - instruction = instruction[4:] - buildArgs := b.getBuildArgs() - // If a previous image was built with some build-args but the new build process doesn't have any build-args - // specified, the command might be expanded differently, so compare the lengths of the old instruction with - // the current one. 11 is the length of "/bin/sh -c " that is used to run the run commands. 
- if buildArgs == "" && len(history[len(baseHistory)].CreatedBy) > len(instruction)+11 { - return false - } - // There are build-args, so check if anything with the build-args has changed - if buildArgs != "" && !strings.Contains(history[len(baseHistory)].CreatedBy, buildArgs) { - return false - } - fallthrough - default: - if !strings.Contains(history[len(baseHistory)].CreatedBy, instruction) { - return false - } + if history[len(baseHistory)].CreatedBy != b.getCreatedBy(child) { + return false } return true } @@ -1373,6 +1380,7 @@ func (b *Executor) getBuildArgs() string { buildArgs = append(buildArgs, k+"="+v) } } + sort.Strings(buildArgs) return strings.Join(buildArgs, " ") } @@ -1545,7 +1553,6 @@ func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, cr options := buildah.CommitOptions{ Compression: s.executor.compression, SignaturePolicyPath: s.executor.signaturePolicyPath, - AdditionalTags: s.executor.additionalTags, ReportWriter: writer, PreferredManifestType: s.executor.outputFormat, SystemContext: s.executor.systemContext, @@ -1731,6 +1738,24 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList) } + if len(b.additionalTags) > 0 { + if dest, err := b.resolveNameToImageRef(b.output); err == nil { + switch dest.Transport().Name() { + case is.Transport.Name(): + img, err := is.Transport.GetStoreImage(b.store, dest) + if err != nil { + return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest)) + } + if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil { + return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...)) + } + logrus.Debugf("assigned names %v to image %q", img.Names, img.ID) + default: + logrus.Warnf("don't know how to add tags to images stored in %q transport", dest.Transport().Name()) + } + } + } + if err := cleanup(); err != nil { return "", nil, err } diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 31f0c2cec..14d29a25b 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -2,6 +2,7 @@ package overlay import ( "fmt" + "io/ioutil" "os" "path/filepath" "strings" @@ -15,13 +16,27 @@ import ( // MountTemp creates a subdir of the contentDir based on the source directory // from the source system. It then mounds up the source directory on to the // generated mount point and returns the mount point to the caller. 
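One detail worth spelling out in the cache rewrite above: getBuildArgs flattens a Go map, and map iteration order is randomized, so without the added sort.Strings the same build could produce differently ordered arg strings and spuriously miss the layer cache. An illustrative sketch, with a made-up helper name:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildArgsKey builds a deterministic key from build args; sorting is what
// makes two runs over the same map compare equal.
func buildArgsKey(args map[string]string) string {
	pairs := make([]string, 0, len(args))
	for k, v := range args {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, " ")
}

func main() {
	fmt.Println(buildArgsKey(map[string]string{"HTTP_PROXY": "http://proxy:3128", "FOO": "bar"}))
	// always prints: FOO=bar HTTP_PROXY=http://proxy:3128
}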
-func MountTemp(store storage.Store, containerId, source, dest string, rootUID, rootGID int) (specs.Mount, string, error) { - mount := specs.Mount{} +func MountTemp(store storage.Store, containerId, source, dest string, rootUID, rootGID int) (mount specs.Mount, contentDir string, Err error) { - contentDir, err := store.ContainerDirectory(containerId) + containerDir, err := store.ContainerDirectory(containerId) if err != nil { return mount, "", err } + contentDir = filepath.Join(containerDir, "overlay") + if err := idtools.MkdirAllAs(contentDir, 0700, rootUID, rootGID); err != nil { + return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", contentDir) + } + + contentDir, err = ioutil.TempDir(contentDir, "") + if err != nil { + return mount, "", errors.Wrapf(err, "failed to create TempDir in the overlay %s directory", contentDir) + } + defer func() { + if Err != nil { + os.RemoveAll(contentDir) + } + }() + upperDir := filepath.Join(contentDir, "upper") workDir := filepath.Join(contentDir, "work") if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil { @@ -44,3 +59,13 @@ func MountTemp(store storage.Store, containerId, source, dest string, rootUID, r func RemoveTemp(contentDir string) error { return os.RemoveAll(contentDir) } + +// CleanupContent removes all temporary mountpoint and all content from +// directory +func CleanupContent(containerDir string) (Err error) { + contentDir := filepath.Join(containerDir, "overlay") + if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 6c58f1194..61e70cdd3 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -37,6 +37,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) { var ( memoryLimit int64 memorySwap int64 + noDNS bool err error ) @@ -67,9 +68,26 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) { } } + noDNS = false dnsServers, _ := c.Flags().GetStringSlice("dns") + for _, server := range dnsServers { + if strings.ToLower(server) == "none" { + noDNS = true + } + } + if noDNS && len(dnsServers) > 1 { + return nil, errors.Errorf("invalid --dns, --dns=none may not be used with any other --dns options") + } + dnsSearch, _ := c.Flags().GetStringSlice("dns-search") + if noDNS && len(dnsSearch) > 0 { + return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none") + } + dnsOptions, _ := c.Flags().GetStringSlice("dns-option") + if noDNS && len(dnsOptions) > 0 { + return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none") + } if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil { return nil, errors.Wrapf(err, "invalid --shm-size") @@ -80,7 +98,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) { } cpuPeriod, _ := c.Flags().GetUint64("cpu-period") cpuQuota, _ := c.Flags().GetInt64("cpu-quota") - cpuShares, _ := c.Flags().GetUint64("cpu-shared") + cpuShares, _ := c.Flags().GetUint64("cpu-shares") httpProxy, _ := c.Flags().GetBool("http-proxy") ulimit, _ := c.Flags().GetStringSlice("ulimit") commonOpts := &buildah.CommonBuildOptions{ diff --git 
a/vendor/github.com/containers/buildah/pkg/unshare/unshare.c b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c index 67a3e0e4d..fd0d48d43 100644 --- a/vendor/github.com/containers/buildah/pkg/unshare/unshare.c +++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.c @@ -3,7 +3,7 @@ #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/syscall.h> -#include <linux/memfd.h> +#include <sys/mman.h> #include <fcntl.h> #include <grp.h> #include <sched.h> @@ -14,6 +14,17 @@ #include <errno.h> #include <unistd.h> +/* Open Source projects like conda-forge, want to package podman and are based + off of centos:6, Conda-force has minimal libc requirements and is lacking + the memfd.h file, so we use mmam.h +*/ +#ifndef MFD_ALLOW_SEALING +#define MFD_ALLOW_SEALING 2U +#endif +#ifndef MFD_CLOEXEC +#define MFD_CLOEXEC 1U +#endif + #ifndef F_LINUX_SPECIFIC_BASE #define F_LINUX_SPECIFIC_BASE 1024 #endif diff --git a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go index 5a68d6b8d..21b102cf5 100644 --- a/vendor/github.com/containers/buildah/pkg/unshare/unshare.go +++ b/vendor/github.com/containers/buildah/pkg/unshare/unshare.go @@ -195,13 +195,25 @@ func (c *Cmd) Start() error { if err == nil { gidmapSet = true } else { - fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String()) - fmt.Fprintf(continueWrite, "falling back to single mapping\n") + logrus.Warnf("error running newgidmap: %v: %s", err, g.String()) + logrus.Warnf("falling back to single mapping") g.Reset() g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) } } if !gidmapSet { + if c.UseNewgidmap { + setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err) + return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + } + defer setgroups.Close() + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err) + return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString) + } + } gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) if err != nil { fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err) @@ -232,8 +244,8 @@ func (c *Cmd) Start() error { if err == nil { uidmapSet = true } else { - fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String()) - fmt.Fprintf(continueWrite, "falling back to single mapping\n") + logrus.Warnf("error running newuidmap: %v: %s", err, u.String()) + logrus.Warnf("falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) } diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 16c0550aa..55f9502b2 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -174,7 +174,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { bindFiles["/etc/hosts"] = hostFile } - if !contains(volumes, "/etc/resolv.conf") { + if !(contains(volumes, "/etc/resolv.conf") || (len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none")) { resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf", rootIDPair, b.CommonBuildOpts.DNSServers, 
b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions) if err != nil { return err @@ -434,7 +434,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st // Add temporary copies of the contents of volume locations at the // volume locations, unless we already have something there. - copyWithTar := b.copyWithTar(nil, nil) + copyWithTar := b.copyWithTar(nil, nil, nil) builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID)) if err != nil { return err @@ -1049,6 +1049,18 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo return teardown, nil } +func setNonblock(fd int, description string, nonblocking bool) error { + err := unix.SetNonblock(fd, nonblocking) + if err != nil { + if nonblocking { + logrus.Errorf("error setting %s to nonblocking: %v", description, err) + } else { + logrus.Errorf("error setting descriptor %s blocking: %v", description, err) + } + } + return err +} + func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}, spec *specs.Spec) { defer func() { unix.Close(finishCopy[0]) @@ -1116,14 +1128,16 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy } // Set our reading descriptors to non-blocking. for rfd, wfd := range relayMap { - if err := unix.SetNonblock(rfd, true); err != nil { - logrus.Errorf("error setting %s to nonblocking: %v", readDesc[rfd], err) + if err := setNonblock(rfd, readDesc[rfd], true); err != nil { return } - if err := unix.SetNonblock(wfd, false); err != nil { - logrus.Errorf("error setting descriptor %d (%s) blocking: %v", wfd, writeDesc[wfd], err) - } + setNonblock(wfd, writeDesc[wfd], false) } + + setNonblock(stdioPipe[unix.Stdin][1], writeDesc[stdioPipe[unix.Stdin][1]], true) + + closeStdin := false + // Pass data back and forth. pollTimeout := -1 for len(relayMap) > 0 { @@ -1155,12 +1169,6 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy } // If the POLLIN flag isn't set, then there's no data to be read from this descriptor. if pollFd.Revents&unix.POLLIN == 0 { - // If we're using pipes and it's our stdin and it's closed, close the writing - // end of the corresponding pipe. - if copyPipes && int(pollFd.Fd) == unix.Stdin && pollFd.Revents&unix.POLLHUP != 0 { - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } continue } // Read whatever there is to be read. @@ -1175,10 +1183,8 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy // using pipes, it's an EOF, so close the stdin // pipe's writing end. if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin { - unix.Close(stdioPipe[unix.Stdin][1]) - stdioPipe[unix.Stdin][1] = -1 - } - if n > 0 { + removes[int(pollFd.Fd)] = struct{}{} + } else if n > 0 { // Buffer the data in case we get blocked on where they need to go. 
nwritten, err := relayBuffer[writeFD].Write(buf[:n]) if err != nil { @@ -1222,6 +1228,11 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy if n > 0 { relayBuffer[writeFD].Next(n) } + if closeStdin && writeFD == stdioPipe[unix.Stdin][1] && stdioPipe[unix.Stdin][1] >= 0 && relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { + logrus.Debugf("closing stdin") + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } } if relayBuffer[writeFD].Len() > 0 { pollTimeout = 100 @@ -1229,6 +1240,14 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy } // Remove any descriptors which we don't need to poll any more from the poll descriptor list. for remove := range removes { + if copyPipes && remove == unix.Stdin { + closeStdin = true + if relayBuffer[stdioPipe[unix.Stdin][1]].Len() == 0 { + logrus.Debugf("closing stdin") + unix.Close(stdioPipe[unix.Stdin][1]) + stdioPipe[unix.Stdin][1] = -1 + } + } delete(relayMap, remove) } // If the we-can-return pipe had anything for us, we're done. @@ -1453,7 +1472,7 @@ func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, i } } } - if configureNetwork { + if configureNetwork && !unshare.IsRootless() { for name, val := range util.DefaultNetworkSysctl { // Check that the sysctl we are adding is actually supported // by the kernel @@ -1564,6 +1583,15 @@ func (b *Builder) cleanupTempVolumes() { func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, rootUID, rootGID int) (mounts []specs.Mount, Err error) { + // Make sure the overlay directory is clean before running + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, errors.Wrapf(err, "error looking up container directory for %s", b.ContainerID) + } + if err := overlay.CleanupContent(containerDir); err != nil { + return nil, errors.Wrapf(err, "error cleaning up overlay content for %s", b.ContainerID) + } + parseMount := func(host, container string, options []string) (specs.Mount, error) { var foundrw, foundro, foundz, foundZ, foundO bool var rootProp string diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 08fb99706..ce21d2651 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -1,9 +1,12 @@ package buildah import ( + "archive/tar" "io" "os" + "path/filepath" + "github.com/containers/buildah/util" "github.com/containers/image/docker/reference" "github.com/containers/image/pkg/sysregistries" "github.com/containers/image/pkg/sysregistriesv2" @@ -12,7 +15,9 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" "github.com/opencontainers/image-spec/specs-go/v1" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" @@ -105,19 +110,108 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa } // copyFileWithTar returns a function which copies a single file from outside -// of any container into our working container, mapping permissions using the -// container's ID maps, possibly overridden using the passed-in chownOpts -func (b *Builder) copyFileWithTar(chownOpts *idtools.IDPair, hasher io.Writer) 
func(src, dest string) error { - convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) - return chrootarchive.CopyFileWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) +// of any container, or another container, into our working container, mapping +// read permissions using the passed-in ID maps, writing using the container's +// ID mappings, possibly overridden using the passed-in chownOpts +func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { + if tarIDMappingOptions == nil { + tarIDMappingOptions = &IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + } + return func(src, dest string) error { + logrus.Debugf("copyFileWithTar(%s, %s)", src, dest) + f, err := os.Open(src) + if err != nil { + return errors.Wrapf(err, "error opening %q to copy its contents", src) + } + defer func() { + if f != nil { + f.Close() + } + }() + + sysfi, err := system.Lstat(src) + if err != nil { + return errors.Wrapf(err, "error reading attributes of %q", src) + } + + hostUID := sysfi.UID() + hostGID := sysfi.GID() + containerUID, containerGID, err := util.GetContainerIDs(tarIDMappingOptions.UIDMap, tarIDMappingOptions.GIDMap, hostUID, hostGID) + if err != nil { + return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID) + } + + fi, err := os.Lstat(src) + if err != nil { + return errors.Wrapf(err, "error reading attributes of %q", src) + } + + hdr, err := tar.FileInfoHeader(fi, filepath.Base(src)) + if err != nil { + return errors.Wrapf(err, "error generating tar header for: %q", src) + } + hdr.Name = filepath.Base(dest) + hdr.Uid = int(containerUID) + hdr.Gid = int(containerGID) + + pipeReader, pipeWriter := io.Pipe() + writer := tar.NewWriter(pipeWriter) + var copyErr error + go func(srcFile *os.File) { + err := writer.WriteHeader(hdr) + if err != nil { + logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err) + copyErr = err + } + n, err := pools.Copy(writer, srcFile) + if n != hdr.Size { + logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n) + } + if err != nil { + logrus.Debugf("error reading %s: %v", srcFile.Name(), err) + copyErr = err + } + if err = writer.Close(); err != nil { + logrus.Debugf("error closing write pipe for %s: %v", srcFile.Name(), err) + } + if err = srcFile.Close(); err != nil { + logrus.Debugf("error closing %s: %v", srcFile.Name(), err) + } + pipeWriter.Close() + pipeWriter = nil + return + }(f) + + untar := b.untar(chownOpts, hasher) + err = untar(pipeReader, filepath.Dir(dest)) + if err == nil { + err = copyErr + } + f = nil + if pipeWriter != nil { + pipeWriter.Close() + } + return err + } } // copyWithTar returns a function which copies a directory tree from outside of -// any container into our working container, mapping permissions using the -// container's ID maps, possibly overridden using the passed-in chownOpts -func (b *Builder) copyWithTar(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { - convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) - return chrootarchive.CopyWithTarAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) +// our container or from another container, into our working container, mapping +// permissions at read-time using the container's ID maps, with ownership at +// write-time possibly 
overridden using the passed-in chownOpts +func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { + tar := b.tarPath(tarIDMappingOptions) + untar := b.untar(chownOpts, hasher) + return func(src, dest string) error { + rc, err := tar(src) + if err != nil { + return errors.Wrapf(err, "error archiving %q for copy", src) + } + return untar(rc, dest) + } } // untarPath returns a function which extracts an archive in a specified @@ -128,12 +222,58 @@ func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(sr return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) } -// tarPath returns a function which creates an archive of a specified +// tarPath returns a function which creates an archive of a specified location, +// which is often somewhere in the container's filesystem, mapping permissions +// using the container's ID maps, or the passed-in maps if specified +func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) (io.ReadCloser, error) { + var uidmap, gidmap []idtools.IDMap + if idMappingOptions == nil { + idMappingOptions = &IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + } + convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(idMappingOptions.UIDMap, idMappingOptions.GIDMap) + tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) + uidmap = tarMappings.UIDs() + gidmap = tarMappings.GIDs() + options := &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidmap, + GIDMaps: gidmap, + } + return func(path string) (io.ReadCloser, error) { + return archive.TarWithOptions(path, options) + } +} + +// untar returns a function which extracts an archive stream to a specified // location in the container's filesystem, mapping permissions using the -// container's ID maps -func (b *Builder) tarPath() func(path string) (io.ReadCloser, error) { +// container's ID maps, possibly overridden using the passed-in chownOpts +func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArchive io.ReadCloser, dest string) error { convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) - return archive.TarPath(convertedUIDMap, convertedGIDMap) + untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) + options := &archive.TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: chownOpts, + } + untar := chrootarchive.Untar + if hasher != nil { + originalUntar := untar + untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return func(tarArchive io.ReadCloser, dest string) error { + err := untar(tarArchive, dest, options) + if err2 := tarArchive.Close(); err2 != nil { + if err == nil { + err = err2 + } + } + return err + } } // isRegistryBlocked checks if the named registry is marked as blocked diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index 30afe8313..4736d7b77 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -106,13 +106,19 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto // Figure out the list of registries. 
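The copyFileWithTar/copyWithTar rewrite above streams content as a tar archive through a pipe, which lets buildah apply one set of ID mappings when reading the source (tarIDMappingOptions) and the working container's mappings plus chownOpts when writing. A stripped-down, standard-library-only sketch of that pipe pattern, with no ID mapping at all:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"strings"
)

func main() {
	payload := "hello"
	pr, pw := io.Pipe()

	// Producer: write a single file as a tar entry into the pipe, the way
	// copyFileWithTar's goroutine feeds its untar function.
	go func() {
		tw := tar.NewWriter(pw)
		hdr := &tar.Header{Name: "greeting.txt", Mode: 0644, Size: int64(len(payload))}
		if err := tw.WriteHeader(hdr); err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(tw, strings.NewReader(payload)); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.CloseWithError(tw.Close())
	}()

	// Consumer: read the archive back out; buildah's untar would instead
	// extract it under the destination with remapped ownership.
	tr := tar.NewReader(pr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data, err := io.ReadAll(tr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %s\n", hdr.Name, data)
	}
}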
var registries []string - searchRegistries, err := sysregistriesv2.FindUnqualifiedSearchRegistries(sc) + searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc) if err != nil { logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) + searchRegistries = nil } for _, registry := range searchRegistries { - if !registry.Blocked { - registries = append(registries, registry.Location) + reg, err := sysregistriesv2.FindRegistry(sc, registry) + if err != nil { + logrus.Debugf("unable to read registry configuraitno for %#v: %v", registry, err) + continue + } + if reg == nil || !reg.Blocked { + registries = append(registries, registry) } } searchRegistriesAreEmpty := len(registries) == 0 @@ -257,6 +263,36 @@ func StringInSlice(s string, slice []string) bool { return false } +// GetContainerIDs uses ID mappings to compute the container-level IDs that will +// correspond to a UID/GID pair on the host. +func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) { + uidMapped := true + for _, m := range uidmap { + uidMapped = false + if uid >= m.HostID && uid < m.HostID+m.Size { + uid = (uid - m.HostID) + m.ContainerID + uidMapped = true + break + } + } + if !uidMapped { + return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) + } + gidMapped := true + for _, m := range gidmap { + gidMapped = false + if gid >= m.HostID && gid < m.HostID+m.Size { + gid = (gid - m.HostID) + m.ContainerID + gidMapped = true + break + } + } + if !gidMapped { + return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) + } + return uid, gid, nil +} + // GetHostIDs uses ID mappings to compute the host-level IDs that will // correspond to a UID/GID pair in the container. 
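The new util.GetContainerIDs above is the inverse of GetHostIDs: it maps a host UID/GID pair into the container's ID space using the same runtime-spec mappings. A short usage sketch with an invented single-range mapping:

package main

import (
	"fmt"

	"github.com/containers/buildah/util"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Map container IDs 0-65535 onto host IDs 1000-66535.
	uidmap := []specs.LinuxIDMapping{{ContainerID: 0, HostID: 1000, Size: 65536}}
	gidmap := []specs.LinuxIDMapping{{ContainerID: 0, HostID: 1000, Size: 65536}}

	// Host 1000:1000 is the container's root under this mapping.
	uid, gid, err := util.GetContainerIDs(uidmap, gidmap, 1000, 1000)
	if err != nil {
		panic(err)
	}
	fmt.Println(uid, gid) // 0 0
}

An unmapped host ID produces the "container uses ID mappings ... but doesn't map UID %d" error shown above, which is how copyFileWithTar reports files whose owners have no representation in the target mapping.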
func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) { @@ -270,7 +306,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, } } if !uidMapped { - return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map UID %d", uid) + return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid) } gidMapped := true for _, m := range gidmap { @@ -282,7 +318,7 @@ func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, } } if !gidMapped { - return 0, 0, errors.Errorf("container uses ID mappings, but doesn't map GID %d", gid) + return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid) } return uid, gid, nil } diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf index 0c982626a..88148947a 100644 --- a/vendor/github.com/containers/buildah/vendor.conf +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -3,12 +3,12 @@ github.com/blang/semver v3.5.0 github.com/BurntSushi/toml v0.2.0 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d github.com/containernetworking/cni v0.7.0-rc2 -github.com/containers/image 9467ac9cfd92c545aa389f22f27e552de053c0f2 +github.com/containers/image v2.0.0 github.com/cyphar/filepath-securejoin v0.2.1 github.com/vbauerster/mpb v3.3.4 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 -github.com/containers/storage v1.12.7 +github.com/containers/storage v1.12.10 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83 github.com/docker/docker-credential-helpers v0.6.1 diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index c8fdb407c..c43e6e7ca 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -29,44 +29,16 @@ type dockerImageSource struct { cachedManifestMIMEType string // Only valid if cachedManifest != nil } -// newImageSource creates a new `ImageSource` for the specified image reference -// `ref`. -// -// The following steps will be done during the instance creation: -// -// - Lookup the registry within the configured location in -// `sys.SystemRegistriesConfPath`. If there is no configured registry available, -// we fallback to the provided docker reference `ref`. -// -// - References which contain a configured prefix will be automatically rewritten -// to the correct target reference. For example, if the configured -// `prefix = "example.com/foo"`, `location = "example.com"` and the image will be -// pulled from the ref `example.com/foo/image`, then the resulting pull will -// effectively point to `example.com/image`. -// -// - If the rewritten reference succeeds, it will be used as the `dockerRef` -// in the client. If the rewrite fails, the function immediately returns an error. -// -// - Each mirror will be used (in the configured order) to test the -// availability of the image manifest on the remote location. For example, -// if the manifest is not reachable due to connectivity issues, then the next -// mirror will be tested instead. If no mirror is configured or contains the -// target manifest, then the initial `ref` will be tested as fallback. 
The -// creation of the new `dockerImageSource` only succeeds if a remote -// location with the available manifest was found. -// -// A cleanup call to `.Close()` is needed if the caller is done using the returned -// `ImageSource`. +// newImageSource creates a new ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) if err != nil { return nil, errors.Wrapf(err, "error loading registries configuration") } - if registry == nil { - // No configuration was found for the provided reference, so we create - // a fallback registry by hand to make the client creation below work - // as intended. + // No configuration was found for the provided reference, so use the + // equivalent of a default configuration. registry = &sysregistriesv2.Registry{ Endpoint: sysregistriesv2.Endpoint{ Location: ref.ref.String(), @@ -76,18 +48,19 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef } primaryDomain := reference.Domain(ref.ref) - // Found the registry within the sysregistriesv2 configuration. Now we test - // all endpoints for the manifest availability. If a working image source - // was found, it will be used for all future pull actions. + // Check all endpoints for the manifest availability. If we find one that does + // contain the image, it will be used for all future pull actions. Always try the + // non-mirror original location last; this both transparently handles the case + // of no mirrors configured, and ensures we return the error encountered when + // acessing the upstream location if all endpoints fail. manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint") - for _, endpoint := range append(registry.Mirrors, registry.Endpoint) { - logrus.Debugf("Trying to pull %q from endpoint %q", ref.ref, endpoint.Location) - - newRef, err := endpoint.RewriteReference(ref.ref, registry.Prefix) - if err != nil { - return nil, err - } - dockerRef, err := newReference(newRef) + pullSources, err := registry.PullSourcesFromReference(ref.ref) + if err != nil { + return nil, err + } + for _, pullSource := range pullSources { + logrus.Debugf("Trying to pull %q", pullSource.Reference) + dockerRef, err := newReference(pullSource.Reference) if err != nil { return nil, err } @@ -104,7 +77,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef if err != nil { return nil, err } - client.tlsClientConfig.InsecureSkipVerify = endpoint.Insecure + client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure testImageSource := &dockerImageSource{ ref: dockerRef, diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md index 53a88de82..3c4d74eb4 100644 --- a/vendor/github.com/containers/image/docker/reference/README.md +++ b/vendor/github.com/containers/image/docker/reference/README.md @@ -1,2 +1,2 @@ -This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8, +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
\ No newline at end of file diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go index fcc436a39..6a86ec64f 100644 --- a/vendor/github.com/containers/image/docker/reference/normalize.go +++ b/vendor/github.com/containers/image/docker/reference/normalize.go @@ -55,6 +55,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go index fd3510e9e..8c0c23b2f 100644 --- a/vendor/github.com/containers/image/docker/reference/reference.go +++ b/vendor/github.com/containers/image/docker/reference/reference.go @@ -15,7 +15,7 @@ // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go index 405e995db..786034932 100644 --- a/vendor/github.com/containers/image/docker/reference/regexp.go +++ b/vendor/github.com/containers/image/docker/reference/regexp.go @@ -20,15 +20,15 @@ var ( optional(repeated(separatorRegexp, alphaNumericRegexp))) // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by domainRegexp + // repository name to start with a component as defined by DomainRegexp // and followed by an optional port. 
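A quick illustration of the ParseDockerRef helper added above, reusing the digest from its doc comment: a reference carrying both a tag and a digest collapses to the digested form, and a bare name is normalized and given the latest tag.

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	named, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
	fmt.Println(named.String())

	named, _ = reference.ParseDockerRef("busybox")
	fmt.Println(named.String()) // docker.io/library/busybox:latest
}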
domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - // domainRegexp defines the structure of potential domain components + // DomainRegexp defines the structure of potential domain components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. - domainRegexp = expression( + DomainRegexp = expression( domainComponentRegexp, optional(repeated(literal(`.`), domainComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) @@ -51,14 +51,14 @@ var ( // regexp has capturing groups for the domain and name part omitting // the separating forward slash from either. NameRegexp = expression( - optional(domainRegexp, literal(`/`)), + optional(DomainRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the // domain and trailing components. anchoredNameRegexp = anchored( - optional(capture(domainRegexp), literal(`/`)), + optional(capture(DomainRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go index 99ae65774..361e6fc60 100644 --- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "os" "path/filepath" + "regexp" "strings" "sync" @@ -35,10 +36,10 @@ type Endpoint struct { Insecure bool `toml:"insecure"` } -// RewriteReference will substitute the provided reference `prefix` to the +// rewriteReference will substitute the provided reference `prefix` to the // endpoints `location` from the `ref` and creates a new named reference from it. // The function errors if the newly created reference is not parsable. -func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (reference.Named, error) { +func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { refString := ref.String() if !refMatchesPrefix(refString, prefix) { return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) @@ -61,8 +62,10 @@ type Registry struct { Mirrors []Endpoint `toml:"mirror"` // If true, pulling from the registry will be blocked. Blocked bool `toml:"blocked"` - // If true, the registry can be used when pulling an unqualified image. - Search bool `toml:"unqualified-search"` + // If true, mirrors will only be used for digest pulls. Pulling images by + // tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. + MirrorByDigestOnly bool `toml:"mirror-by-digest-only"` // Prefix is used for matching images, and to translate one namespace to // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` // and we pull from "example.com/bar/myimage:latest", the image will @@ -71,6 +74,41 @@ type Registry struct { Prefix string `toml:"prefix"` } +// PullSource consists of an Endpoint and a Reference. Note that the reference is +// rewritten according to the registries prefix and the Endpoint's location. 
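Registry.Prefix drives how a matching reference is rewritten onto an endpoint's Location (rewriteReference above, now unexported). The following is a simplified, hypothetical sketch of just the string substitution, reusing the example values from the Prefix comment; the real code additionally validates that the reference matches the prefix and re-parses the result into a reference.Named.

```go
// Simplified illustration of the prefix rewriting performed by rewriteReference;
// only the string substitution is shown.
package main

import (
	"fmt"
	"strings"
)

// rewrite swaps the registry prefix for the endpoint location, keeping the rest
// of the reference (repository path, tag or digest) untouched.
func rewrite(ref, prefix, location string) string {
	return location + strings.TrimPrefix(ref, prefix)
}

func main() {
	// "example.com/bar/myimage:latest" pulled through a registry with
	// Prefix "example.com/bar" and Location "example.com/foo/bar" is
	// effectively fetched from "example.com/foo/bar/myimage:latest".
	fmt.Println(rewrite("example.com/bar/myimage:latest", "example.com/bar", "example.com/foo/bar"))
}
```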
+type PullSource struct { + Endpoint Endpoint + Reference reference.Named +} + +// PullSourcesFromReference returns a slice of PullSource's based on the passed +// reference. +func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { + var endpoints []Endpoint + + if r.MirrorByDigestOnly { + // Only use mirrors when the reference is a digest one. + if _, isDigested := ref.(reference.Canonical); isDigested { + endpoints = append(r.Mirrors, r.Endpoint) + } else { + endpoints = []Endpoint{r.Endpoint} + } + } else { + endpoints = append(r.Mirrors, r.Endpoint) + } + + sources := []PullSource{} + for _, ep := range endpoints { + rewritten, err := ep.rewriteReference(ref, r.Prefix) + if err != nil { + return nil, err + } + sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) + } + + return sources, nil +} + // V1TOMLregistries is for backwards compatibility to sysregistries v1 type V1TOMLregistries struct { Registries []string `toml:"registries"` @@ -83,11 +121,35 @@ type V1TOMLConfig struct { Block V1TOMLregistries `toml:"block"` } +// V1RegistriesConf is the sysregistries v1 configuration format. +type V1RegistriesConf struct { + V1TOMLConfig `toml:"registries"` +} + +// Nonempty returns true if config contains at least one configuration entry. +func (config *V1RegistriesConf) Nonempty() bool { + return (len(config.V1TOMLConfig.Search.Registries) != 0 || + len(config.V1TOMLConfig.Insecure.Registries) != 0 || + len(config.V1TOMLConfig.Block.Registries) != 0) +} + +// V2RegistriesConf is the sysregistries v2 configuration format. +type V2RegistriesConf struct { + Registries []Registry `toml:"registry"` + // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references + UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` +} + +// Nonempty returns true if config contains at least one configuration entry. +func (config *V2RegistriesConf) Nonempty() bool { + return (len(config.Registries) != 0 || + len(config.UnqualifiedSearchRegistries) != 0) +} + // tomlConfig is the data type used to unmarshal the toml config. type tomlConfig struct { - Registries []Registry `toml:"registry"` - // backwards compatability to sysregistries v1 - V1TOMLConfig `toml:"registries"` + V2RegistriesConf + V1RegistriesConf // for backwards compatibility with sysregistries v1 } // InvalidRegistries represents an invalid registry configurations. An example @@ -120,12 +182,10 @@ func parseLocation(input string) (string, error) { return trimmed, nil } -// getV1Registries transforms v1 registries in the config into an array of v2 -// registries of type Registry. -func getV1Registries(config *tomlConfig) ([]Registry, error) { +// ConvertToV2 returns a v2 config corresponding to a v1 one. +func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { regMap := make(map[string]*Registry) - // We must preserve the order of config.V1Registries.Search.Registries at least. The order of the - // other registries is not really important, but make it deterministic (the same for the same config file) + // The order of the registries is not really important, but make it deterministic (the same for the same config file) // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. 
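PullSourcesFromReference is also where the new mirror-by-digest-only policy takes effect: with the flag set, a by-tag reference yields only the primary endpoint, while a by-digest reference yields the mirrors first and the primary endpoint last. A hedged sketch of that difference; all registry, mirror, and image names are made up for illustration.

```go
// Hedged sketch of the mirror-by-digest-only behaviour implemented above.
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	reg := sysregistriesv2.Registry{
		Endpoint:           sysregistriesv2.Endpoint{Location: "registry.example.com/ns"},
		Prefix:             "registry.example.com/ns",
		Mirrors:            []sysregistriesv2.Endpoint{{Location: "mirror.example.com/ns"}},
		MirrorByDigestOnly: true,
	}

	for _, s := range []string{
		// By tag: only the primary endpoint is returned.
		"registry.example.com/ns/app:latest",
		// By digest: the mirror is returned first, then the primary endpoint.
		"registry.example.com/ns/app@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa",
	} {
		ref, err := reference.ParseNormalizedNamed(s)
		if err != nil {
			panic(err)
		}
		sources, err := reg.PullSourcesFromReference(ref)
		if err != nil {
			panic(err)
		}
		for _, src := range sources {
			fmt.Printf("%s -> %s\n", s, src.Reference.String())
		}
	}
}
```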
registryOrder := []string{} @@ -148,15 +208,6 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) { return reg, nil } - // Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order - // if one of the search registries is also in one of the other lists. - for _, search := range config.V1TOMLConfig.Search.Registries { - reg, err := getRegistry(search) - if err != nil { - return nil, err - } - reg.Search = true - } for _, blocked := range config.V1TOMLConfig.Block.Registries { reg, err := getRegistry(blocked) if err != nil { @@ -172,28 +223,31 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) { reg.Insecure = true } - registries := []Registry{} + res := &V2RegistriesConf{ + UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, + } for _, location := range registryOrder { reg := regMap[location] - registries = append(registries, *reg) + res.Registries = append(res.Registries, *reg) } - return registries, nil + return res, nil } -// postProcessRegistries checks the consistency of all registries (e.g., set -// the Prefix to Location if not set) and applies conflict checks. It returns an -// array of cleaned registries and error in case of conflicts. -func postProcessRegistries(regs []Registry) ([]Registry, error) { - var registries []Registry - regMap := make(map[string][]Registry) +// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. +var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") - for _, reg := range regs { - var err error +// postProcess checks the consistency of all the configuration, looks for conflicts, +// and normalizes the configuration (e.g., sets the Prefix to Location if not set). +func (config *V2RegistriesConf) postProcess() error { + regMap := make(map[string][]*Registry) + for i := range config.Registries { + reg := &config.Registries[i] // make sure Location and Prefix are valid + var err error reg.Location, err = parseLocation(reg.Location) if err != nil { - return nil, err + return err } if reg.Prefix == "" { @@ -201,7 +255,7 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) { } else { reg.Prefix, err = parseLocation(reg.Prefix) if err != nil { - return nil, err + return err } } @@ -209,10 +263,9 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) { for _, mir := range reg.Mirrors { mir.Location, err = parseLocation(mir.Location) if err != nil { - return nil, err + return err } } - registries = append(registries, reg) regMap[reg.Location] = append(regMap[reg.Location], reg) } @@ -222,22 +275,32 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) { // // Note: we need to iterate over the registries array to ensure a // deterministic behavior which is not guaranteed by maps. 
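ConvertToV2 gives existing v1 configurations a path onto the new format: v1 search entries become unqualified-search-registries, while blocked and insecure entries become per-registry flags. A hedged sketch of a programmatic conversion follows; the registry names are invented, and real callers normally let the config loader perform this conversion while parsing registries.conf rather than building a V1RegistriesConf by hand.

```go
// Hedged sketch of a v1 -> v2 conversion using the exported types added above.
package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	v1 := sysregistriesv2.V1RegistriesConf{
		V1TOMLConfig: sysregistriesv2.V1TOMLConfig{
			Search: sysregistriesv2.V1TOMLregistries{Registries: []string{"registry.example.com"}},
			Block:  sysregistriesv2.V1TOMLregistries{Registries: []string{"blocked.example.com"}},
		},
	}

	v2, err := v1.ConvertToV2()
	if err != nil {
		panic(err)
	}

	// v1 search registries become unqualified-search-registries; blocked entries
	// become Registry records with Blocked set.
	fmt.Println("unqualified-search-registries:", v2.UnqualifiedSearchRegistries)
	for _, reg := range v2.Registries {
		fmt.Printf("registry %q blocked=%v\n", reg.Location, reg.Blocked)
	}
}
```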
- for _, reg := range registries { + for _, reg := range config.Registries { others, _ := regMap[reg.Location] for _, other := range others { if reg.Insecure != other.Insecure { msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location) - - return nil, &InvalidRegistries{s: msg} + return &InvalidRegistries{s: msg} } if reg.Blocked != other.Blocked { msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location) - return nil, &InvalidRegistries{s: msg} + return &InvalidRegistries{s: msg} } } } - return registries, nil + for i := range config.UnqualifiedSearchRegistries { + registry, err := parseLocation(config.UnqualifiedSearchRegistries[i]) + if err != nil { + return err + } + if !anchoredDomainRegexp.MatchString(registry) { + return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)} + } + config.UnqualifiedSearchRegistries[i] = registry + } + + return nil } // getConfigPath returns the system-registries config path if specified. @@ -260,7 +323,7 @@ var configMutex = sync.Mutex{} // configCache caches already loaded configs with config paths as keys and is // used to avoid redudantly parsing configs. Concurrent accesses to the cache // are synchronized via configMutex. -var configCache = make(map[string][]Registry) +var configCache = make(map[string]*V2RegistriesConf) // InvalidateCache invalidates the registry cache. This function is meant to be // used for long-running processes that need to reload potential changes made to @@ -268,20 +331,18 @@ var configCache = make(map[string][]Registry) func InvalidateCache() { configMutex.Lock() defer configMutex.Unlock() - configCache = make(map[string][]Registry) + configCache = make(map[string]*V2RegistriesConf) } -// GetRegistries loads and returns the registries specified in the config. -// Note the parsed content of registry config files is cached. For reloading, -// use `InvalidateCache` and re-call `GetRegistries`. -func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { +// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. +func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) { configPath := getConfigPath(ctx) configMutex.Lock() defer configMutex.Unlock() // if the config has already been loaded, return the cached registries - if registries, inCache := configCache[configPath]; inCache { - return registries, nil + if config, inCache := configCache[configPath]; inCache { + return config, nil } // load the config @@ -292,51 +353,53 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { // isn't set. Note: if ctx.SystemRegistriesConfPath points to // the default config, we will still return an error. 
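postProcess now also validates unqualified-search-registries entries: each must be a bare host[:port], checked against the newly exported reference.DomainRegexp anchored at both ends. A hedged sketch of that check with invented entries:

```go
// Hedged sketch of the unqualified-search-registries validation added above:
// entries must be bare host[:port] values (not prefixes).
package main

import (
	"fmt"
	"regexp"

	"github.com/containers/image/docker/reference"
)

func main() {
	anchoredDomain := regexp.MustCompile("^" + reference.DomainRegexp.String() + "$")

	for _, entry := range []string{
		"registry.example.com:5000", // valid: host with optional port
		"registry.example.com/ns",   // invalid: prefixes are rejected here
	} {
		fmt.Printf("%-30s valid=%v\n", entry, anchoredDomain.MatchString(entry))
	}
}
```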
if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { - return []Registry{}, nil + return &V2RegistriesConf{Registries: []Registry{}}, nil } return nil, err } - registries := config.Registries + v2Config := &config.V2RegistriesConf // backwards compatibility for v1 configs - v1Registries, err := getV1Registries(config) - if err != nil { - return nil, err - } - if len(v1Registries) > 0 { - if len(registries) > 0 { + if config.V1RegistriesConf.Nonempty() { + if config.V2RegistriesConf.Nonempty() { return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} } - registries = v1Registries + v2, err := config.V1RegistriesConf.ConvertToV2() + if err != nil { + return nil, err + } + v2Config = v2 } - registries, err = postProcessRegistries(registries) - if err != nil { + if err := v2Config.postProcess(); err != nil { return nil, err } // populate the cache - configCache[configPath] = registries - - return registries, err + configCache[configPath] = v2Config + return v2Config, nil } -// FindUnqualifiedSearchRegistries returns all registries that are configured -// for unqualified image search (i.e., with Registry.Search == true). -func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) { - registries, err := GetRegistries(ctx) +// GetRegistries loads and returns the registries specified in the config. +// Note the parsed content of registry config files is cached. For reloading, +// use `InvalidateCache` and re-call `GetRegistries`. +func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { + config, err := getConfig(ctx) if err != nil { return nil, err } + return config.Registries, nil +} - unqualified := []Registry{} - for _, reg := range registries { - if reg.Search { - unqualified = append(unqualified, reg) - } +// UnqualifiedSearchRegistries returns a list of host[:port] entries to try +// for unqualified image search, in the returned order. +func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) { + config, err := getConfig(ctx) + if err != nil { + return nil, err } - return unqualified, nil + return config.UnqualifiedSearchRegistries, nil } // refMatchesPrefix returns true iff ref, @@ -371,14 +434,14 @@ func refMatchesPrefix(ref, prefix string) bool { // — note that this requires the name to start with an explicit hostname!). // If no Registry prefixes the image, nil is returned. func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { - registries, err := GetRegistries(ctx) + config, err := getConfig(ctx) if err != nil { return nil, err } reg := Registry{} prefixLen := 0 - for _, r := range registries { + for _, r := range config.Registries { if refMatchesPrefix(ref, r.Prefix) { length := len(r.Prefix) if length > prefixLen { @@ -393,21 +456,12 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { return nil, nil } -// Reads the global registry file from the filesystem. Returns a byte array. -func readRegistryConf(configPath string) ([]byte, error) { - configBytes, err := ioutil.ReadFile(configPath) - return configBytes, err -} - -// Used in unittests to parse custom configs without a types.SystemContext. -var readConf = readRegistryConf - // Loads the registry configuration file from the filesystem and then unmarshals // it. Returns the unmarshalled object.
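Consumers now reach the parsed configuration only through the exported helpers: GetRegistries, UnqualifiedSearchRegistries, and FindRegistry all share the same cached parse produced by getConfig, and long-running processes can call InvalidateCache to pick up edits to registries.conf. A hedged usage sketch; the config path and the image reference are illustrative.

```go
// Hedged sketch of the consumer-facing sysregistriesv2 API shown above.
package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	ctx := &types.SystemContext{SystemRegistriesConfPath: "/etc/containers/registries.conf"}

	search, err := sysregistriesv2.UnqualifiedSearchRegistries(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("unqualified-search registries:", search)

	reg, err := sysregistriesv2.FindRegistry(ctx, "registry.example.com/ns/app:latest")
	if err != nil {
		panic(err)
	}
	if reg != nil {
		fmt.Println("matched prefix:", reg.Prefix, "blocked:", reg.Blocked)
	}

	// Long-running processes can drop the parsed-config cache to pick up edits.
	sysregistriesv2.InvalidateCache()
}
```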
func loadRegistryConf(configPath string) (*tomlConfig, error) { config := &tomlConfig{} - configBytes, err := readConf(configPath) + configBytes, err := ioutil.ReadFile(configPath) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go index 184274736..62b2c8bc5 100644 --- a/vendor/github.com/containers/image/version/version.go +++ b/vendor/github.com/containers/image/version/version.go @@ -4,14 +4,14 @@ import "fmt" const ( // VersionMajor is for an API incompatible changes - VersionMajor = 1 + VersionMajor = 2 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 7 + VersionMinor = 0 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "" ) // Version is the specification version that the package types support.