42 files changed, 903 insertions(+), 1171 deletions(-)
@@ -13,7 +13,7 @@ require ( github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect github.com/containernetworking/cni v0.7.1 github.com/containernetworking/plugins v0.8.1 - github.com/containers/buildah v1.10.1 + github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142 github.com/containers/conmon v0.3.0 // indirect github.com/containers/image v3.0.2+incompatible github.com/containers/psgo v1.3.1 @@ -67,6 +67,8 @@ github.com/containernetworking/plugins v0.7.4 h1:ugkuXfg1Pdzm54U5DGMzreYIkZPSCmS github.com/containernetworking/plugins v0.7.4/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= github.com/containernetworking/plugins v0.8.1 h1:dJbykiiSIS3Xvo8d+A6rSXcUEFGfvCjUA+bUED4qegQ= github.com/containernetworking/plugins v0.8.1/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU= +github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142 h1:RxJ7MbdmorTHiKcJDOz6SwRPasZVp4LOdRWoZ1fdlsQ= +github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142/go.mod h1:QIIw13J1YIwWQskItX1wqZPQtUOOKrOnHE+LTibbLLA= github.com/containers/buildah v1.9.0 h1:ktVRCGNoVfW8PlTuCKUeh+zGdqn1Nik80DSWvGX+v4Y= github.com/containers/buildah v1.9.0/go.mod h1:1CsiLJvyU+h+wOjnqJJOWuJCVcMxZOr5HN/gHGdzJxY= github.com/containers/buildah v1.9.2 h1:dg87r1W1poWVQE0lTmP3BzaqgEI5IRudZ3jKjNIZ3xQ= diff --git a/libpod/container_api.go b/libpod/container_api.go index 9e59104cc..9bf97c5d4 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -14,7 +14,6 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/remotecommand" ) @@ -524,24 +523,25 @@ func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) { if !c.valid { return -1, define.ErrCtrRemoved } - err := wait.PollImmediateInfinite(waitTimeout, - func() (bool, error) { - logrus.Debugf("Checking container %s status...", c.ID()) - stopped, err := c.isStopped() - if err != nil { - return false, err - } - if !stopped { - return false, nil - } - return true, nil - }, - ) - if err != nil { - return 0, err + + exitFile := c.exitFilePath() + chWait := make(chan error, 1) + + defer close(chWait) + + for { + // ignore errors here, it is only used to avoid waiting + // too long. + _, _ = WaitForFile(exitFile, chWait, waitTimeout) + + stopped, err := c.isStopped() + if err != nil { + return -1, err + } + if stopped { + return c.state.ExitCode, nil + } } - exitCode := c.state.ExitCode - return exitCode, nil } // Cleanup unmounts all mount points in container and cleans up container storage diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 5aa4ee9a9..cb19b5484 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -1039,6 +1039,11 @@ func (c *Container) makeBindMounts() error { // generateResolvConf generates a containers resolv.conf func (c *Container) generateResolvConf() (string, error) { + var ( + nameservers []string + cniNameServers []string + ) + resolvConf := "/etc/resolv.conf" for _, namespace := range c.config.Spec.Linux.Namespaces { if namespace.Type == spec.NetworkNamespace { @@ -1074,18 +1079,31 @@ func (c *Container) generateResolvConf() (string, error) { return "", errors.Wrapf(err, "error parsing host resolv.conf") } - // Make a new resolv.conf - nameservers := resolvconf.GetNameservers(resolv.Content) - // slirp4netns has a built in DNS server. 
- if c.config.NetMode.IsSlirp4netns() { - nameservers = append([]string{"10.0.2.3"}, nameservers...) + // Check if CNI gave back and DNS servers for us to add in + cniResponse := c.state.NetworkStatus + for _, i := range cniResponse { + if i.DNS.Nameservers != nil { + cniNameServers = append(cniNameServers, i.DNS.Nameservers...) + logrus.Debugf("adding nameserver(s) from cni response of '%q'", i.DNS.Nameservers) + } } + + // If the user provided dns, it trumps all; then dns masq; then resolv.conf if len(c.config.DNSServer) > 0 { // We store DNS servers as net.IP, so need to convert to string - nameservers = []string{} for _, server := range c.config.DNSServer { nameservers = append(nameservers, server.String()) } + } else if len(cniNameServers) > 0 { + nameservers = append(nameservers, cniNameServers...) + } else { + // Make a new resolv.conf + nameservers = resolvconf.GetNameservers(resolv.Content) + // slirp4netns has a built in DNS server. + if c.config.NetMode.IsSlirp4netns() { + nameservers = append([]string{"10.0.2.3"}, nameservers...) + } + } search := resolvconf.GetSearchDomains(resolv.Content) diff --git a/libpod/options.go b/libpod/options.go index a7ddbec34..6df1ca5be 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -1360,10 +1360,15 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption { } destinations[vol.Dest] = true + mountOpts, err := util.ProcessOptions(vol.Options, false, nil) + if err != nil { + return errors.Wrapf(err, "error processing options for named volume %q mounted at %q", vol.Name, vol.Dest) + } + ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{ Name: vol.Name, Dest: vol.Dest, - Options: util.ProcessOptions(vol.Options), + Options: mountOpts, }) } diff --git a/libpod/util.go b/libpod/util.go index b60575264..164800af4 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -69,7 +69,11 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e defer watcher.Close() } - timeoutChan := time.After(timeout) + var timeoutChan <-chan time.Time + + if timeout != 0 { + timeoutChan = time.After(timeout) + } for { select { diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go index 156d6849d..44bbda885 100644 --- a/pkg/spec/spec.go +++ b/pkg/spec/spec.go @@ -2,13 +2,11 @@ package createconfig import ( "os" - "path/filepath" "strings" "github.com/containers/libpod/libpod" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" - pmount "github.com/containers/storage/pkg/mount" "github.com/docker/docker/oci/caps" "github.com/docker/go-units" "github.com/opencontainers/runc/libcontainer/user" @@ -368,7 +366,11 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM // BIND MOUNTS configSpec.Mounts = supercedeUserMounts(userMounts, configSpec.Mounts) // Process mounts to ensure correct options - configSpec.Mounts = initFSMounts(configSpec.Mounts) + finalMounts, err := initFSMounts(configSpec.Mounts) + if err != nil { + return nil, err + } + configSpec.Mounts = finalMounts // BLOCK IO blkio, err := config.CreateBlockIO() @@ -394,43 +396,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM } } - // Make sure that the bind mounts keep options like nosuid, noexec, nodev. 
- mounts, err := pmount.GetMounts() - if err != nil { - return nil, err - } - for i := range configSpec.Mounts { - m := &configSpec.Mounts[i] - isBind := false - for _, o := range m.Options { - if o == "bind" || o == "rbind" { - isBind = true - break - } - } - if !isBind { - continue - } - mount, err := findMount(m.Source, mounts) - if err != nil { - return nil, err - } - if mount == nil { - continue - } - next_option: - for _, o := range strings.Split(mount.Opts, ",") { - if o == "nosuid" || o == "noexec" || o == "nodev" { - for _, e := range m.Options { - if e == o { - continue next_option - } - } - m.Options = append(m.Options, o) - } - } - } - // Add annotations if configSpec.Annotations == nil { configSpec.Annotations = make(map[string]string) @@ -490,25 +455,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM return configSpec, nil } -func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) { - var err error - target, err = filepath.Abs(target) - if err != nil { - return nil, errors.Wrapf(err, "cannot resolve %s", target) - } - var bestSoFar *pmount.Info - for _, i := range mounts { - if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) { - // Won't be better than what we have already found - continue - } - if strings.HasPrefix(target, i.Mountpoint) { - bestSoFar = i - } - } - return bestSoFar, nil -} - func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) { if !config.Privileged { for _, mp := range []string{ diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go index b634f4cac..bc0eaad6d 100644 --- a/pkg/spec/storage.go +++ b/pkg/spec/storage.go @@ -10,6 +10,7 @@ import ( "github.com/containers/buildah/pkg/parse" "github.com/containers/libpod/libpod" "github.com/containers/libpod/pkg/util" + pmount "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/stringid" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -160,22 +161,18 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, } // If requested, add tmpfs filesystems for read-only containers. - // Need to keep track of which we created, so we don't modify options - // for them later... - readonlyTmpfs := map[string]bool{ - "/tmp": false, - "/var/tmp": false, - "/run": false, - } if config.ReadOnlyRootfs && config.ReadOnlyTmpfs { + readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"} options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"} - for dest := range readonlyTmpfs { + for _, dest := range readonlyTmpfs { if _, ok := baseMounts[dest]; ok { continue } localOpts := options if dest == "/run" { localOpts = append(localOpts, "noexec", "size=65536k") + } else { + localOpts = append(localOpts, "exec") } baseMounts[dest] = spec.Mount{ Destination: dest, @@ -183,7 +180,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, Source: "tmpfs", Options: localOpts, } - readonlyTmpfs[dest] = true } } @@ -202,16 +198,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, // Final step: maps to arrays finalMounts := make([]spec.Mount, 0, len(baseMounts)) for _, mount := range baseMounts { - // All user-added tmpfs mounts need their options processed. - // Exception: mounts added by the ReadOnlyTmpfs option, which - // contain several exceptions to normal options rules. 
- if mount.Type == TypeTmpfs && !readonlyTmpfs[mount.Destination] { - opts, err := util.ProcessTmpfsOptions(mount.Options) - if err != nil { - return nil, nil, err - } - mount.Options = opts - } if mount.Type == TypeBind { absSrc, err := filepath.Abs(mount.Source) if err != nil { @@ -226,9 +212,6 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount, finalVolumes = append(finalVolumes, volume) } - logrus.Debugf("Got mounts: %v", finalMounts) - logrus.Debugf("Got volumes: %v", finalVolumes) - return finalMounts, finalVolumes, nil } @@ -250,14 +233,17 @@ func (config *CreateConfig) getVolumesFrom(runtime *libpod.Runtime) (map[string] splitVol = strings.SplitN(vol, ":", 2) ) if len(splitVol) == 2 { - if strings.Contains(splitVol[1], "Z") || - strings.Contains(splitVol[1], "private") || - strings.Contains(splitVol[1], "slave") || - strings.Contains(splitVol[1], "shared") { - return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z", splitVol[1]) + splitOpts := strings.Split(splitVol[1], ",") + for _, checkOpt := range splitOpts { + switch checkOpt { + case "z", "ro", "rw": + // Do nothing, these are valid options + default: + return nil, nil, errors.Errorf("invalid options %q, can only specify 'ro', 'rw', and 'z'", splitVol[1]) + } } - if options, err = parse.ValidateVolumeOpts(strings.Split(splitVol[1], ",")); err != nil { + if options, err = parse.ValidateVolumeOpts(splitOpts); err != nil { return nil, nil, err } } @@ -403,9 +389,7 @@ func getBindMount(args []string) (spec.Mount, error) { Type: TypeBind, } - setSource := false - setDest := false - setRORW := false + var setSource, setDest, setRORW, setSuid, setDev, setExec bool for _, val := range args { kv := strings.Split(val, "=") @@ -440,9 +424,23 @@ func getBindMount(args []string) (spec.Mount, error) { } else { return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val) } - case "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) 
+ case "nosuid", "suid": + if setSuid { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newMount.Options = append(newMount.Options, kv[0]) + case "nodev", "dev": + if setDev { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newMount.Options = append(newMount.Options, kv[0]) + case "noexec", "exec": + if setExec { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true newMount.Options = append(newMount.Options, kv[0]) case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z": newMount.Options = append(newMount.Options, kv[0]) @@ -497,12 +495,34 @@ func getTmpfsMount(args []string) (spec.Mount, error) { Source: TypeTmpfs, } - setDest := false + var setDest, setRORW, setSuid, setDev, setExec bool for _, val := range args { kv := strings.Split(val, "=") switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": + case "ro", "rw": + if setRORW { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once") + } + setRORW = true + newMount.Options = append(newMount.Options, kv[0]) + case "nosuid", "suid": + if setSuid { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newMount.Options = append(newMount.Options, kv[0]) + case "nodev", "dev": + if setDev { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newMount.Options = append(newMount.Options, kv[0]) + case "noexec", "exec": + if setExec { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true newMount.Options = append(newMount.Options, kv[0]) case "tmpfs-mode": if len(kv) == 1 { @@ -543,14 +563,34 @@ func getTmpfsMount(args []string) (spec.Mount, error) { func getNamedVolume(args []string) (*libpod.ContainerNamedVolume, error) { newVolume := new(libpod.ContainerNamedVolume) - setSource := false - setDest := false + var setSource, setDest, setRORW, setSuid, setDev, setExec bool for _, val := range args { kv := strings.Split(val, "=") switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options + case "ro", "rw": + if setRORW { + return nil, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once") + } + setRORW = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "nosuid", "suid": + if setSuid { + return nil, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "nodev", "dev": + if setDev { + return nil, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "noexec", "exec": + if setExec { + return nil, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true newVolume.Options = append(newVolume.Options, kv[0]) case "volume-label": return nil, errors.Errorf("the --volume-label option is not presently implemented") @@ -692,6 +732,9 @@ func (config *CreateConfig) getTmpfsMounts() (map[string]spec.Mount, error) { var options []string spliti := strings.Split(i, ":") destPath := spliti[0] 
+ if err := parse.ValidateVolumeCtrDir(spliti[0]); err != nil { + return nil, err + } if len(spliti) > 1 { options = strings.Split(spliti[1], ",") } @@ -775,16 +818,75 @@ func supercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.M } // Ensure mount options on all mounts are correct -func initFSMounts(inputMounts []spec.Mount) []spec.Mount { +func initFSMounts(inputMounts []spec.Mount) ([]spec.Mount, error) { + // We need to look up mounts so we can figure out the proper mount flags + // to apply. + systemMounts, err := pmount.GetMounts() + if err != nil { + return nil, errors.Wrapf(err, "error retrieving system mounts to look up mount options") + } + + // TODO: We probably don't need to re-build the mounts array var mounts []spec.Mount for _, m := range inputMounts { if m.Type == TypeBind { - m.Options = util.ProcessOptions(m.Options) + baseMnt, err := findMount(m.Destination, systemMounts) + if err != nil { + return nil, errors.Wrapf(err, "error looking up mountpoint for mount %s", m.Destination) + } + var noexec, nosuid, nodev bool + for _, baseOpt := range strings.Split(baseMnt.Opts, ",") { + switch baseOpt { + case "noexec": + noexec = true + case "nosuid": + nosuid = true + case "nodev": + nodev = true + } + } + + defaultMountOpts := new(util.DefaultMountOptions) + defaultMountOpts.Noexec = noexec + defaultMountOpts.Nosuid = nosuid + defaultMountOpts.Nodev = nodev + + opts, err := util.ProcessOptions(m.Options, false, defaultMountOpts) + if err != nil { + return nil, err + } + m.Options = opts } if m.Type == TypeTmpfs && filepath.Clean(m.Destination) != "/dev" { - m.Options = append(m.Options, "tmpcopyup") + opts, err := util.ProcessOptions(m.Options, true, nil) + if err != nil { + return nil, err + } + m.Options = opts } + mounts = append(mounts, m) } - return mounts + return mounts, nil +} + +// TODO: We could make this a bit faster by building a tree of the mountpoints +// and traversing it to identify the correct mount. +func findMount(target string, mounts []*pmount.Info) (*pmount.Info, error) { + var err error + target, err = filepath.Abs(target) + if err != nil { + return nil, errors.Wrapf(err, "cannot resolve %s", target) + } + var bestSoFar *pmount.Info + for _, i := range mounts { + if bestSoFar != nil && len(bestSoFar.Mountpoint) > len(i.Mountpoint) { + // Won't be better than what we have already found + continue + } + if strings.HasPrefix(target, i.Mountpoint) { + bestSoFar = i + } + } + return bestSoFar, nil } diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go index 9b2c734c0..670daeaf9 100644 --- a/pkg/util/mountOpts.go +++ b/pkg/util/mountOpts.go @@ -10,91 +10,120 @@ var ( // ErrBadMntOption indicates that an invalid mount option was passed. ErrBadMntOption = errors.Errorf("invalid mount option") // ErrDupeMntOption indicates that a duplicate mount option was passed. - ErrDupeMntOption = errors.Errorf("duplicate option passed") + ErrDupeMntOption = errors.Errorf("duplicate mount option passed") ) -// ProcessOptions parses the options for a bind mount and ensures that they are -// sensible and follow convention. 
-func ProcessOptions(options []string) []string { - var ( - foundbind, foundrw, foundro bool - rootProp string - ) - - for _, opt := range options { - switch opt { - case "bind", "rbind": - foundbind = true - case "ro": - foundro = true - case "rw": - foundrw = true - case "private", "rprivate", "slave", "rslave", "shared", "rshared": - rootProp = opt - } - } - if !foundbind { - options = append(options, "rbind") - } - if !foundrw && !foundro { - options = append(options, "rw") - } - if rootProp == "" { - options = append(options, "rprivate") - } - return options +// DefaultMountOptions sets default mount options for ProcessOptions. +type DefaultMountOptions struct { + Noexec bool + Nosuid bool + Nodev bool } -// ProcessTmpfsOptions parses the options for a tmpfs mountpoint and ensures -// that they are sensible and follow convention. -func ProcessTmpfsOptions(options []string) ([]string, error) { +// ProcessOptions parses the options for a bind or tmpfs mount and ensures that +// they are sensible and follow convention. The isTmpfs variable controls +// whether extra, tmpfs-specific options will be allowed. +// The defaults variable controls default mount options that will be set. If it +// is not included, they will be set unconditionally. +func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOptions) ([]string, error) { var ( - foundWrite, foundSize, foundProp, foundMode bool + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool ) - baseOpts := []string{"noexec", "nosuid", "nodev"} for _, opt := range options { // Some options have parameters - size, mode splitOpt := strings.SplitN(opt, "=", 2) switch splitOpt[0] { + case "exec", "noexec": + if foundExec { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used") + } + foundExec = true + case "suid", "nosuid": + if foundSuid { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nosuid' and 'suid' can be used") + } + foundSuid = true + case "nodev", "dev": + if foundDev { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nodev' and 'dev' can be used") + } + foundDev = true case "rw", "ro": if foundWrite { - return nil, errors.Wrapf(ErrDupeMntOption, "only one of rw and ro can be used") + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rw' and 'ro' can be used") } foundWrite = true - baseOpts = append(baseOpts, opt) case "private", "rprivate", "slave", "rslave", "shared", "rshared": if foundProp { return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used") } foundProp = true - baseOpts = append(baseOpts, opt) case "size": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'size' option is only allowed with tmpfs mounts") + } if foundSize { return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified") } foundSize = true - baseOpts = append(baseOpts, opt) case "mode": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'mode' option is only allowed with tmpfs mounts") + } if foundMode { return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified") } foundMode = true - baseOpts = append(baseOpts, opt) - case "noexec", "nodev", "nosuid": - // Do nothing. We always include these even if they are - // not explicitly requested. 
+ case "tmpcopyup": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts") + } + if foundCopyUp { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' option can only be set once") + } + foundCopyUp = true + case "bind", "rbind": + if isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts") + } + if foundBind { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rbind' and 'bind' can be used") + } + foundBind = true + case "z", "Z": + if isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'z' and 'Z' options are not allowed with tmpfs mounts") + } + if foundZ { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'z' and 'Z' can be used") + } default: - return nil, errors.Wrapf(ErrBadMntOption, "unknown tmpfs option %q", opt) + return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt) } } if !foundWrite { - baseOpts = append(baseOpts, "rw") + options = append(options, "rw") } if !foundProp { - baseOpts = append(baseOpts, "rprivate") + options = append(options, "rprivate") + } + if !foundExec && (defaults == nil || defaults.Noexec) { + options = append(options, "noexec") + } + if !foundSuid && (defaults == nil || defaults.Nosuid) { + options = append(options, "nosuid") + } + if !foundDev && (defaults == nil || defaults.Nodev) { + options = append(options, "nodev") + } + if isTmpfs && !foundCopyUp { + options = append(options, "tmpcopyup") + } + if !isTmpfs && !foundBind { + options = append(options, "rbind") } - return baseOpts, nil + return options, nil } diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go index abb93a149..5bad6744b 100644 --- a/test/e2e/run_volume_test.go +++ b/test/e2e/run_volume_test.go @@ -162,4 +162,32 @@ var _ = Describe("Podman run with volumes", func() { Expect(session.OutputToString()).To(ContainSubstring("/testvol1")) Expect(session.OutputToString()).To(ContainSubstring("/testvol2")) }) + + It("podman run with volumes and suid/dev/exec options", func() { + mountPath := filepath.Join(podmanTest.TempDir, "secrets") + os.Mkdir(mountPath, 0755) + session := podmanTest.Podman([]string{"run", "--rm", "-v", fmt.Sprintf("%s:/run/test:suid,dev,exec", mountPath), ALPINE, "grep", "/run/test", "/proc/self/mountinfo"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + found, matches := session.GrepString("/run/test") + Expect(found).Should(BeTrue()) + Expect(matches[0]).To(Not(ContainSubstring("noexec"))) + Expect(matches[0]).To(Not(ContainSubstring("nodev"))) + Expect(matches[0]).To(Not(ContainSubstring("nosuid"))) + + session = podmanTest.Podman([]string{"run", "--rm", "--tmpfs", "/run/test:suid,dev,exec", ALPINE, "grep", "/run/test", "/proc/self/mountinfo"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Equal(0)) + found, matches = session.GrepString("/run/test") + Expect(found).Should(BeTrue()) + Expect(matches[0]).To(Not(ContainSubstring("noexec"))) + Expect(matches[0]).To(Not(ContainSubstring("nodev"))) + Expect(matches[0]).To(Not(ContainSubstring("nosuid"))) + }) + + It("podman run with noexec can't exec", func() { + session := podmanTest.Podman([]string{"run", "--rm", "-v", "/bin:/hostbin:noexec", ALPINE, "/hostbin/ls", "/"}) + session.WaitWithDefaultTimeout() + Expect(session.ExitCode()).To(Not(Equal(0))) + }) }) diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml 
index 19cd0c64c..1f7815073 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -19,10 +19,10 @@ env: #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1559164849" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1559164849" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190514" # Latest - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1804-bionic-v20190530" # LTS + FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1565360543" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1565360543" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190724" # Latest + PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1804-bionic-v20190722a" # LTS #### #### Command variables to help avoid duplication diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index ddf6fb1d4..82248b901 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,6 +2,16 @@ # Changelog +## v1.10.1 (2019-08-08) + Bump containers/image to v3.0.2 to fix keyring issue + Bug fix for volume minus syntax + Bump container/storage v1.13.1 and containers/image v3.0.1 + bump github.com/containernetworking/cni to v0.7.1 + Add overlayfs to fuse-overlayfs tip + Add automatic apparmor tag discovery + Fix bug whereby --get-login has no effect + Bump to v1.11.0-dev + ## v1.10.0 (2019-08-02) vendor github.com/containers/image@v3.0.0 Remove GO111MODULE in favor of `-mod=vendor` diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index f8e079cbf..b490d8041 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -21,7 +21,7 @@ export GO_BUILD=$(GO) build endif GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) -BUILD_INFO := $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) +SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)" CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod) @@ -29,7 +29,7 @@ RUNC_COMMIT := $(shell sed -n 's;\tgithub.com/opencontainers/runc \([^ \n]*\).*$ LIBSECCOMP_COMMIT := release-2.3 EXTRALDFLAGS := -LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(BUILD_INFO) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS) +LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT)' $(EXTRALDFLAGS) SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go pkg/unshare/*.c pkg/unshare/*.go util/*.go all: buildah imgtype docs @@ -65,19 +65,9 @@ docs: install.tools ## build the docs on the host gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) -# We use https://github.com/lk4d4/vndr to manage dependencies. -.PHONY: deps -deps: gopath - env GOPATH=$(shell cd ../../../.. 
; pwd) vndr - .PHONY: validate validate: install.tools - # Run gofmt on version 1.11 and higher -ifneq ($(GO110),$(GOVERSION)) - @./tests/validate/gofmt.sh -endif @./tests/validate/whitespace.sh - @./tests/validate/govet.sh @./tests/validate/git-validation.sh .PHONY: install.tools @@ -124,7 +114,7 @@ install.runc: .PHONY: test-integration test-integration: install.tools - ./tests/tools/ginkgo $(BUILDFLAGS) -v tests/e2e/. + ./tests/tools/build/ginkgo $(BUILDFLAGS) -v tests/e2e/. cd tests; ./test_runner.sh tests/testreport/testreport: tests/testreport/testreport.go diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index 827d5a87f..01e376a17 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -55,7 +55,8 @@ into other tools. Podman specializes in all of the commands and functions that help you to maintain and modify OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers -created from those images. +created from those images. For building container images via Dockerfiles, Podman uses Buildah's +golang API and can be installed independently from Buildah. A major difference between Podman and Buildah is their concept of a container. Podman allows users to create "traditional containers" where the intent of these containers is @@ -76,7 +77,7 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh): ```bash $ cat > lighttpd.sh <<"EOF" -#!/bin/bash -x +#!/usr/bin/env bash -x ctr1=$(buildah from "${1:-fedora}") diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 6ff9f5250..bd3d25cd4 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -16,7 +16,6 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -36,57 +35,72 @@ type AddAndCopyOptions struct { Hasher io.Writer // Excludes is the contents of the .dockerignore file Excludes []string - // The base directory for Excludes and data to copy in + // ContextDir is the base directory for Excludes for content being copied ContextDir string // ID mapping options to use when contents to be copied are part of // another container, and need ownerships to be mapped from the host to // that container's values before copying them into the container. IDMappingOptions *IDMappingOptions + // DryRun indicates that the content should be digested, but not actually + // copied into the container. + DryRun bool } // addURL copies the contents of the source URL to the destination. This is // its own function so that deferred closes happen after we're done pulling // down each item of potentially many. 
-func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer) error { - logrus.Debugf("saving %q to %q", srcurl, destination) +func (b *Builder) addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer, dryRun bool) error { resp, err := http.Get(srcurl) if err != nil { return errors.Wrapf(err, "error getting %q", srcurl) } defer resp.Body.Close() - f, err := os.Create(destination) - if err != nil { - return errors.Wrapf(err, "error creating %q", destination) + + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) } - if err = f.Chown(owner.UID, owner.GID); err != nil { - return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID) + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() } - if last := resp.Header.Get("Last-Modified"); last != "" { - if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil { - logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2) - } else { - defer func() { - if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil { - logrus.Debugf("error setting mtime on %q to Last-Modified time %q: %v", destination, last, err3) - } - }() + thisWriter := thisHasher + + if !dryRun { + logrus.Debugf("saving %q to %q", srcurl, destination) + f, err := os.Create(destination) + if err != nil { + return errors.Wrapf(err, "error creating %q", destination) } + defer f.Close() + if err = f.Chown(owner.UID, owner.GID); err != nil { + return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID) + } + if last := resp.Header.Get("Last-Modified"); last != "" { + if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil { + logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2) + } else { + defer func() { + if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil { + logrus.Debugf("error setting mtime on %q to Last-Modified time %q: %v", destination, last, err3) + } + }() + } + } + defer func() { + if err2 := f.Chmod(0600); err2 != nil { + logrus.Debugf("error setting permissions on %q: %v", destination, err2) + } + }() + thisWriter = io.MultiWriter(f, thisWriter) } - defer f.Close() - bodyReader := io.Reader(resp.Body) - if hasher != nil { - bodyReader = io.TeeReader(bodyReader, hasher) - } - n, err := io.Copy(f, bodyReader) + + n, err := io.Copy(thisWriter, resp.Body) if err != nil { return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl) } if resp.ContentLength >= 0 && n != resp.ContentLength { return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength) } - if err := f.Chmod(0600); err != nil { - return errors.Wrapf(err, "error setting permissions on %q", destination) - } return nil } @@ -119,32 +133,35 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)} dest := mountPoint - if destination != "" && filepath.IsAbs(destination) { - dir := filepath.Dir(destination) - if dir != "." && dir != "/" { - if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil { - return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir)) + if !options.DryRun { + // Resolve the destination if it was specified as a relative path. 
+ if destination != "" && filepath.IsAbs(destination) { + dir := filepath.Dir(destination) + if dir != "." && dir != "/" { + if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil { + return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir)) + } + } + dest = filepath.Join(dest, destination) + } else { + if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil { + return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, b.WorkDir())) } + dest = filepath.Join(dest, b.WorkDir(), destination) } - dest = filepath.Join(dest, destination) - } else { - if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil { - return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, b.WorkDir())) + // If the destination was explicitly marked as a directory by ending it + // with a '/', create it so that we can be sure that it's a directory, + // and any files we're copying will be placed in the directory. + if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator { + if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { + return errors.Wrapf(err, "error creating directory %q", dest) + } } - dest = filepath.Join(dest, b.WorkDir(), destination) - } - // If the destination was explicitly marked as a directory by ending it - // with a '/', create it so that we can be sure that it's a directory, - // and any files we're copying will be placed in the directory. - if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator { - if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { - return errors.Wrapf(err, "error creating directory %q", dest) + // Make sure the destination's parent directory is usable. + if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() { + return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest)) } } - // Make sure the destination's parent directory is usable. - if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() { - return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest)) - } // Now look at the destination itself. destfi, err := os.Stat(dest) if err != nil { @@ -156,10 +173,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if len(source) > 1 && (destfi == nil || !destfi.IsDir()) { return errors.Errorf("destination %q is not a directory", dest) } - copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher) - copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher) - untarPath := b.untarPath(nil, options.Hasher) - err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...) + copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun) + copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun) + untarPath := b.untarPath(nil, options.Hasher, options.DryRun) + err = b.addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...) 
if err != nil { return err } @@ -230,9 +247,10 @@ func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternM return matcher, nil } -func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error { - for _, src := range source { +func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error { + for n, src := range source { if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + b.ContentDigester.Start("") // We assume that source is a file, and we're copying // it to the destination. If the destination is // already a directory, create a file inside of it. @@ -246,7 +264,7 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de if destfi != nil && destfi.IsDir() { d = filepath.Join(dest, path.Base(url.Path)) } - if err = addURL(d, src, hostOwner, options.Hasher); err != nil { + if err = b.addURL(d, src, hostOwner, options.Hasher, options.DryRun); err != nil { return err } continue @@ -270,14 +288,17 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de return errors.Wrapf(err, "error reading %q", esrc) } if srcfi.IsDir() { + b.ContentDigester.Start("dir") // The source is a directory, so copy the contents of // the source directory into the target directory. Try // to create it first, so that if there's a problem, // we'll discover why that won't work. - if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { - return errors.Wrapf(err, "error creating directory %q", dest) + if !options.DryRun { + if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil { + return errors.Wrapf(err, "error creating directory %q", dest) + } } - logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*") + logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*") if excludes == nil || !excludes.Exclusions() { if err = copyWithTar(esrc, dest); err != nil { return errors.Wrapf(err, "error copying %q to %q", esrc, dest) @@ -295,26 +316,11 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de if skip { return nil } - // combine the filename with the dest directory + // combine the source's basename with the dest directory fpath, err := filepath.Rel(esrc, path) if err != nil { return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc) } - mtime := info.ModTime() - atime := mtime - times := []syscall.Timespec{ - syscall.NsecToTimespec(atime.Unix()), - syscall.NsecToTimespec(mtime.Unix()), - } - if info.IsDir() { - return addHelperDirectory(esrc, path, filepath.Join(dest, fpath), info, hostOwner, times) - } - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - return addHelperSymlink(path, filepath.Join(dest, fpath), hostOwner, times) - } - if !info.Mode().IsRegular() { - return errors.Errorf("error copying %q to %q: source is not a regular file; file mode is %s", path, dest, info.Mode()) - } if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil { return errors.Wrapf(err, "error copying %q to %q", path, dest) } @@ -326,6 +332,8 @@ func 
addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de continue } + b.ContentDigester.Start("file") + if !extract || !archive.IsArchivePath(esrc) { // This source is a file, and either it's not an // archive, or we don't care whether or not it's an @@ -335,7 +343,7 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de d = filepath.Join(dest, filepath.Base(gsrc)) } // Copy the file, preserving attributes. - logrus.Debugf("copying %q to %q", esrc, d) + logrus.Debugf("copying[%d] %q to %q", n, esrc, d) if err = copyFileWithTar(esrc, d); err != nil { return errors.Wrapf(err, "error copying %q to %q", esrc, d) } @@ -343,7 +351,7 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de } // We're extracting an archive into the destination directory. - logrus.Debugf("extracting contents of %q into %q", esrc, dest) + logrus.Debugf("extracting contents[%d] of %q into %q", n, esrc, dest) if err = untarPath(esrc, dest); err != nil { return errors.Wrapf(err, "error extracting %q into %q", esrc, dest) } @@ -351,45 +359,3 @@ func addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, de } return nil } - -func addHelperDirectory(esrc, path, dest string, info os.FileInfo, hostOwner idtools.IDPair, times []syscall.Timespec) error { - if err := idtools.MkdirAllAndChownNew(dest, info.Mode().Perm(), hostOwner); err != nil { - // discard only EEXIST on the top directory, which would have been created earlier in the caller - if !os.IsExist(err) || path != esrc { - return errors.Errorf("error creating directory %q", dest) - } - } - if err := idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil { - return errors.Wrapf(err, "error setting owner of directory %q to %d:%d", dest, hostOwner.UID, hostOwner.GID) - } - if err := system.LUtimesNano(dest, times); err != nil { - return errors.Wrapf(err, "error setting dates on directory %q", dest) - } - return nil -} - -func addHelperSymlink(src, dest string, hostOwner idtools.IDPair, times []syscall.Timespec) error { - linkContents, err := os.Readlink(src) - if err != nil { - return errors.Wrapf(err, "error reading contents of symbolic link at %q", src) - } - if err = os.Symlink(linkContents, dest); err != nil { - if !os.IsExist(err) { - return errors.Wrapf(err, "error creating symbolic link to %q at %q", linkContents, dest) - } - if err = os.RemoveAll(dest); err != nil { - return errors.Wrapf(err, "error clearing symbolic link target %q", dest) - } - if err = os.Symlink(linkContents, dest); err != nil { - return errors.Wrapf(err, "error creating symbolic link to %q at %q (second try)", linkContents, dest) - } - } - if err = idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil { - return errors.Wrapf(err, "error setting owner of symbolic link %q to %d:%d", dest, hostOwner.UID, hostOwner.GID) - } - if err = system.LUtimesNano(dest, times); err != nil { - return errors.Wrapf(err, "error setting dates on symbolic link %q", dest) - } - logrus.Debugf("Symlink(%s, %s)", linkContents, dest) - return nil -} diff --git a/vendor/github.com/containers/buildah/btrfs_installed_tag.sh b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh index 357f33b8b..c4d99f377 100644 --- a/vendor/github.com/containers/buildah/btrfs_installed_tag.sh +++ b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash cc -E - > /dev/null 2> /dev/null << EOF #include <btrfs/ioctl.h> EOF diff --git 
a/vendor/github.com/containers/buildah/btrfs_tag.sh b/vendor/github.com/containers/buildah/btrfs_tag.sh index cc48504ab..59cb969ad 100644 --- a/vendor/github.com/containers/buildah/btrfs_tag.sh +++ b/vendor/github.com/containers/buildah/btrfs_tag.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash cc -E - > /dev/null 2> /dev/null << EOF #include <btrfs/version.h> EOF diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index cd0d48566..1a290f262 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -26,7 +26,7 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.10.1" + Version = "1.11.0-dev" // The value we use to identify what type of information, currently a // serialized Builder structure, we are using as per-container state. // This should only be changed when we make incompatible changes to @@ -196,6 +196,8 @@ type Builder struct { Format string // TempVolumes are temporary mount points created during container runs TempVolumes map[string]bool + // ContentDigester counts the digest of all Add()ed content + ContentDigester CompositeDigester } // BuilderInfo are used as objects to display container information diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go index b224fb367..fbccbcdb0 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -1181,6 +1181,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( switch m.Type { case "bind": // Do the bind mount. + logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination)) if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil { return undoBinds, errors.Wrapf(err, "error bind mounting %q from host to %q in mount namespace (%q)", m.Source, m.Destination, target) } @@ -1366,7 +1367,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } } else { // If the target's is not a directory or os.DevNull, bind mount os.DevNull over it. - if isDevNull(targetinfo) { + if !isDevNull(targetinfo) { if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil { return undoBinds, errors.Wrapf(err, "error masking non-directory %q in mount namespace", target) } diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go new file mode 100644 index 000000000..498bdeeb5 --- /dev/null +++ b/vendor/github.com/containers/buildah/digester.go @@ -0,0 +1,64 @@ +package buildah + +import ( + "hash" + "strings" + + digest "github.com/opencontainers/go-digest" +) + +type singleDigester struct { + digester digest.Digester + prefix string +} + +// CompositeDigester can compute a digest over multiple items. +type CompositeDigester struct { + digesters []singleDigester +} + +// Restart clears all state, so that the composite digester can start over. +func (c *CompositeDigester) Restart() { + c.digesters = nil +} + +// Start starts recording the digest for a new item. The caller should call +// Hash() immediately after to retrieve the new io.Writer. 
+func (c *CompositeDigester) Start(prefix string) { + prefix = strings.TrimSuffix(prefix, ":") + c.digesters = append(c.digesters, singleDigester{digester: digest.Canonical.Digester(), prefix: prefix}) +} + +// Hash returns the hasher for the current item. +func (c *CompositeDigester) Hash() hash.Hash { + num := len(c.digesters) + if num == 0 { + return nil + } + return c.digesters[num-1].digester.Hash() +} + +// Digest returns the prefix and a composite digest over everything that's been +// digested. +func (c *CompositeDigester) Digest() (string, digest.Digest) { + num := len(c.digesters) + switch num { + case 0: + return "", "" + case 1: + return c.digesters[0].prefix, c.digesters[0].digester.Digest() + default: + content := "" + for i, digester := range c.digesters { + if i > 0 { + content += "," + } + prefix := digester.prefix + if digester.prefix != "" { + digester.prefix += ":" + } + content += prefix + digester.digester.Digest().Encoded() + } + return "multi", digest.Canonical.FromString(content) + } +} diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod index 197db35da..61c80d90b 100644 --- a/vendor/github.com/containers/buildah/go.mod +++ b/vendor/github.com/containers/buildah/go.mod @@ -8,7 +8,7 @@ require ( github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect github.com/containernetworking/cni v0.7.1 github.com/containers/image v3.0.2+incompatible - github.com/containers/storage v1.13.1 + github.com/containers/storage v1.13.2 github.com/cyphar/filepath-securejoin v0.2.1 github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 github.com/docker/docker-credential-helpers v0.6.1 // indirect diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum index 1016bcbea..afd38be1a 100644 --- a/vendor/github.com/containers/buildah/go.sum +++ b/vendor/github.com/containers/buildah/go.sum @@ -49,6 +49,8 @@ github.com/containers/storage v1.12.16 h1:zePYS1GiG8CuRqLCeA0ufx4X27K06HcJLV50Dd github.com/containers/storage v1.12.16/go.mod h1:QsZp4XMJjyPNNbQHZeyNW3OmhwsWviI+7S6iOcu6a4c= github.com/containers/storage v1.13.1 h1:rjVirLS9fCGkUFlLDZEoGDDUugtIf46DufWvJu08wxQ= github.com/containers/storage v1.13.1/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= +github.com/containers/storage v1.13.2 h1:UXZ0Ckmk6+6+4vj2M2ywruVtH97pnRoAhTG8ctd+yQI= +github.com/containers/storage v1.13.2/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= github.com/cyphar/filepath-securejoin v0.2.1 h1:5DPkzz/0MwUpvR4fxASKzgApeq2OMFY5FfYtrX28Coo= github.com/cyphar/filepath-securejoin v0.2.1/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go index a6d3b4c8f..2fb10ab83 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go +++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go @@ -6,7 +6,6 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/containers/storage/pkg/reexec" "github.com/pkg/errors" @@ -15,13 +14,11 @@ import ( const ( symlinkChrootedCommand = "chrootsymlinks-resolve" - symlinkModifiedTime = "modtimesymlinks-resolve" maxSymlinksResolved = 40 ) func init() { reexec.Register(symlinkChrootedCommand, resolveChrootedSymlinks) - 
reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified) } // resolveSymlink uses a child subprocess to resolve any symlinks in filename @@ -71,118 +68,6 @@ func resolveChrootedSymlinks() { os.Exit(status) } -// main() for grandparent subprocess. Its main job is to shuttle stdio back -// and forth, managing a pseudo-terminal if we want one, for our child, the -// parent subprocess. -func resolveSymlinkTimeModified() { - status := 0 - flag.Parse() - if len(flag.Args()) < 1 { - os.Exit(1) - } - // Our first parameter is the directory to chroot into. - if err := unix.Chdir(flag.Arg(0)); err != nil { - fmt.Fprintf(os.Stderr, "chdir(): %v\n", err) - os.Exit(1) - } - if err := unix.Chroot(flag.Arg(0)); err != nil { - fmt.Fprintf(os.Stderr, "chroot(): %v\n", err) - os.Exit(1) - } - - // Our second parameter is the path name to evaluate for symbolic links. - // Our third parameter is the time the cached intermediate image was created. - // We check whether the modified time of the filepath we provide is after the time the cached image was created. - timeIsGreater, err := modTimeIsGreater(flag.Arg(0), flag.Arg(1), flag.Arg(2)) - if err != nil { - fmt.Fprintf(os.Stderr, "error checking if modified time of resolved symbolic link is greater: %v\n", err) - os.Exit(1) - } - if _, err := os.Stdout.WriteString(fmt.Sprintf("%v", timeIsGreater)); err != nil { - fmt.Fprintf(os.Stderr, "error writing string to stdout: %v\n", err) - os.Exit(1) - } - os.Exit(status) -} - -// resolveModifiedTime (in the grandparent process) checks filename for any symlinks, -// resolves it and compares the modified time of the file with historyTime, which is -// the creation time of the cached image. It returns true if filename was modified after -// historyTime, otherwise returns false. -func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) { - // The child process expects a chroot and one path that - // will be consulted relative to the chroot directory and evaluated - // for any symbolic links present. - cmd := reexec.Command(symlinkModifiedTime, rootdir, filename, historyTime) - output, err := cmd.CombinedOutput() - if err != nil { - return false, errors.Wrapf(err, string(output)) - } - // Hand back true/false depending on in the file was modified after the caches image was created. - return string(output) == "true", nil -} - -// modTimeIsGreater goes through the files added/copied in using the Dockerfile and -// checks the time stamp (follows symlinks) with the time stamp of when the cached -// image was created. IT compares the two and returns true if the file was modified -// after the cached image was created, otherwise it returns false. -func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { - var timeIsGreater bool - - // Convert historyTime from string to time.Time for comparison - histTime, err := time.Parse(time.RFC3339Nano, historyTime) - if err != nil { - return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime) - } - - // Since we are chroot in rootdir, we want a relative path, i.e (path - rootdir) - relPath, err := filepath.Rel(rootdir, path) - if err != nil { - return false, errors.Wrapf(err, "error making path %q relative to %q", path, rootdir) - } - - // Walk the file tree and check the time stamps. - err = filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error { - // If using cached images, it is possible for files that are being copied to come from - // previous build stages. 
But if using cached images, then the copied file won't exist - // since a container won't have been created for the previous build stage and info will be nil. - // In that case just return nil and continue on with using the cached image for the whole build process. - if info == nil { - return nil - } - modTime := info.ModTime() - if info.Mode()&os.ModeSymlink == os.ModeSymlink { - // Evaluate any symlink that occurs to get updated modified information - resolvedPath, err := filepath.EvalSymlinks(path) - if err != nil && os.IsNotExist(err) { - return errors.Wrapf(errDanglingSymlink, "%q", path) - } - if err != nil { - return errors.Wrapf(err, "error evaluating symlink %q", path) - } - fileInfo, err := os.Stat(resolvedPath) - if err != nil { - return errors.Wrapf(err, "error getting file info %q", resolvedPath) - } - modTime = fileInfo.ModTime() - } - if modTime.After(histTime) { - timeIsGreater = true - return nil - } - return nil - }) - - if err != nil { - // if error is due to dangling symlink, ignore error and return nil - if errors.Cause(err) == errDanglingSymlink { - return false, nil - } - return false, errors.Wrapf(err, "error walking file tree %q", path) - } - return timeIsGreater, err -} - // getSymbolic link goes through each part of the path and continues resolving symlinks as they appear. // Returns what the whole target path for what "path" resolves to. func getSymbolicLink(path string) (string, error) { diff --git a/vendor/github.com/containers/buildah/imagebuildah/errors.go b/vendor/github.com/containers/buildah/imagebuildah/errors.go deleted file mode 100644 index cf299656b..000000000 --- a/vendor/github.com/containers/buildah/imagebuildah/errors.go +++ /dev/null @@ -1,7 +0,0 @@ -package imagebuildah - -import "errors" - -var ( - errDanglingSymlink = errors.New("error evaluating dangling symlink") -) diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 8d68fe85f..34ccb6efb 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -42,8 +42,8 @@ var builtinAllowedBuildArgs = map[string]bool{ } // Executor is a buildah-based implementation of the imagebuilder.Executor -// interface. It coordinates the entire build by using one StageExecutors to -// handle each stage of the build. +// interface. It coordinates the entire build by using one or more +// StageExecutors to handle each stage of the build. type Executor struct { stages map[string]*StageExecutor store storage.Store @@ -248,26 +248,36 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi return oci.History, nil } -// getCreatedBy returns the command the image at node will be created by. -func (b *Executor) getCreatedBy(node *parser.Node) string { +// getCreatedBy returns the command the image at node will be created by. If +// the passed-in CompositeDigester is not nil, it is assumed to have the digest +// information for the content if the node is ADD or COPY. 
+func (b *Executor) getCreatedBy(node *parser.Node, addedContentDigest string) string { if node == nil { return "/bin/sh" } - if node.Value == "run" { + switch strings.ToUpper(node.Value) { + case "RUN": buildArgs := b.getBuildArgs() if buildArgs != "" { return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] } return "/bin/sh -c " + node.Original[4:] + case "ADD", "COPY": + destination := node + for destination.Next != nil { + destination = destination.Next + } + return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentDigest + " in " + destination.Value + " " + default: + return "/bin/sh -c #(nop) " + node.Original } - return "/bin/sh -c #(nop) " + node.Original } // historyMatches returns true if a candidate history matches the history of our // base image (if we have one), plus the current instruction. // Used to verify whether a cache of the intermediate image exists and whether // to run the build again. -func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History) bool { +func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentDigest string) bool { if len(baseHistory) >= len(history) { return false } @@ -297,7 +307,7 @@ func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, return false } } - return history[len(baseHistory)].CreatedBy == b.getCreatedBy(child) + return history[len(baseHistory)].CreatedBy == b.getCreatedBy(child, addedContentDigest) } // getBuildArgs returns a string of the build-args specified during the build process @@ -406,13 +416,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // arg expansion, so if the previous stage // was named using argument values, we might // not record the right value here. - rootfs := flag[7:] + rootfs := strings.TrimPrefix(flag, "--from=") b.rootfsMap[rootfs] = true logrus.Debugf("rootfs: %q", rootfs) } } } - break } node = node.Next // next line } diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 8030e351a..144bf8c24 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "net/http" "os" "path/filepath" "strconv" @@ -249,9 +248,112 @@ func (s *StageExecutor) volumeCacheRestore() error { return nil } +// digestSpecifiedContent digests any content that this next instruction would +// add to the image, returning its digest if there is any, or an empty string +// otherwise. We don't care about the details of where in the filesystem the +// content actually goes, because we're not actually going to add it here, so +// this is less involved than Copy(). +func (s *StageExecutor) digestSpecifiedContent(node *parser.Node) (string, error) { + // No instruction: done. + if node == nil { + return "", nil + } + + // Not adding content: done. + switch strings.ToUpper(node.Value) { + default: + return "", nil + case "ADD", "COPY": + } + + // Pull out everything except the first node (the instruction) and the + // last node (the destination).
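The srcs/destination split that follows leans on how imagebuilder's parser lays out an instruction: the arguments hang off the instruction node as a singly linked list through Next, with the destination always last. For a line like COPY foo bar /dest/ the chain is copy -> foo -> bar -> /dest/. A self-contained illustration with a stand-in node type (parser.Node carries more fields; only Value and Next matter for this walk):

    // Stand-in for parser.Node; only the fields used by the walk are shown.
    type node struct {
    	Value string
    	Next  *node
    }

    // splitArgs mirrors the loop that follows: every argument before the last
    // is a source, and the last one is the destination.
    func splitArgs(instruction *node) (srcs []string, dest string) {
    	n := instruction
    	for n.Next != nil {
    		n = n.Next
    		if n.Next != nil {
    			srcs = append(srcs, n.Value)
    		} else {
    			dest = n.Value
    		}
    	}
    	return srcs, dest
    }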
+ var srcs []string + destination := node + for destination.Next != nil { + destination = destination.Next + if destination.Next != nil { + srcs = append(srcs, destination.Value) + } + } + + var sources []string + var idMappingOptions *buildah.IDMappingOptions + contextDir := s.executor.contextDir + for _, flag := range node.Flags { + if strings.HasPrefix(flag, "--from=") { + // Flag says to read the content from another + // container. Update the ID mappings and + // all-content-comes-from-below-this-directory value. + from := strings.TrimPrefix(flag, "--from=") + if other, ok := s.executor.stages[from]; ok { + contextDir = other.mountPoint + idMappingOptions = &other.builder.IDMappingOptions + } else if builder, ok := s.executor.containerMap[from]; ok { + contextDir = builder.MountPoint + idMappingOptions = &builder.IDMappingOptions + } else { + return "", errors.Errorf("the stage %q has not been built", from) + } + } + } + for _, src := range srcs { + if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + // Source is a URL. TODO: cache this content + // somewhere, so that we can avoid pulling it down + // again if we end up needing to drop it into the + // filesystem. + sources = append(sources, src) + } else { + // Source is not a URL, so it's a location relative to + // the all-content-comes-from-below-this-directory + // directory. + contextSrc, err := securejoin.SecureJoin(contextDir, src) + if err != nil { + return "", errors.Wrapf(err, "error joining %q and %q", contextDir, src) + } + sources = append(sources, contextSrc) + } + } + // If the all-content-comes-from-below-this-directory is the build + // context, read its .dockerignore. + var excludes []string + if contextDir == s.executor.contextDir { + var err error + if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil { + return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir) + } + } + // Restart the digester and have it do a dry-run copy to compute the + // digest information. + options := buildah.AddAndCopyOptions{ + Excludes: excludes, + ContextDir: contextDir, + IDMappingOptions: idMappingOptions, + DryRun: true, + } + s.builder.ContentDigester.Restart() + download := strings.ToUpper(node.Value) == "ADD" + err := s.builder.Add(destination.Value, download, options, sources...) + if err != nil { + return "", errors.Wrapf(err, "error dry-running %q", node.Original) + } + // Return the formatted version of the digester's result. + contentDigest := "" + prefix, digest := s.builder.ContentDigester.Digest() + if prefix != "" { + prefix += ":" + } + if digest.Validate() == nil { + contentDigest = prefix + digest.Encoded() + } + return contentDigest, nil +} + // Copy copies data into the working tree. The "Download" field is how -// imagebuilder tells us the instruction was "ADD" and not "COPY" +// imagebuilder tells us the instruction was "ADD" and not "COPY". func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { + s.builder.ContentDigester.Restart() for _, copy := range copies { // Check the file and see if part of it is a symlink. // Convert it to the target if so. To be ultrasafe @@ -283,41 +385,52 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err if err := s.volumeCacheInvalidate(copy.Dest); err != nil { return err } - sources := []string{} + var sources []string + // The From field says to read the content from another + // container. 
Update the ID mappings and + // all-content-comes-from-below-this-directory value. + var idMappingOptions *buildah.IDMappingOptions + var copyExcludes []string + contextDir := s.executor.contextDir + if len(copy.From) > 0 { + if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index { + contextDir = other.mountPoint + idMappingOptions = &other.builder.IDMappingOptions + } else if builder, ok := s.executor.containerMap[copy.From]; ok { + contextDir = builder.MountPoint + idMappingOptions = &builder.IDMappingOptions + } else { + return errors.Errorf("the stage %q has not been built", copy.From) + } + copyExcludes = excludes + } else { + copyExcludes = append(s.executor.excludes, excludes...) + } for _, src := range copy.Src { - contextDir := s.executor.contextDir - copyExcludes := excludes - var idMappingOptions *buildah.IDMappingOptions if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + // Source is a URL. sources = append(sources, src) - } else if len(copy.From) > 0 { - var srcRoot string - if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index { - srcRoot = other.mountPoint - contextDir = other.mountPoint - idMappingOptions = &other.builder.IDMappingOptions - } else if builder, ok := s.executor.containerMap[copy.From]; ok { - srcRoot = builder.MountPoint - contextDir = builder.MountPoint - idMappingOptions = &builder.IDMappingOptions - } else { - return errors.Errorf("the stage %q has not been built", copy.From) - } - srcSecure, err := securejoin.SecureJoin(srcRoot, src) + } else { + // Treat the source, which is not a URL, as a + // location relative to the + // all-content-comes-from-below-this-directory + // directory. + srcSecure, err := securejoin.SecureJoin(contextDir, src) if err != nil { return err } - // If destination is a folder, we need to take extra care to - // ensure that files are copied with correct names (since - // resolving a symlink may result in a different name). if hadFinalPathSeparator { + // If destination is a folder, we need to take extra care to + // ensure that files are copied with correct names (since + // resolving a symlink may result in a different name). _, srcName := filepath.Split(src) _, srcNameSecure := filepath.Split(srcSecure) if srcName != srcNameSecure { options := buildah.AddAndCopyOptions{ - Chown: copy.Chown, - ContextDir: contextDir, - Excludes: copyExcludes, + Chown: copy.Chown, + ContextDir: contextDir, + Excludes: copyExcludes, + IDMappingOptions: idMappingOptions, } if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil { return err @@ -326,21 +439,17 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err } } sources = append(sources, srcSecure) - - } else { - sources = append(sources, filepath.Join(s.executor.contextDir, src)) - copyExcludes = append(s.executor.excludes, excludes...) 
- } - options := buildah.AddAndCopyOptions{ - Chown: copy.Chown, - ContextDir: contextDir, - Excludes: copyExcludes, - IDMappingOptions: idMappingOptions, - } - if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil { - return err } } + options := buildah.AddAndCopyOptions{ + Chown: copy.Chown, + ContextDir: contextDir, + Excludes: copyExcludes, + IDMappingOptions: idMappingOptions, + } + if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil { + return err + } } return nil } @@ -645,7 +754,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // squash the contents of the base image. Whichever is // the case, we need to commit() to create a new image. logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil), false, s.output); err != nil { + if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil, ""), false, s.output); err != nil { return "", nil, errors.Wrapf(err, "error committing base container") } } else { @@ -711,13 +820,18 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) } + // In case we added content, retrieve its digest. + addedContentDigest, err := s.digestSpecifiedContent(node) + if err != nil { + return "", nil, err + } if moreInstructions { // There are still more instructions to process // for this stage. Make a note of the // instruction in the history that we'll write // for the image when we eventually commit it. now := time.Now() - s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node), "", "") + s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node, addedContentDigest), "", "") continue } else { // This is the last instruction for this stage, @@ -726,7 +840,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // if it's used as the basis for a later stage. if lastStage || imageIsUsedLater { logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), false, s.output) + imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node, addedContentDigest), false, s.output) if err != nil { return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) } @@ -756,7 +870,11 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b // cached images so far, look for one that matches what we // expect to produce for this instruction. if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) { - cacheID, err = s.layerExists(ctx, node) + addedContentDigest, err := s.digestSpecifiedContent(node) + if err != nil { + return "", nil, err + } + cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest) if err != nil { return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") } @@ -809,9 +927,14 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) } + // In case we added content, retrieve its digest. + addedContentDigest, err := s.digestSpecifiedContent(node) + if err != nil { + return "", nil, err + } // Create a new image, maybe with a new layer. 
logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), !s.stepRequiresLayer(step), commitName) + imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName) if err != nil { return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) } @@ -899,9 +1022,9 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st return img.ID, ref, nil } -// layerExists returns true if an intermediate image of currNode exists in the image store from a previous build. +// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build. // It verifies this by checking the parent of the top layer of the image and the history. -func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node) (string, error) { +func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string) (string, error) { // Get the list of images available in the image store images, err := s.executor.store.Images() if err != nil { @@ -932,85 +1055,14 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node) return "", errors.Wrapf(err, "error getting history of %q", image.ID) } // children + currNode is the point of the Dockerfile we are currently at. - if s.executor.historyMatches(baseHistory, currNode, history) { - // This checks if the files copied during build have been changed if the node is - // a COPY or ADD command. - filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created) - if err != nil { - return "", errors.Wrapf(err, "error checking if copied files match") - } - if filesMatch { - return image.ID, nil - } + if s.executor.historyMatches(baseHistory, currNode, history, addedContentDigest) { + return image.ID, nil } } } return "", nil } -// getFilesToCopy goes through node to get all the src files that are copied, added or downloaded. -// It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix. -// Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character. -func (s *StageExecutor) getFilesToCopy(node *parser.Node) ([]string, error) { - currNode := node.Next - var src []string - for currNode.Next != nil { - if strings.HasPrefix(currNode.Value, "http://") || strings.HasPrefix(currNode.Value, "https://") { - src = append(src, currNode.Value) - currNode = currNode.Next - continue - } - matches, err := filepath.Glob(filepath.Join(s.copyFrom, currNode.Value)) - if err != nil { - return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value) - } - src = append(src, matches...) - currNode = currNode.Next - } - return src, nil -} - -// copiedFilesMatch checks to see if the node instruction is a COPY or ADD. -// If it is either of those two it checks the timestamps on all the files copied/added -// by the dockerfile. If the host version has a time stamp greater than the time stamp -// of the build, the build will not use the cached version and will rebuild. 
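Everything the removed helpers below re-derived from mtimes and Last-Modified headers is now carried by the content digest: identical bytes digest identically, so the history comparison above is sufficient. For reference, the digest string fed into that comparison is assembled from the digester's (prefix, digest) pair; a minimal restatement of that formatting step, assuming the opencontainers go-digest package (formatContentDigest is a hypothetical helper mirroring the tail of digestSpecifiedContent):

    import digest "github.com/opencontainers/go-digest"

    // formatContentDigest joins an optional prefix (e.g. "file"), a colon, and
    // the encoded digest, matching the string getCreatedBy embeds in history.
    func formatContentDigest(prefix string, d digest.Digest) string {
    	// An invalid digest means there was nothing to hash.
    	if d.Validate() != nil {
    		return ""
    	}
    	if prefix != "" {
    		prefix += ":"
    	}
    	return prefix + d.Encoded()
    }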
-func (s *StageExecutor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) { - if node.Value != "add" && node.Value != "copy" { - return true, nil - } - - src, err := s.getFilesToCopy(node) - if err != nil { - return false, err - } - for _, item := range src { - // for urls, check the Last-Modified field in the header. - if strings.HasPrefix(item, "http://") || strings.HasPrefix(item, "https://") { - urlContentNew, err := urlContentModified(item, historyTime) - if err != nil { - return false, err - } - if urlContentNew { - return false, nil - } - continue - } - // Walks the file tree for local files and uses chroot to ensure we don't escape out of the allowed path - // when resolving any symlinks. - // Change the time format to ensure we don't run into a parsing error when converting again from string - // to time.Time. It is a known Go issue that the conversions cause errors sometimes, so specifying a particular - // time format here when converting to a string. - timeIsGreater, err := resolveModifiedTime(s.copyFrom, item, historyTime.Format(time.RFC3339Nano)) - if err != nil { - return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item) - } - if timeIsGreater { - return false, nil - } - } - return true, nil -} - // commit writes the container's contents to an image, using a passed-in tag as // the name if there is one, generating a unique ID-based one otherwise. func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) { @@ -1134,23 +1186,3 @@ func (s *StageExecutor) EnsureContainerPath(path string) error { } return nil } - -// urlContentModified sends a get request to the url and checks if the header has a value in -// Last-Modified, and if it does compares the time stamp to that of the history of the cached image. -// returns true if there is no Last-Modified value in the header. -func urlContentModified(url string, historyTime *time.Time) (bool, error) { - resp, err := http.Get(url) - if err != nil { - return false, errors.Wrapf(err, "error getting %q", url) - } - defer resp.Body.Close() - if lastModified := resp.Header.Get("Last-Modified"); lastModified != "" { - lastModifiedTime, err := time.Parse(time.RFC1123, lastModified) - if err != nil { - return false, errors.Wrapf(err, "error parsing time for %q", url) - } - return lastModifiedTime.After(*historyTime), nil - } - logrus.Debugf("Response header did not have Last-Modified %q, will rebuild.", url) - return true, nil -} diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 463f4ebc9..6cfa3f24b 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -385,3 +385,14 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a * `make` * `make install` * Then add any updated or added files with `git add` then do a `git commit` and create a PR. 
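The section added below works because of Go module replace directives: the `go mod edit -replace` invocation records a replacement in go.mod, roughly like the following line (the username, branch, and resolved pseudo-version are placeholders), and `make vendor` then copies the fork's code into vendor/:

    replace github.com/containers/storage => github.com/myuser/storage v0.0.0-20190801000000-abcdef123456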
+ +### Vendor from your own fork + +If you wish to vendor in changes from your personal fork to try them out (assuming containers/storage in the below example): + + * `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH` + * `make vendor` + +To revert: + * `go mod edit -dropreplace github.com/containers/storage` + * `make vendor` diff --git a/vendor/github.com/containers/buildah/libdm_tag.sh b/vendor/github.com/containers/buildah/libdm_tag.sh index d1f83ba10..d3668aab1 100644 --- a/vendor/github.com/containers/buildah/libdm_tag.sh +++ b/vendor/github.com/containers/buildah/libdm_tag.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash tmpdir="$PWD/tmp.$RANDOM" mkdir -p "$tmpdir" trap 'rm -fr "$tmpdir"' EXIT diff --git a/vendor/github.com/containers/buildah/ostree_tag.sh b/vendor/github.com/containers/buildah/ostree_tag.sh index 6a2f2e38b..537c17ff4 100644 --- a/vendor/github.com/containers/buildah/ostree_tag.sh +++ b/vendor/github.com/containers/buildah/ostree_tag.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash if pkg-config ostree-1 2> /dev/null ; then echo containers_image_ostree else diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go index d3c071555..1a73efb54 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/common.go +++ b/vendor/github.com/containers/buildah/pkg/cli/common.go @@ -186,7 +186,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.") fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains") - fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers") + fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers, or disable DNS completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.") fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options") fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables") fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use.
Use BUILDAH_ISOLATION environment variable to override.") diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 0ab449c1e..09ca542a1 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -462,25 +462,40 @@ func ValidateVolumeCtrDir(ctrDir string) error { // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid int finalOpts := make([]string, 0, len(options)) for _, opt := range options { switch opt { + case "noexec", "exec": + foundExec++ + if foundExec > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", ")) + } + case "nodev", "dev": + foundDev++ + if foundDev > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", ")) + } + case "nosuid", "suid": + foundSuid++ + if foundSuid > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", ")) + } case "rw", "ro": + foundRWRO++ if foundRWRO > 1 { return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", ")) } - foundRWRO++ case "z", "Z", "O": + foundLabelChange++ if foundLabelChange > 1 { return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", ")) } - foundLabelChange++ case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable": + foundRootPropagation++ if foundRootPropagation > 1 { return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", ")) } - foundRootPropagation++ case "bind", "rbind": bindType++ if bindType > 1 { diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 4e34f3fad..98e3ff354 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -102,19 +102,11 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef } case directory.Transport.Name(): // supports pull from a directory - name = srcRef.StringWithinTransport() - // remove leading "/" - if name[:1] == "/" { - name = name[1:] - } + name = toLocalImageName(srcRef.StringWithinTransport()) case oci.Transport.Name(): // supports pull from a directory split := strings.SplitN(srcRef.StringWithinTransport(), ":", 2) - name = split[0] - // remove leading "/" - if name[:1] == "/" { - name = name[1:] - } + name = toLocalImageName(split[0]) default: ref := srcRef.DockerReference() if ref == nil { @@ -287,3 +279,8 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.Sys } return "@" + digest.Hex(), nil } + +// toLocalImageName converts an image name into a 'localhost/' prefixed one +func toLocalImageName(imageName string) string { + return "localhost/" + strings.TrimLeft(imageName, "/") +} diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 9a2a396cb..e5541dd34 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ 
b/vendor/github.com/containers/buildah/run_linux.go @@ -343,7 +343,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st net := namespaceOptions.Find(string(specs.NetworkNamespace)) hostNetwork := net == nil || net.Host user := namespaceOptions.Find(string(specs.UserNamespace)) - hostUser := user == nil || user.Host + hostUser := (user == nil || user.Host) && !unshare.IsRootless() // Copy mounts from the generated list. mountCgroups := true @@ -431,7 +431,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st // Add temporary copies of the contents of volume locations at the // volume locations, unless we already have something there. - copyWithTar := b.copyWithTar(nil, nil, nil) + copyWithTar := b.copyWithTar(nil, nil, nil, false) builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID)) if err != nil { return err diff --git a/vendor/github.com/containers/buildah/selinux_tag.sh b/vendor/github.com/containers/buildah/selinux_tag.sh index ff80fda04..993630ad6 100644 --- a/vendor/github.com/containers/buildah/selinux_tag.sh +++ b/vendor/github.com/containers/buildah/selinux_tag.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash if pkg-config libselinux 2> /dev/null ; then echo selinux fi diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index a44165921..9fbeb14d4 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -3,6 +3,7 @@ package buildah import ( "archive/tar" "io" + "io/ioutil" "os" "path/filepath" @@ -112,24 +113,23 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa // of any container, or another container, into our working container, mapping // read permissions using the passed-in ID maps, writing using the container's // ID mappings, possibly overridden using the passed-in chownOpts -func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { +func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error { if tarIDMappingOptions == nil { tarIDMappingOptions = &IDMappingOptions{ HostUIDMapping: true, HostGIDMapping: true, } } + + var hardlinkChecker util.HardlinkChecker return func(src, dest string) error { + var f *os.File + logrus.Debugf("copyFileWithTar(%s, %s)", src, dest) - f, err := os.Open(src) + fi, err := os.Lstat(src) if err != nil { - return errors.Wrapf(err, "error opening %q to copy its contents", src) + return errors.Wrapf(err, "error reading attributes of %q", src) } - defer func() { - if f != nil { - f.Close() - } - }() sysfi, err := system.Lstat(src) if err != nil { @@ -143,19 +143,45 @@ func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOp return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID) } - fi, err := os.Lstat(src) - if err != nil { - return errors.Wrapf(err, "error reading attributes of %q", src) - } - hdr, err := tar.FileInfoHeader(fi, filepath.Base(src)) if err != nil { return errors.Wrapf(err, "error generating tar header for: %q", src) } - hdr.Name = filepath.Base(dest) + chrootedDest, err := filepath.Rel(b.MountPoint, dest) + if err != nil { + return errors.Wrapf(err, "error generating relative-to-chroot target name for %q", dest) 
+ } + hdr.Name = chrootedDest hdr.Uid = int(containerUID) hdr.Gid = int(containerGID) + if fi.Mode().IsRegular() && hdr.Typeflag == tar.TypeReg { + if linkname := hardlinkChecker.Check(fi); linkname != "" { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = linkname + } else { + hardlinkChecker.Add(fi, chrootedDest) + f, err = os.Open(src) + if err != nil { + return errors.Wrapf(err, "error opening %q to copy its contents", src) + } + defer func() { + if err := f.Close(); err != nil { + logrus.Debugf("error closing %s: %v", fi.Name(), err) + } + }() + } + } + + if fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeSymlink { + hdr.Typeflag = tar.TypeSymlink + linkName, err := os.Readlink(src) + if err != nil { + return errors.Wrapf(err, "error reading destination from symlink %q", src) + } + hdr.Linkname = linkName + } + pipeReader, pipeWriter := io.Pipe() writer := tar.NewWriter(pipeWriter) var copyErr error @@ -165,26 +191,25 @@ func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOp logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err) copyErr = err } - n, err := pools.Copy(writer, srcFile) - if n != hdr.Size { - logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n) - } - if err != nil { - logrus.Debugf("error reading %s: %v", srcFile.Name(), err) - copyErr = err + if srcFile != nil { + n, err := pools.Copy(writer, srcFile) + if n != hdr.Size { + logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n) + } + if err != nil { + logrus.Debugf("error copying contents of %s: %v", fi.Name(), err) + copyErr = err + } } if err = writer.Close(); err != nil { - logrus.Debugf("error closing write pipe for %s: %v", srcFile.Name(), err) - } - if err = srcFile.Close(); err != nil { - logrus.Debugf("error closing %s: %v", srcFile.Name(), err) + logrus.Debugf("error closing write pipe for %s: %v", hdr.Name, err) } pipeWriter.Close() pipeWriter = nil }(f) - untar := b.untar(chownOpts, hasher) - err = untar(pipeReader, filepath.Dir(dest)) + untar := b.untar(chownOpts, hasher, dryRun) + err = untar(pipeReader, b.MountPoint) if err == nil { err = copyErr } @@ -200,10 +225,17 @@ func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOp // our container or from another container, into our working container, mapping // permissions at read-time using the container's ID maps, with ownership at // write-time possibly overridden using the passed-in chownOpts -func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { +func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error { tar := b.tarPath(tarIDMappingOptions) - untar := b.untar(chownOpts, hasher) return func(src, dest string) error { + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) + } + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() + } + untar := b.untar(chownOpts, thisHasher, dryRun) rc, err := tar(src) if err != nil { return errors.Wrapf(err, "error archiving %q for copy", src) @@ -215,8 +247,28 @@ func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts * // untarPath returns a function which extracts an archive in a specified // location into our working container, mapping 
permissions using the // container's ID maps, possibly overridden using the passed-in chownOpts -func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer) func(src, dest string) error { +func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error { + if hasher != nil && b.ContentDigester.Hash() != nil { + hasher = io.MultiWriter(hasher, b.ContentDigester.Hash()) + } + if hasher == nil { + hasher = b.ContentDigester.Hash() + } convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + if dryRun { + return func(src, dest string) error { + if hasher == nil { + hasher = ioutil.Discard + } + f, err := os.Open(src) + if err != nil { + return errors.Wrapf(err, "error opening %q", src) + } + defer f.Close() + _, err = io.Copy(hasher, f) + return err + } + } return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap) } @@ -248,7 +300,7 @@ func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) // untar returns a function which extracts an archive stream to a specified // location in the container's filesystem, mapping permissions using the // container's ID maps, possibly overridden using the passed-in chownOpts -func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArchive io.ReadCloser, dest string) error { +func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(tarArchive io.ReadCloser, dest string) error { convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap) options := &archive.TarOptions{ @@ -257,14 +309,31 @@ func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer) func(tarArc ChownOpts: chownOpts, } untar := chrootarchive.Untar - if hasher != nil { - originalUntar := untar + if dryRun { untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + if _, err := io.Copy(ioutil.Discard, tarArchive); err != nil { + return errors.Wrapf(err, "error digesting tar stream") + } + return nil + } + } + originalUntar := untar + untarWithHasher := func(tarArchive io.Reader, dest string, options *archive.TarOptions, untarHasher io.Writer) error { + reader := tarArchive + if untarHasher != nil { + reader = io.TeeReader(tarArchive, untarHasher) } + return originalUntar(reader, dest, options) } return func(tarArchive io.ReadCloser, dest string) error { - err := untar(tarArchive, dest, options) + thisHasher := hasher + if thisHasher != nil && b.ContentDigester.Hash() != nil { + thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash()) + } + if thisHasher == nil { + thisHasher = b.ContentDigester.Hash() + } + err := untarWithHasher(tarArchive, dest, options, thisHasher) if err2 := tarArchive.Close(); err2 != nil { if err == nil { err = err2 diff --git a/vendor/github.com/containers/buildah/util/util_not_uint64.go b/vendor/github.com/containers/buildah/util/util_not_uint64.go new file mode 100644 index 000000000..92ff23b41 --- /dev/null +++ b/vendor/github.com/containers/buildah/util/util_not_uint64.go @@ -0,0 +1,14 @@ +// +build darwin + +package util + +import ( + "syscall" +) + +func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode { + return hardlinkDeviceAndInode{ + device: uint64(st.Dev), + inode: 
uint64(st.Ino), + } +} diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/util/util_uint64.go new file mode 100644 index 000000000..b271e3d11 --- /dev/null +++ b/vendor/github.com/containers/buildah/util/util_uint64.go @@ -0,0 +1,14 @@ +// +build linux + +package util + +import ( + "syscall" +) + +func makeHardlinkDeviceAndInode(st *syscall.Stat_t) hardlinkDeviceAndInode { + return hardlinkDeviceAndInode{ + device: st.Dev, + inode: st.Ino, + } +} diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go new file mode 100644 index 000000000..04d9a01cc --- /dev/null +++ b/vendor/github.com/containers/buildah/util/util_unix.go @@ -0,0 +1,31 @@ +// +build linux darwin + +package util + +import ( + "os" + "sync" + "syscall" +) + +type hardlinkDeviceAndInode struct { + device, inode uint64 +} + +type HardlinkChecker struct { + hardlinks sync.Map +} + +func (h *HardlinkChecker) Check(fi os.FileInfo) string { + if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { + if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" { + return name.(string) + } + } + return "" +} +func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { + if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { + h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name) + } +} diff --git a/vendor/github.com/containers/buildah/util/util_windows.go b/vendor/github.com/containers/buildah/util/util_windows.go new file mode 100644 index 000000000..0e7f92325 --- /dev/null +++ b/vendor/github.com/containers/buildah/util/util_windows.go @@ -0,0 +1,16 @@ +// +build !linux,!darwin + +package util + +import ( + "os" +) + +type HardlinkChecker struct { +} + +func (h *HardlinkChecker) Check(fi os.FileInfo) string { + return "" +} +func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go deleted file mode 100644 index 3f0c968ec..000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package wait provides tools for polling or listening for changes -// to a condition. -package wait // import "k8s.io/apimachinery/pkg/util/wait" diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go deleted file mode 100644 index bc6b18d2b..000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ /dev/null @@ -1,504 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package wait - -import ( - "context" - "errors" - "math/rand" - "sync" - "time" - - "k8s.io/apimachinery/pkg/util/runtime" -) - -// For any test of the style: -// ... -// <- time.After(timeout): -// t.Errorf("Timed out") -// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s -// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine -// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. -var ForeverTestTimeout = time.Second * 30 - -// NeverStop may be passed to Until to make it never stop. -var NeverStop <-chan struct{} = make(chan struct{}) - -// Group allows to start a group of goroutines and wait for their completion. -type Group struct { - wg sync.WaitGroup -} - -func (g *Group) Wait() { - g.wg.Wait() -} - -// StartWithChannel starts f in a new goroutine in the group. -// stopCh is passed to f as an argument. f should stop when stopCh is available. -func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) { - g.Start(func() { - f(stopCh) - }) -} - -// StartWithContext starts f in a new goroutine in the group. -// ctx is passed to f as an argument. f should stop when ctx.Done() is available. -func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) { - g.Start(func() { - f(ctx) - }) -} - -// Start starts f in a new goroutine in the group. -func (g *Group) Start(f func()) { - g.wg.Add(1) - go func() { - defer g.wg.Done() - f() - }() -} - -// Forever calls f every period for ever. -// -// Forever is syntactic sugar on top of Until. -func Forever(f func(), period time.Duration) { - Until(f, period, NeverStop) -} - -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). -func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). -func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. 
-// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. -func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - var t *time.Timer - var sawTimeout bool - - for { - select { - case <-stopCh: - return - default: - } - - jitteredPeriod := period - if jitterFactor > 0.0 { - jitteredPeriod = Jitter(period, jitterFactor) - } - - if !sliding { - t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout) - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - return - case <-t.C: - sawTimeout = true - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - -// Jitter returns a time.Duration between duration and duration + maxFactor * -// duration. -// -// This allows clients to avoid converging on periodic behavior. If maxFactor -// is 0.0, a suggested default value will be chosen. -func Jitter(duration time.Duration, maxFactor float64) time.Duration { - if maxFactor <= 0.0 { - maxFactor = 1.0 - } - wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) - return wait -} - -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - -// ConditionFunc returns true if the condition is satisfied, or an error -// if the loop should be aborted. -type ConditionFunc func() (done bool, err error) - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration. Must be greater - // than or equal to zero. - Factor float64 - // The amount of jitter applied each iteration. Jitter is applied after - // cap. 
- Jitter float64 - // The number of steps before duration stops changing. If zero, initial - // duration is always used. Used for exponential backoff in combination - // with Factor. - Steps int - // The returned duration will never be greater than cap *before* jitter - // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. - Cap time.Duration -} - -// Step returns the next interval in the exponential backoff. This method -// will mutate the provided backoff. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// contextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It checks the condition up to Steps times, increasing the wait by multiplying -// the previous duration by Factor. -// -// If Jitter is greater than zero, a random amount of each duration is added -// (between duration and duration*(1+jitter)). -// -// If the condition never returns true, ErrWaitTimeout is returned. All other -// errors terminate immediately. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := condition(); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return pollInternal(poller(interval, timeout), condition) -} - -func pollInternal(wait WaitFunc, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return WaitFor(wait, condition, done) -} - -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. 
-func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return pollImmediateInternal(poller(interval, timeout), condition) -} - -func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil - } - return pollInternal(wait, condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return PollUntil(interval, condition, done) -} - -// PollImmediateInfinite tries a condition func until it returns true or an error -// -// PollImmediateInfinite runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil - } - return PollInfinite(interval, condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is -// closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return WaitFor(poller(interval, 0), condition, ctx.Done()) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil - } - select { - case <-stopCh: - return ErrWaitTimeout - default: - return PollUntil(interval, condition, stopCh) - } -} - -// WaitFunc creates a channel that receives an item every time a test -// should be executed and is closed when the last test should be invoked. -type WaitFunc func(done <-chan struct{}) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. -// -// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. 
-func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - stopCh := make(chan struct{}) - defer close(stopCh) - c := wait(stopCh) - for { - select { - case _, open := <-c: - ok, err := fn() - if err != nil { - return err - } - if ok { - return nil - } - if !open { - return ErrWaitTimeout - } - case <-done: - return ErrWaitTimeout - } - } -} - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitFunc { - return WaitFunc(func(done <-chan struct{}) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-done: - return - } - } - }() - - return ch - }) -} - -// resetOrReuseTimer avoids allocating a new timer if one is already in use. -// Not safe for multiple threads. -func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer { - if t == nil { - return time.NewTimer(d) - } - if !t.Stop() && !sawTimeout { - <-t.C - } - t.Reset(d) - return t -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3acff38c9..f3564ebc6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -45,7 +45,7 @@ github.com/containernetworking/cni/pkg/version github.com/containernetworking/cni/pkg/types/020 # github.com/containernetworking/plugins v0.8.1 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.10.1 +# github.com/containers/buildah v1.8.4-0.20190821140209-376e52ee0142 github.com/containers/buildah github.com/containers/buildah/imagebuildah github.com/containers/buildah/pkg/chrootuser @@ -543,7 +543,6 @@ gopkg.in/yaml.v2 k8s.io/api/core/v1 # k8s.io/apimachinery v0.0.0-20190624085041-961b39a1baa0 k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/runtime |