summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xAPI.md2
-rw-r--r--cmd/podman/commit.go9
-rw-r--r--cmd/podman/common.go2
-rw-r--r--cmd/podman/cp.go2
-rw-r--r--cmd/podman/inspect.go1
-rw-r--r--cmd/podman/mount.go6
-rw-r--r--cmd/podman/pod_inspect.go1
-rw-r--r--cmd/podman/pod_stats.go6
-rw-r--r--cmd/podman/ps.go3
-rw-r--r--cmd/podman/run.go6
-rw-r--r--cmd/podman/shared/create.go15
-rw-r--r--cmd/podman/sign.go6
-rw-r--r--cmd/podman/start.go6
-rw-r--r--cmd/podman/system_df.go13
-rw-r--r--cmd/podman/trust_set_show.go1
-rw-r--r--cmd/podman/varlink/io.podman.varlink1
-rw-r--r--contrib/spec/podman.spec.in3
-rw-r--r--docs/podman-system-df.1.md12
-rw-r--r--libpod/boltdb_state_internal.go11
-rw-r--r--libpod/container.go13
-rw-r--r--libpod/container_internal_linux.go2
-rw-r--r--libpod/events.go6
-rw-r--r--libpod/info.go9
-rw-r--r--libpod/options.go9
-rw-r--r--libpod/pod_api.go5
-rw-r--r--libpod/runtime.go44
-rw-r--r--libpod/runtime_ctr.go5
-rw-r--r--libpod/util.go10
-rw-r--r--pkg/adapter/pods_remote.go5
-rw-r--r--pkg/adapter/runtime_remote.go1
-rw-r--r--pkg/spec/createconfig.go15
-rw-r--r--pkg/util/utils.go64
-rw-r--r--pkg/varlinkapi/containers_create.go5
-rw-r--r--pkg/varlinkapi/images.go9
-rw-r--r--vendor.conf11
-rw-r--r--vendor/github.com/containernetworking/cni/README.md17
-rw-r--r--vendor/github.com/containernetworking/cni/libcni/api.go249
-rw-r--r--vendor/github.com/containernetworking/cni/libcni/conf.go15
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go48
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/exec.go15
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go2
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go17
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/types/020/types.go7
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/types/current/types.go23
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/types/types.go14
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/version/plugin.go10
-rw-r--r--vendor/github.com/containernetworking/cni/pkg/version/version.go22
-rw-r--r--vendor/github.com/containers/buildah/README.md14
-rw-r--r--vendor/github.com/containers/buildah/add.go151
-rw-r--r--vendor/github.com/containers/buildah/buildah.go4
-rw-r--r--vendor/github.com/containers/buildah/commit.go14
-rw-r--r--vendor/github.com/containers/buildah/common.go41
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/build.go827
-rw-r--r--vendor/github.com/containers/buildah/imagebuildah/util.go25
-rw-r--r--vendor/github.com/containers/buildah/import.go10
-rw-r--r--vendor/github.com/containers/buildah/info.go4
-rw-r--r--vendor/github.com/containers/buildah/new.go4
-rw-r--r--vendor/github.com/containers/buildah/pkg/chrootuser/user.go (renamed from pkg/chrootuser/user.go)0
-rw-r--r--vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go (renamed from pkg/chrootuser/user_basic.go)0
-rw-r--r--vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go (renamed from pkg/chrootuser/user_linux.go)0
-rw-r--r--vendor/github.com/containers/buildah/pkg/formats/formats.go8
-rw-r--r--vendor/github.com/containers/buildah/pkg/parse/parse.go17
-rw-r--r--vendor/github.com/containers/buildah/pkg/secrets/secrets.go18
-rw-r--r--vendor/github.com/containers/buildah/pull.go9
-rw-r--r--vendor/github.com/containers/buildah/run.go12
-rw-r--r--vendor/github.com/containers/buildah/unshare/unshare.go38
-rw-r--r--vendor/github.com/containers/buildah/unshare/unshare_unsupported.go27
-rw-r--r--vendor/github.com/containers/buildah/vendor.conf9
-rw-r--r--vendor/github.com/containers/image/README.md2
-rw-r--r--vendor/github.com/containers/image/copy/copy.go34
-rw-r--r--vendor/github.com/containers/image/docker/docker_client.go2
-rw-r--r--vendor/github.com/containers/image/docker/docker_image_dest.go4
-rw-r--r--vendor/github.com/containers/image/image/docker_schema2.go6
-rw-r--r--vendor/github.com/containers/image/image/oci.go4
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go (renamed from vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go)45
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/default.go8
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go (renamed from vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go)44
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go (renamed from vendor/github.com/containers/image/pkg/blobinfocache/memory.go)62
-rw-r--r--vendor/github.com/containers/image/pkg/blobinfocache/none/none.go (renamed from vendor/github.com/containers/image/pkg/blobinfocache/none.go)10
-rw-r--r--vendor/github.com/containers/image/signature/policy_config.go2
-rw-r--r--vendor/github.com/containers/image/storage/storage_image.go14
-rw-r--r--vendor/github.com/containers/image/storage/storage_transport.go6
-rw-r--r--vendor/github.com/containers/image/vendor.conf2
-rw-r--r--vendor/github.com/containers/image/version/version.go4
-rw-r--r--vendor/github.com/containers/storage/containers.go10
-rw-r--r--vendor/github.com/containers/storage/containers_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/copy/copy.go3
-rw-r--r--vendor/github.com/containers/storage/images.go35
-rw-r--r--vendor/github.com/containers/storage/images_ffjson.go2
-rw-r--r--vendor/github.com/containers/storage/layers.go198
-rw-r--r--vendor/github.com/containers/storage/lockfile.go7
-rw-r--r--vendor/github.com/containers/storage/lockfile_otherunix.go (renamed from vendor/github.com/containers/storage/lockfile_darwin.go)0
-rw-r--r--vendor/github.com/containers/storage/lockfile_unix.go51
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive_linux.go3
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools.go18
-rw-r--r--vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go4
-rw-r--r--vendor/github.com/containers/storage/pkg/reexec/command_linux.go25
-rw-r--r--vendor/github.com/containers/storage/pkg/reexec/command_unix.go15
-rw-r--r--vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go6
-rw-r--r--vendor/github.com/containers/storage/pkg/reexec/command_windows.go17
-rw-r--r--vendor/github.com/containers/storage/store.go164
-rw-r--r--vendor/github.com/containers/storage/utils.go234
-rw-r--r--vendor/github.com/containers/storage/vendor.conf3
-rw-r--r--vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go5
-rw-r--r--vendor/github.com/cri-o/ocicni/vendor.conf13
-rw-r--r--vendor/github.com/ulule/deepcopier/LICENSE22
-rw-r--r--vendor/github.com/ulule/deepcopier/README.md129
-rw-r--r--vendor/github.com/ulule/deepcopier/deepcopier.go362
109 files changed, 1945 insertions, 1624 deletions
diff --git a/API.md b/API.md
index f7f5525bf..3a095f0ba 100755
--- a/API.md
+++ b/API.md
@@ -1472,6 +1472,8 @@ gid_map [IDMap](#IDMap)
id [string](https://godoc.org/builtin#string)
+digest [string](https://godoc.org/builtin#string)
+
parentId [string](https://godoc.org/builtin#string)
repoTags [[]string](#[]string)
diff --git a/cmd/podman/commit.go b/cmd/podman/commit.go
index 584ab6880..f7e206856 100644
--- a/cmd/podman/commit.go
+++ b/cmd/podman/commit.go
@@ -96,9 +96,14 @@ func commitCmd(c *cliconfig.CommitValues) error {
return errors.Wrapf(err, "error looking up container %q", container)
}
- sc := image.GetSystemContext(runtime.GetConfig().SignaturePolicyPath, "", false)
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return err
+ }
+
+ sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false)
coptions := buildah.CommitOptions{
- SignaturePolicyPath: runtime.GetConfig().SignaturePolicyPath,
+ SignaturePolicyPath: rtc.SignaturePolicyPath,
ReportWriter: writer,
SystemContext: sc,
PreferredManifestType: mimeType,
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 167b3e845..10fed053e 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -12,12 +12,14 @@ import (
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/fatih/camelcase"
+ jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
stores = make(map[storage.Store]struct{})
+ json = jsoniter.ConfigCompatibleWithStandardLibrary
)
const (
diff --git a/cmd/podman/cp.go b/cmd/podman/cp.go
index 6223676ac..18fb2cb73 100644
--- a/cmd/podman/cp.go
+++ b/cmd/podman/cp.go
@@ -7,11 +7,11 @@ import (
"strconv"
"strings"
+ "github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/buildah/util"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
- "github.com/containers/libpod/pkg/chrootuser"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
diff --git a/cmd/podman/inspect.go b/cmd/podman/inspect.go
index 3d6fd07e0..528320170 100644
--- a/cmd/podman/inspect.go
+++ b/cmd/podman/inspect.go
@@ -2,7 +2,6 @@ package main
import (
"context"
- "encoding/json"
"strings"
"github.com/containers/buildah/pkg/formats"
diff --git a/cmd/podman/mount.go b/cmd/podman/mount.go
index d074551ce..138548097 100644
--- a/cmd/podman/mount.go
+++ b/cmd/podman/mount.go
@@ -71,7 +71,11 @@ func mountCmd(c *cliconfig.MountValues) error {
defer runtime.Shutdown(false)
if os.Geteuid() != 0 {
- if driver := runtime.GetConfig().StorageConfig.GraphDriverName; driver != "vfs" {
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return err
+ }
+ if driver := rtc.StorageConfig.GraphDriverName; driver != "vfs" {
// Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part
// of the mount command.
return fmt.Errorf("cannot mount using driver %s in rootless mode", driver)
diff --git a/cmd/podman/pod_inspect.go b/cmd/podman/pod_inspect.go
index 851f39aa0..e12678354 100644
--- a/cmd/podman/pod_inspect.go
+++ b/cmd/podman/pod_inspect.go
@@ -1,7 +1,6 @@
package main
import (
- "encoding/json"
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
diff --git a/cmd/podman/pod_stats.go b/cmd/podman/pod_stats.go
index e8ff322ce..36b0b95ed 100644
--- a/cmd/podman/pod_stats.go
+++ b/cmd/podman/pod_stats.go
@@ -9,7 +9,6 @@ import (
"text/tabwriter"
"time"
- "encoding/json"
tm "github.com/buger/goterm"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/cliconfig"
@@ -17,7 +16,6 @@ import (
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
- "github.com/ulule/deepcopier"
)
var (
@@ -187,7 +185,9 @@ func podStatsCmd(c *cliconfig.PodStatsValues) error {
}
time.Sleep(time.Second)
previousPodStats := new([]*libpod.PodContainerStats)
- deepcopier.Copy(newStats).To(previousPodStats)
+ if err := libpod.JSONDeepCopy(newStats, previousPodStats); err != nil {
+ return err
+ }
pods, err = runtime.GetStatPods(c)
if err != nil {
return err
diff --git a/cmd/podman/ps.go b/cmd/podman/ps.go
index 27774f95d..a9802d27f 100644
--- a/cmd/podman/ps.go
+++ b/cmd/podman/ps.go
@@ -1,7 +1,6 @@
package main
import (
- "encoding/json"
"fmt"
"html/template"
"os"
@@ -647,7 +646,7 @@ func printFormat(format string, containers []shared.PsContainerOutput) error {
}
func dumpJSON(containers []shared.PsContainerOutput) error {
- b, err := json.MarshalIndent(containers, "", "\t")
+ b, err := json.MarshalIndent(containers, "", " ")
if err != nil {
return err
}
diff --git a/cmd/podman/run.go b/cmd/podman/run.go
index 32e7b3510..3c26e98c1 100644
--- a/cmd/podman/run.go
+++ b/cmd/podman/run.go
@@ -154,7 +154,11 @@ func runCmd(c *cliconfig.RunValues) error {
if errors.Cause(err) == libpod.ErrNoSuchCtr {
// The container may have been removed
// Go looking for an exit file
- ctrExitCode, err := readExitFile(runtime.GetConfig().TmpDir, ctr.ID())
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return err
+ }
+ ctrExitCode, err := readExitFile(rtc.TmpDir, ctr.ID())
if err != nil {
logrus.Errorf("Cannot get exit code: %v", err)
exitCode = 127
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index 5f7263cb6..d927e5bf6 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -43,20 +43,23 @@ func getContext() context.Context {
func CreateContainer(ctx context.Context, c *cliconfig.PodmanCommand, runtime *libpod.Runtime) (*libpod.Container, *cc.CreateConfig, error) {
var (
healthCheck *manifest.Schema2HealthConfig
+ err error
+ cidFile *os.File
)
if c.Bool("trace") {
span, _ := opentracing.StartSpanFromContext(ctx, "createContainer")
defer span.Finish()
}
- rtc := runtime.GetConfig()
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return nil, nil, err
+ }
rootfs := ""
if c.Bool("rootfs") {
rootfs = c.InputArgs[0]
}
- var err error
- var cidFile *os.File
if c.IsSet("cidfile") && os.Geteuid() == 0 {
cidFile, err = libpod.OpenExclusiveFile(c.String("cidfile"))
if err != nil && os.IsExist(err) {
@@ -721,7 +724,11 @@ func ParseCreateOpts(ctx context.Context, c *cliconfig.PodmanCommand, runtime *l
if c.Bool("init") {
initPath := c.String("init-path")
if initPath == "" {
- initPath = runtime.GetConfig().InitPath
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ initPath = rtc.InitPath
}
if err := config.AddContainerInitBinary(initPath); err != nil {
return nil, err
diff --git a/cmd/podman/sign.go b/cmd/podman/sign.go
index 06418e4a5..75d723514 100644
--- a/cmd/podman/sign.go
+++ b/cmd/podman/sign.go
@@ -108,7 +108,11 @@ func signCmd(c *cliconfig.SignValues) error {
}
// create the signstore file
- newImage, err := runtime.ImageRuntime().New(getContext(), signimage, runtime.GetConfig().SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{SignBy: signby}, false, nil)
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return err
+ }
+ newImage, err := runtime.ImageRuntime().New(getContext(), signimage, rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{SignBy: signby}, false, nil)
if err != nil {
return errors.Wrapf(err, "error pulling image %s", signimage)
}
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index cf406cf66..d17a78268 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -129,7 +129,11 @@ func startCmd(c *cliconfig.StartValues) error {
if errors.Cause(err) == libpod.ErrNoSuchCtr {
// The container may have been removed
// Go looking for an exit file
- ctrExitCode, err := readExitFile(runtime.GetConfig().TmpDir, ctr.ID())
+ rtc, err := runtime.GetConfig()
+ if err != nil {
+ return err
+ }
+ ctrExitCode, err := readExitFile(rtc.TmpDir, ctr.ID())
if err != nil {
logrus.Errorf("Cannot get exit code: %v", err)
exitCode = 127
diff --git a/cmd/podman/system_df.go b/cmd/podman/system_df.go
index 60da4238a..992e869bd 100644
--- a/cmd/podman/system_df.go
+++ b/cmd/podman/system_df.go
@@ -85,6 +85,9 @@ type volumeVerboseDiskUsage struct {
}
const systemDfDefaultFormat string = "table {{.Type}}\t{{.Total}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
+const imageVerboseFormat string = "table {{.Repository}}\t{{.Tag}}\t{{.ImageID}}\t{{.Created}}\t{{.Size}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
+const containerVerboseFormat string = "table {{.ContainerID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.Created}}\t{{.Status}}\t{{.Names}}"
+const volumeVerboseFormat string = "table {{.VolumeName}}\t{{.Links}}\t{{.Size}}"
func init() {
dfSystemCommand.Command = _dfSystemCommand
@@ -473,7 +476,7 @@ func getImageVerboseDiskUsage(ctx context.Context, images []*image.Image, images
Repository: repo,
Tag: tag,
ImageID: shortID(img.ID()),
- Created: units.HumanDuration(time.Since((img.Created().Local()))) + " ago",
+ Created: fmt.Sprintf("%s ago", units.HumanDuration(time.Since((img.Created().Local())))),
Size: units.HumanSizeWithPrecision(float64(*size), 3),
SharedSize: units.HumanSizeWithPrecision(float64(*size-imgUniqueSizeMap[img.ID()]), 3),
UniqueSize: units.HumanSizeWithPrecision(float64(imgUniqueSizeMap[img.ID()]), 3),
@@ -502,7 +505,7 @@ func getContainerVerboseDiskUsage(containers []*libpod.Container) (containersVer
Command: strings.Join(ctr.Command(), " "),
LocalVolumes: len(ctr.UserVolumes()),
Size: units.HumanSizeWithPrecision(float64(size), 3),
- Created: units.HumanDuration(time.Since(ctr.CreatedTime().Local())) + "ago",
+ Created: fmt.Sprintf("%s ago", units.HumanDuration(time.Since(ctr.CreatedTime().Local()))),
Status: state.String(),
Names: ctr.Name(),
}
@@ -548,7 +551,7 @@ func imagesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
return errors.Wrapf(err, "error getting verbose output of images")
}
os.Stderr.WriteString("Images space usage:\n\n")
- out := formats.StdoutTemplateArray{Output: systemDfImageVerboseDiskUsageToGeneric(imagesVerboseDiskUsage), Template: "table {{.Repository}}\t{{.Tag}}\t{{.ImageID}}\t{{.Created}}\t{{.Size}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}", Fields: imageVerboseHeader}
+ out := formats.StdoutTemplateArray{Output: systemDfImageVerboseDiskUsageToGeneric(imagesVerboseDiskUsage), Template: imageVerboseFormat, Fields: imageVerboseHeader}
formats.Writer(out).Out()
return nil
}
@@ -569,7 +572,7 @@ func containersVerboseOutput(ctx context.Context, metaData dfMetaData) error {
return errors.Wrapf(err, "error getting verbose output of containers")
}
os.Stderr.WriteString("\nContainers space usage:\n\n")
- out := formats.StdoutTemplateArray{Output: systemDfContainerVerboseDiskUsageToGeneric(containersVerboseDiskUsage), Template: "table {{.ContainerID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.Created}}\t{{.Status}}\t{{.Names}}", Fields: containerVerboseHeader}
+ out := formats.StdoutTemplateArray{Output: systemDfContainerVerboseDiskUsageToGeneric(containersVerboseDiskUsage), Template: containerVerboseFormat, Fields: containerVerboseHeader}
formats.Writer(out).Out()
return nil
}
@@ -585,7 +588,7 @@ func volumesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
return errors.Wrapf(err, "error getting verbose ouput of volumes")
}
os.Stderr.WriteString("\nLocal Volumes space usage:\n\n")
- out := formats.StdoutTemplateArray{Output: systemDfVolumeVerboseDiskUsageToGeneric(volumesVerboseDiskUsage), Template: "table {{.VolumeName}}\t{{.Links}}\t{{.Size}}", Fields: volumeVerboseHeader}
+ out := formats.StdoutTemplateArray{Output: systemDfVolumeVerboseDiskUsageToGeneric(volumesVerboseDiskUsage), Template: volumeVerboseFormat, Fields: volumeVerboseHeader}
formats.Writer(out).Out()
return nil
}
diff --git a/cmd/podman/trust_set_show.go b/cmd/podman/trust_set_show.go
index d7a4ea6d6..626d27aae 100644
--- a/cmd/podman/trust_set_show.go
+++ b/cmd/podman/trust_set_show.go
@@ -1,7 +1,6 @@
package main
import (
- "encoding/json"
"io/ioutil"
"os"
"sort"
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index ad2de56f8..5e996f46b 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -59,6 +59,7 @@ type VolumeRemoveOpts (
type Image (
id: string,
+ digest: string,
parentId: string,
repoTags: []string,
repoDigests: []string,
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 3324ee8f9..319bbe979 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -151,7 +151,6 @@ Provides: bundled(golang(github.com/stretchr/testify)) = 4d4bfba8f1d1027c4fdbe37
Provides: bundled(golang(github.com/syndtr/gocapability)) = e7cb7fa329f456b3855136a2642b197bad7366ba
Provides: bundled(golang(github.com/tchap/go-patricia)) = v2.2.6
Provides: bundled(golang(github.com/ulikunitz/xz)) = v0.5.4
-Provides: bundled(golang(github.com/ulule/deepcopier)) = master
# "-" are not accepted in version strings, so comment out below line
#Provides: bundled(golang(github.com/urfave/cli)) = fix-short-opts-parsing
Provides: bundled(golang(github.com/varlink/go)) = master
@@ -237,7 +236,6 @@ BuildRequires: golang(github.com/opencontainers/selinux/go-selinux)
BuildRequires: golang(github.com/opencontainers/selinux/go-selinux/label)
BuildRequires: golang(github.com/pkg/errors)
BuildRequires: golang(github.com/sirupsen/logrus)
-BuildRequires: golang(github.com/ulule/deepcopier)
BuildRequires: golang(golang.org/x/crypto/ssh/terminal)
BuildRequires: golang(golang.org/x/sys/unix)
BuildRequires: golang(k8s.io/apimachinery/pkg/util/wait)
@@ -290,7 +288,6 @@ Requires: golang(github.com/opencontainers/selinux/go-selinux)
Requires: golang(github.com/opencontainers/selinux/go-selinux/label)
Requires: golang(github.com/pkg/errors)
Requires: golang(github.com/sirupsen/logrus)
-Requires: golang(github.com/ulule/deepcopier)
Requires: golang(golang.org/x/crypto/ssh/terminal)
Requires: golang(golang.org/x/sys/unix)
Requires: golang(k8s.io/apimachinery/pkg/util/wait)
diff --git a/docs/podman-system-df.1.md b/docs/podman-system-df.1.md
index f33523dd6..b6d71c634 100644
--- a/docs/podman-system-df.1.md
+++ b/docs/podman-system-df.1.md
@@ -33,12 +33,12 @@ docker.io/library/alpine latest 5cb3aa00f899 2 weeks ago 5.79MB 0B
Containers space usage:
-CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES
-073f7e62812d 5cb3 sleep 100 1 0B About an hourago exited zen_joliot
-3f19f5bba242 5cb3 sleep 100 0 5.52kB 4 hoursago exited pedantic_archimedes
-8cd89bf645cc 5cb3 ls foodir 0 58B 2 hoursago configured agitated_hamilton
-a1d948a4b61d 5cb3 ls foodir 0 12B 2 hoursago exited laughing_wing
-eafe3e3c5bb3 5cb3 sleep 10000 0 72B 2 hoursago running priceless_liskov
+CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES
+073f7e62812d 5cb3 sleep 100 1 0B 20 hours ago exited zen_joliot
+3f19f5bba242 5cb3 sleep 100 0 5.52kB 22 hours ago exited pedantic_archimedes
+8cd89bf645cc 5cb3 ls foodir 0 58B 21 hours ago configured agitated_hamilton
+a1d948a4b61d 5cb3 ls foodir 0 12B 21 hours ago exited laughing_wing
+eafe3e3c5bb3 5cb3 sleep 10000 0 72B 21 hours ago exited priceless_liskov
Local Volumes space usage:
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 936ccbf4c..b6a0759b1 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -6,6 +6,7 @@ import (
"strings"
"github.com/boltdb/bolt"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -95,22 +96,26 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
return err
}
+ storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
+ if err != nil {
+ return err
+ }
if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)",
rt.config.StorageConfig.RunRoot, runRootKey,
- storage.DefaultStoreOptions.RunRoot); err != nil {
+ storeOpts.RunRoot); err != nil {
return err
}
if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)",
rt.config.StorageConfig.GraphRoot, graphRootKey,
- storage.DefaultStoreOptions.GraphRoot); err != nil {
+ storeOpts.GraphRoot); err != nil {
return err
}
if err := validateDBAgainstConfig(configBkt, "storage graph driver",
rt.config.StorageConfig.GraphDriverName,
graphDriverKey,
- storage.DefaultStoreOptions.GraphDriverName); err != nil {
+ storeOpts.GraphDriverName); err != nil {
return err
}
diff --git a/libpod/container.go b/libpod/container.go
index 806e75c63..6d80a9bf4 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -17,7 +17,6 @@ import (
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- "github.com/ulule/deepcopier"
)
// ContainerStatus represents the current state of a container
@@ -407,7 +406,9 @@ func (t ContainerStatus) String() string {
// Config returns the configuration used to create the container
func (c *Container) Config() *ContainerConfig {
returnConfig := new(ContainerConfig)
- deepcopier.Copy(c.config).To(returnConfig)
+ if err := JSONDeepCopy(c.config, returnConfig); err != nil {
+ return nil
+ }
return returnConfig
}
@@ -417,7 +418,9 @@ func (c *Container) Config() *ContainerConfig {
// spec may differ slightly as mounts are added based on the image
func (c *Container) Spec() *spec.Spec {
returnSpec := new(spec.Spec)
- deepcopier.Copy(c.config.Spec).To(returnSpec)
+ if err := JSONDeepCopy(c.config.Spec, returnSpec); err != nil {
+ return nil
+ }
return returnSpec
}
@@ -1094,7 +1097,9 @@ func (c *Container) ContainerState() (*ContainerState, error) {
}
}
returnConfig := new(ContainerState)
- deepcopier.Copy(c.state).To(returnConfig)
+ if err := JSONDeepCopy(c.state, returnConfig); err != nil {
+ return nil, errors.Wrapf(err, "error copying container %s state", c.ID())
+ }
return c.state, nil
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 02f8d6aa4..23de1aa01 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -820,7 +820,7 @@ func (c *Container) makeBindMounts() error {
}
// Add Secret Mounts
- secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID())
+ secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.DestinationRunDir, c.RootUID(), c.RootGID(), rootless.IsRootless())
for _, mount := range secretMounts {
if _, ok := c.state.BindMounts[mount.Destination]; !ok {
c.state.BindMounts[mount.Destination] = mount.Source
diff --git a/libpod/events.go b/libpod/events.go
index 139600982..b6a277789 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -58,6 +58,10 @@ func (v *Volume) newVolumeEvent(status events.Status) {
// Events is a wrapper function for everyone to begin tailing the events log
// with options
func (r *Runtime) Events(fromStart, stream bool, options []events.EventFilter, eventChannel chan *events.Event) error {
+ if !r.valid {
+ return ErrRuntimeStopped
+ }
+
t, err := r.getTail(fromStart, stream)
if err != nil {
return err
@@ -71,7 +75,7 @@ func (r *Runtime) Events(fromStart, stream bool, options []events.EventFilter, e
case events.Image, events.Volume, events.Pod, events.Container:
// no-op
default:
- return errors.Errorf("event type %s is not valid in %s", event.Type.String(), r.GetConfig().EventsLogFilePath)
+ return errors.Errorf("event type %s is not valid in %s", event.Type.String(), r.config.EventsLogFilePath)
}
include := true
for _, filter := range options {
diff --git a/libpod/info.go b/libpod/info.go
index 62088b730..b42f64a1f 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -13,8 +13,8 @@ import (
"github.com/containers/buildah"
"github.com/containers/libpod/pkg/rootless"
- "github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
+ "github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
)
@@ -116,12 +116,17 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) {
func (r *Runtime) storeInfo() (map[string]interface{}, error) {
// lets say storage driver in use, number of images, number of containers
info := map[string]interface{}{}
- info["ConfigFile"] = util.StorageConfigFile()
info["GraphRoot"] = r.store.GraphRoot()
info["RunRoot"] = r.store.RunRoot()
info["GraphDriverName"] = r.store.GraphDriverName()
info["GraphOptions"] = r.store.GraphOptions()
info["VolumePath"] = r.config.VolumePath
+
+ configFile, err := storage.DefaultConfigFile(rootless.IsRootless())
+ if err != nil {
+ return nil, err
+ }
+ info["ConfigFile"] = configFile
statusPairs, err := r.store.Status()
if err != nil {
return nil, err
diff --git a/libpod/options.go b/libpod/options.go
index 3ca80e96c..e1ffd6ea5 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/libpod/pkg/namespaces"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -82,11 +83,15 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
// or graphdriveroptions are set, then GraphRoot and RunRoot
// must be set
if setField {
+ storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
+ if err != nil {
+ return err
+ }
if rt.config.StorageConfig.GraphRoot == "" {
- rt.config.StorageConfig.GraphRoot = storage.DefaultStoreOptions.GraphRoot
+ rt.config.StorageConfig.GraphRoot = storeOpts.GraphRoot
}
if rt.config.StorageConfig.RunRoot == "" {
- rt.config.StorageConfig.RunRoot = storage.DefaultStoreOptions.RunRoot
+ rt.config.StorageConfig.RunRoot = storeOpts.RunRoot
}
}
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index b9a11000e..9a6baf23e 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -6,7 +6,6 @@ import (
"github.com/containers/libpod/libpod/events"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/ulule/deepcopier"
)
// Start starts all containers within a pod
@@ -441,7 +440,9 @@ func (p *Pod) Inspect() (*PodInspect, error) {
infraContainerID := p.state.InfraContainerID
config := new(PodConfig)
- deepcopier.Copy(p.config).To(config)
+ if err := JSONDeepCopy(p.config, config); err != nil {
+ return nil, err
+ }
inspectData := PodInspect{
Config: config,
State: &PodInspectState{
diff --git a/libpod/runtime.go b/libpod/runtime.go
index b3b75d791..f7b166513 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -23,7 +23,6 @@ import (
"github.com/docker/docker/pkg/namesgenerator"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/ulule/deepcopier"
)
// RuntimeStateStore is a constant indicating which state store implementation
@@ -249,11 +248,16 @@ type runtimeConfiguredFrom struct {
noPivotRoot bool
}
-var (
- defaultRuntimeConfig = RuntimeConfig{
+func defaultRuntimeConfig() (RuntimeConfig, error) {
+ storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
+ if err != nil {
+ return RuntimeConfig{}, err
+ }
+
+ return RuntimeConfig{
// Leave this empty so containers/storage will use its defaults
StorageConfig: storage.StoreOptions{},
- VolumePath: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "volumes"),
+ VolumePath: filepath.Join(storeOpts.GraphRoot, "volumes"),
ImageDefaultTransport: DefaultTransport,
StateType: BoltDBStateStore,
OCIRuntime: "runc",
@@ -282,7 +286,7 @@ var (
},
InitPath: DefaultInitPath,
CgroupManager: SystemdCgroupsManager,
- StaticDir: filepath.Join(storage.DefaultStoreOptions.GraphRoot, "libpod"),
+ StaticDir: filepath.Join(storeOpts.GraphRoot, "libpod"),
TmpDir: "",
MaxLogSize: -1,
NoPivotRoot: false,
@@ -293,8 +297,8 @@ var (
EnablePortReservation: true,
EnableLabeling: true,
NumLocks: 2048,
- }
-)
+ }, nil
+}
func getDefaultTmpDir() (string, error) {
if !rootless.IsRootless() {
@@ -355,10 +359,17 @@ func newRuntimeFromConfig(userConfigPath string, options ...RuntimeOption) (runt
if err != nil {
return nil, err
}
- deepcopier.Copy(defaultRuntimeConfig).To(runtime.config)
+
+ defRunConf, err := defaultRuntimeConfig()
+ if err != nil {
+ return nil, err
+ }
+ if err := JSONDeepCopy(defRunConf, runtime.config); err != nil {
+ return nil, errors.Wrapf(err, "error copying runtime default config")
+ }
runtime.config.TmpDir = tmpDir
- storageConf, err := util.GetDefaultStoreOptions()
+ storageConf, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
if err != nil {
return nil, errors.Wrapf(err, "error retrieving storage config")
}
@@ -507,7 +518,10 @@ func newRuntimeFromConfig(userConfigPath string, options ...RuntimeOption) (runt
}
if rootlessConfigPath != "" {
// storage.conf
- storageConfFile := util.StorageConfigFile()
+ storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
+ if err != nil {
+ return nil, err
+ }
if _, err := os.Stat(storageConfFile); os.IsNotExist(err) {
if err := util.WriteStorageConfigFile(&runtime.config.StorageConfig, storageConfFile); err != nil {
return nil, errors.Wrapf(err, "cannot write config file %s", storageConfFile)
@@ -923,20 +937,22 @@ func makeRuntime(runtime *Runtime) (err error) {
}
// GetConfig returns a copy of the configuration used by the runtime
-func (r *Runtime) GetConfig() *RuntimeConfig {
+func (r *Runtime) GetConfig() (*RuntimeConfig, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
- return nil
+ return nil, ErrRuntimeStopped
}
config := new(RuntimeConfig)
// Copy so the caller won't be able to modify the actual config
- deepcopier.Copy(r.config).To(config)
+ if err := JSONDeepCopy(r.config, config); err != nil {
+ return nil, errors.Wrapf(err, "error copying config")
+ }
- return config
+ return config, nil
}
// Shutdown shuts down the runtime and associated containers and storage
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index f23dc86dd..7c39d8ced 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -19,7 +19,6 @@ import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/ulule/deepcopier"
)
// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
@@ -63,7 +62,9 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
ctr.config.ID = stringid.GenerateNonCryptoID()
ctr.config.Spec = new(spec.Spec)
- deepcopier.Copy(rSpec).To(ctr.config.Spec)
+ if err := JSONDeepCopy(rSpec, ctr.config.Spec); err != nil {
+ return nil, errors.Wrapf(err, "error copying runtime spec while creating container")
+ }
ctr.config.CreatedTime = time.Now()
ctr.config.ShmSize = DefaultShmSize
diff --git a/libpod/util.go b/libpod/util.go
index b7578135a..7e2dff21a 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -187,3 +187,13 @@ func validPodNSOption(p *Pod, ctrPod string) error {
}
return nil
}
+
+// JSONDeepCopy performs a deep copy by performing a JSON encode/decode of the
+// given structures. From and To should be identically typed structs.
+func JSONDeepCopy(from, to interface{}) error {
+ tmp, err := json.Marshal(from)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(tmp, to)
+}
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index ef8de90a6..4a32607a2 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -14,7 +14,6 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/varlinkapi"
"github.com/pkg/errors"
- "github.com/ulule/deepcopier"
)
// Pod ...
@@ -99,7 +98,9 @@ func (r *LocalRuntime) LookupPod(nameOrID string) (*Pod, error) {
// the data of a remotepod data struct
func (p *Pod) Inspect() (*libpod.PodInspect, error) {
config := new(libpod.PodConfig)
- deepcopier.Copy(p.remotepod.config).To(config)
+ if err := libpod.JSONDeepCopy(p.remotepod.config, config); err != nil {
+ return nil, err
+ }
inspectData := libpod.PodInspect{
Config: config,
State: p.remotepod.state,
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 6c53d0c62..dcc2d5aa6 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -137,6 +137,7 @@ func imageInListToContainerImage(i iopodman.Image, name string, runtime *LocalRu
ri := remoteImage{
InputName: name,
ID: i.Id,
+ Digest: digest.Digest(i.Digest),
Labels: i.Labels,
RepoTags: i.RepoTags,
RepoDigests: i.RepoTags,
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 79a318771..07ae633d1 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -346,8 +346,11 @@ func (c *CreateConfig) GetTmpfsMounts() []spec.Mount {
return m
}
-func (c *CreateConfig) createExitCommand() []string {
- config := c.Runtime.GetConfig()
+func (c *CreateConfig) createExitCommand() ([]string, error) {
+ config, err := c.Runtime.GetConfig()
+ if err != nil {
+ return nil, err
+ }
cmd, _ := os.Executable()
command := []string{cmd,
@@ -372,7 +375,7 @@ func (c *CreateConfig) createExitCommand() []string {
command = append(command, "--rm")
}
- return command
+ return command, nil
}
// GetContainerCreateOptions takes a CreateConfig and returns a slice of CtrCreateOptions
@@ -567,7 +570,11 @@ func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime, pod *l
}
// Always use a cleanup process to clean up Podman after termination
- options = append(options, libpod.WithExitCommand(c.createExitCommand()))
+ exitCmd, err := c.createExitCommand()
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, libpod.WithExitCommand(exitCmd))
if c.HealthCheck != nil {
options = append(options, libpod.WithHealthCheck(c.HealthCheck))
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index a408ad34b..19b2c44be 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -3,7 +3,6 @@ package util
import (
"fmt"
"os"
- "os/exec"
"path/filepath"
"strings"
"syscall"
@@ -241,25 +240,6 @@ func GetRootlessDirInfo() (string, string, error) {
return dataDir, rootlessRuntime, nil
}
-// GetRootlessStorageOpts returns the storage opts for containers running as non root
-func GetRootlessStorageOpts() (storage.StoreOptions, error) {
- var opts storage.StoreOptions
-
- dataDir, rootlessRuntime, err := GetRootlessDirInfo()
- if err != nil {
- return opts, err
- }
- opts.RunRoot = rootlessRuntime
- opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
- if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
- opts.GraphDriverName = "overlay"
- opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
- } else {
- opts.GraphDriverName = "vfs"
- }
- return opts, nil
-}
-
type tomlOptionsConfig struct {
MountProgram string `toml:"mount_program"`
}
@@ -289,42 +269,6 @@ func getTomlStorage(storeOptions *storage.StoreOptions) *tomlConfig {
return config
}
-// GetDefaultStoreOptions returns the default storage ops for containers
-func GetDefaultStoreOptions() (storage.StoreOptions, error) {
- var (
- defaultRootlessRunRoot string
- defaultRootlessGraphRoot string
- err error
- )
- storageOpts := storage.DefaultStoreOptions
- if rootless.IsRootless() {
- storageOpts, err = GetRootlessStorageOpts()
- if err != nil {
- return storageOpts, err
- }
- }
-
- storageConf := StorageConfigFile()
- if _, err = os.Stat(storageConf); err == nil {
- defaultRootlessRunRoot = storageOpts.RunRoot
- defaultRootlessGraphRoot = storageOpts.GraphRoot
- storageOpts = storage.StoreOptions{}
- storage.ReloadConfigurationFile(storageConf, &storageOpts)
- }
- if rootless.IsRootless() && err == nil {
- // If the file did not specify a graphroot or runroot,
- // set sane defaults so we don't try and use root-owned
- // directories
- if storageOpts.RunRoot == "" {
- storageOpts.RunRoot = defaultRootlessRunRoot
- }
- if storageOpts.GraphRoot == "" {
- storageOpts.GraphRoot = defaultRootlessGraphRoot
- }
- }
- return storageOpts, nil
-}
-
// WriteStorageConfigFile writes the configuration to a file
func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf string) error {
os.MkdirAll(filepath.Dir(storageConf), 0755)
@@ -342,14 +286,6 @@ func WriteStorageConfigFile(storageOpts *storage.StoreOptions, storageConf strin
return nil
}
-// StorageConfigFile returns the path to the storage config file used
-func StorageConfigFile() string {
- if rootless.IsRootless() {
- return filepath.Join(os.Getenv("HOME"), ".config/containers/storage.conf")
- }
- return storage.DefaultConfigFile
-}
-
// ParseInputTime takes the users input and to determine if it is valid and
// returns a time format and error. The input is compared to known time formats
// or a duration which implies no-duration
diff --git a/pkg/varlinkapi/containers_create.go b/pkg/varlinkapi/containers_create.go
index 6b53b22c6..8990ac001 100644
--- a/pkg/varlinkapi/containers_create.go
+++ b/pkg/varlinkapi/containers_create.go
@@ -22,7 +22,10 @@ import (
// CreateContainer ...
func (i *LibpodAPI) CreateContainer(call iopodman.VarlinkCall, config iopodman.Create) error {
- rtc := i.Runtime.GetConfig()
+ rtc, err := i.Runtime.GetConfig()
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
ctx := getContext()
newImage, err := i.Runtime.ImageRuntime().New(ctx, config.Image, rtc.SignaturePolicyPath, "", os.Stderr, nil, image.SigningOptions{}, false, nil)
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index 210f139ce..0ca867410 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -54,6 +54,7 @@ func (i *LibpodAPI) ListImages(call iopodman.VarlinkCall) error {
i := iopodman.Image{
Id: image.ID(),
+ Digest: string(image.Digest()),
ParentId: image.Parent,
RepoTags: image.Names(),
RepoDigests: repoDigests,
@@ -514,7 +515,11 @@ func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, ch
if err != nil {
return call.ReplyContainerNotFound(name, err.Error())
}
- sc := image.GetSystemContext(i.Runtime.GetConfig().SignaturePolicyPath, "", false)
+ rtc, err := i.Runtime.GetConfig()
+ if err != nil {
+ return call.ReplyErrorOccurred(err.Error())
+ }
+ sc := image.GetSystemContext(rtc.SignaturePolicyPath, "", false)
var mimeType string
switch manifestType {
case "oci", "": //nolint
@@ -525,7 +530,7 @@ func (i *LibpodAPI) Commit(call iopodman.VarlinkCall, name, imageName string, ch
return call.ReplyErrorOccurred(fmt.Sprintf("unrecognized image format %q", manifestType))
}
coptions := buildah.CommitOptions{
- SignaturePolicyPath: i.Runtime.GetConfig().SignaturePolicyPath,
+ SignaturePolicyPath: rtc.SignaturePolicyPath,
ReportWriter: nil,
SystemContext: sc,
PreferredManifestType: mimeType,
diff --git a/vendor.conf b/vendor.conf
index bbfc96377..d36de6ffc 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -13,16 +13,16 @@ github.com/buger/goterm c206103e1f37c0c6c5c039706305ea2aa6e8ad3b
github.com/checkpoint-restore/go-criu v3.11
github.com/containerd/cgroups 39b18af02c4120960f517a3a4c2588fabb61d02c
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
-github.com/containernetworking/cni v0.7.0-alpha1
+github.com/containernetworking/cni v0.7.0-rc2
github.com/containernetworking/plugins v0.7.4
-github.com/containers/image v1.5
+github.com/containers/image v1.5.1
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
-github.com/containers/storage v1.10
+github.com/containers/storage v1.12.1
github.com/containers/psgo v1.2
github.com/coreos/go-systemd v14
-github.com/cri-o/ocicni 2d2983e40c242322a56c22a903785e7f83eb378c
+github.com/cri-o/ocicni 0c180f981b27ef6036fa5be29bcb4dd666e406eb
github.com/cyphar/filepath-securejoin v0.2.1
github.com/davecgh/go-spew v1.1.0
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -73,7 +73,6 @@ github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/tchap/go-patricia v2.2.6
github.com/uber/jaeger-client-go 64f57863bf63d3842dbe79cdc793d57baaff9ab5
github.com/uber/jaeger-lib d036253de8f5b698150d81b922486f1e8e7628ec
-github.com/ulule/deepcopier ca99b135e50f526fde9cd88705f0ff2f3f95b77c
github.com/vbatts/tar-split v0.11.1
github.com/vishvananda/netlink v1.0.0
github.com/vishvananda/netns 13995c7128ccc8e51e9a6bd2b551020a27180abd
@@ -94,7 +93,7 @@ k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apim
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199
-github.com/containers/buildah 3ba8822d309128f7d76599432b8d9cdf77d4032f
+github.com/containers/buildah c933fe4bc608676d308ffcb276b7d8561a18e94d
# TODO: Gotty has not been updated since 2012. Can we find replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/fsouza/go-dockerclient v1.3.0
diff --git a/vendor/github.com/containernetworking/cni/README.md b/vendor/github.com/containernetworking/cni/README.md
index 65ccda9f9..3968d908a 100644
--- a/vendor/github.com/containernetworking/cni/README.md
+++ b/vendor/github.com/containernetworking/cni/README.md
@@ -9,9 +9,9 @@
# Community Sync Meeting
-There is a community sync meeting for users and developers every 1-2 months. The next meeting will help on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (Notes from previous meeting are also in this doc).
+There is a community sync meeting for users and developers every 1-2 months. The next meeting will help on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (Notes from previous meeting are also in this doc).
-The next meeting will be held on *Wednesday, October 4th* at *3:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2017-10-04&sln=15-16).
+The next meeting will be held on *Wednesday, January 30th, 2019* at *4:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2019-01-30&sln=16-17).
---
@@ -38,11 +38,13 @@ To avoid duplication, we think it is prudent to define a common interface betwee
## Who is using CNI?
### Container runtimes
- [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html)
-- [Kubernetes - a system to simplify container operations](http://kubernetes.io/docs/admin/network-plugins/)
+- [Kubernetes - a system to simplify container operations](https://kubernetes.io/docs/admin/network-plugins/)
- [OpenShift - Kubernetes with additional enterprise features](https://github.com/openshift/origin/blob/master/docs/openshift_networking_requirements.md)
- [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/cf-networking-release)
- [Apache Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md)
- [Amazon ECS - a highly scalable, high performance container management service](https://aws.amazon.com/ecs/)
+- [Singularity - container platform optimized for HPC, EPC, and AI](https://github.com/sylabs/singularity)
+- [OpenSVC - orchestrator for legacy and containerized application stacks](https://docs.opensvc.com/latest/fr/agent.configure.cni.html)
### 3rd party plugins
- [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni)
@@ -61,6 +63,10 @@ To avoid duplication, we think it is prudent to define a common interface betwee
- [Amazon ECS CNI Plugins - a collection of CNI Plugins to configure containers with Amazon EC2 elastic network interfaces (ENIs)](https://github.com/aws/amazon-ecs-cni-plugins)
- [Bonding CNI - a Link aggregating plugin to address failover and high availability network](https://github.com/Intel-Corp/bond-cni)
- [ovn-kubernetes - an container network plugin built on Open vSwitch (OVS) and Open Virtual Networking (OVN) with support for both Linux and Windows](https://github.com/openvswitch/ovn-kubernetes)
+- [Juniper Contrail](https://www.juniper.net/cloud) / [TungstenFabric](https://tungstenfabric.io) - Provides overlay SDN solution, delivering multicloud networking, hybrid cloud networking, simultaneous overlay-underlay support, network policy enforcement, network isolation, service chaining and flexible load balancing
+- [Knitter - a CNI plugin supporting multiple networking for Kubernetes](https://github.com/ZTE/Knitter)
+- [DANM - a CNI-compliant networking solution for TelCo workloads running on Kubernetes](https://github.com/nokia/danm)
+- [VMware NSX – a CNI plugin that enables automated NSX L2/L3 networking and L4/L7 Load Balancing; network isolation at the pod, node, and cluster level; and zero-trust security policy for your Kubernetes cluster.](https://docs.vmware.com/en/VMware-NSX-T/2.2/com.vmware.nsxt.ncp_kubernetes.doc/GUID-6AFA724E-BB62-4693-B95C-321E8DDEA7E1.html)
The CNI team also maintains some [core plugins in a separate repository](https://github.com/containernetworking/plugins).
@@ -74,7 +80,7 @@ If you intend to contribute to code or documentation, please read [CONTRIBUTING.
### Requirements
-The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. Our [automated tests](https://travis-ci.org/containernetworking/cni/builds) cover Go versions 1.7 and 1.8.
+The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. You can find the Go versions covered by our [automated tests](https://travis-ci.org/containernetworking/cni/builds) in [.travis.yaml](.travis.yml).
### Reference Plugins
@@ -111,6 +117,7 @@ EOF
$ cat >/etc/cni/net.d/99-loopback.conf <<EOF
{
"cniVersion": "0.2.0",
+ "name": "lo",
"type": "loopback"
}
EOF
@@ -122,7 +129,7 @@ Next, build the plugins:
```bash
$ cd $GOPATH/src/github.com/containernetworking/plugins
-$ ./build.sh
+$ ./build_linux.sh # or build_windows.sh
```
Finally, execute a command (`ifconfig` in this example) in a private network namespace that has joined the `mynet` network:
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
index d494e43d4..360733e74 100644
--- a/vendor/github.com/containernetworking/cni/libcni/api.go
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -15,6 +15,7 @@
package libcni
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -57,20 +58,25 @@ type NetworkConfig struct {
}
type NetworkConfigList struct {
- Name string
- CNIVersion string
- Plugins []*NetworkConfig
- Bytes []byte
+ Name string
+ CNIVersion string
+ DisableCheck bool
+ Plugins []*NetworkConfig
+ Bytes []byte
}
type CNI interface {
- AddNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
- GetNetworkList(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
- DelNetworkList(net *NetworkConfigList, rt *RuntimeConf) error
+ AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+ DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
- AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
- GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
- DelNetwork(net *NetworkConfig, rt *RuntimeConf) error
+ AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+ DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+ GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+
+ ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
+ ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
}
type CNIConfig struct {
@@ -120,7 +126,7 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ
// These capabilities arguments are filtered through the plugin's advertised
// capabilities from its config JSON, and any keys in the CapabilityArgs
// matching plugin capabilities are added to the "runtimeConfig" dictionary
-// sent to the plugin via JSON on stdin. For exmaple, if the plugin's
+// sent to the plugin via JSON on stdin. For example, if the plugin's
// capabilities include "portMappings", and the CapabilityArgs map includes a
// "portMappings" key, that key and its value are added to the "runtimeConfig"
// dictionary to be passed to the plugin's stdin.
@@ -158,40 +164,12 @@ func (c *CNIConfig) ensureExec() invoke.Exec {
return c.exec
}
-func (c *CNIConfig) addOrGetNetwork(command, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
- c.ensureExec()
- pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
- if err != nil {
- return nil, err
- }
-
- newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
- if err != nil {
- return nil, err
- }
-
- return invoke.ExecPluginWithResult(pluginPath, newConf.Bytes, c.args(command, rt), c.exec)
-}
-
-// Note that only GET requests should pass an initial prevResult
-func (c *CNIConfig) addOrGetNetworkList(command string, prevResult types.Result, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
- var err error
- for _, net := range list.Plugins {
- prevResult, err = c.addOrGetNetwork(command, list.Name, list.CNIVersion, net, prevResult, rt)
- if err != nil {
- return nil, err
- }
- }
-
- return prevResult, nil
-}
-
func getResultCacheFilePath(netName string, rt *RuntimeConf) string {
cacheDir := rt.CacheDir
if cacheDir == "" {
cacheDir = CacheDir
}
- return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s", netName, rt.ContainerID))
+ return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName))
}
func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error {
@@ -243,37 +221,94 @@ func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result,
return result, err
}
-// AddNetworkList executes a sequence of plugins with the ADD command
-func (c *CNIConfig) AddNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
- result, err := c.addOrGetNetworkList("ADD", nil, list, rt)
+// GetNetworkListCachedResult returns the cached Result of the previous
+// previous AddNetworkList() operation for a network list, or an error.
+func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+ return getCachedResult(list.Name, list.CNIVersion, rt)
+}
+
+// GetNetworkCachedResult returns the cached Result of the previous
+// previous AddNetwork() operation for a network, or an error.
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+ return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+}
+
+func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
if err != nil {
return nil, err
}
+ newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+ if err != nil {
+ return nil, err
+ }
+
+ return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec)
+}
+
+// AddNetworkList executes a sequence of plugins with the ADD command
+func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+ var err error
+ var result types.Result
+ for _, net := range list.Plugins {
+ result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt)
+ if err != nil {
+ return nil, err
+ }
+ }
+
if err = setCachedResult(result, list.Name, rt); err != nil {
- return nil, fmt.Errorf("failed to set network '%s' cached result: %v", list.Name, err)
+ return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err)
}
return result, nil
}
-// GetNetworkList executes a sequence of plugins with the GET command
-func (c *CNIConfig) GetNetworkList(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
- // GET was added in CNI spec version 0.4.0 and higher
+func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+ if err != nil {
+ return err
+ }
+
+ newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+ if err != nil {
+ return err
+ }
+
+ return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec)
+}
+
+// CheckNetworkList executes a sequence of plugins with the CHECK command
+func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
+ // CHECK was added in CNI spec version 0.4.0 and higher
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
- return nil, err
+ return err
} else if !gtet {
- return nil, fmt.Errorf("configuration version %q does not support the GET command", list.CNIVersion)
+ return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
+ }
+
+ if list.DisableCheck {
+ return nil
}
cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
- return nil, fmt.Errorf("failed to get network '%s' cached result: %v", list.Name, err)
+ return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
+ }
+
+ for _, net := range list.Plugins {
+ if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+ return err
+ }
}
- return c.addOrGetNetworkList("GET", cachedResult, list, rt)
+
+ return nil
}
-func (c *CNIConfig) delNetwork(name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
c.ensureExec()
pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
if err != nil {
@@ -285,11 +320,11 @@ func (c *CNIConfig) delNetwork(name, cniVersion string, net *NetworkConfig, prev
return err
}
- return invoke.ExecPluginWithoutResult(pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec)
+ return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec)
}
// DelNetworkList executes a sequence of plugins with the DEL command
-func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) error {
+func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
var cachedResult types.Result
// Cached result on DEL was added in CNI spec version 0.4.0 and higher
@@ -298,13 +333,13 @@ func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) err
} else if gtet {
cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
- return fmt.Errorf("failed to get network '%s' cached result: %v", list.Name, err)
+ return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
}
}
for i := len(list.Plugins) - 1; i >= 0; i-- {
net := list.Plugins[i]
- if err := c.delNetwork(list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+ if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
return err
}
}
@@ -314,37 +349,37 @@ func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) err
}
// AddNetwork executes the plugin with the ADD command
-func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
- result, err := c.addOrGetNetwork("ADD", net.Network.Name, net.Network.CNIVersion, net, nil, rt)
+func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+ result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt)
if err != nil {
return nil, err
}
if err = setCachedResult(result, net.Network.Name, rt); err != nil {
- return nil, fmt.Errorf("failed to set network '%s' cached result: %v", net.Network.Name, err)
+ return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err)
}
return result, nil
}
-// GetNetwork executes the plugin with the GET command
-func (c *CNIConfig) GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
- // GET was added in CNI spec version 0.4.0 and higher
+// CheckNetwork executes the plugin with the CHECK command
+func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
+ // CHECK was added in CNI spec version 0.4.0 and higher
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
- return nil, err
+ return err
} else if !gtet {
- return nil, fmt.Errorf("configuration version %q does not support the GET command", net.Network.CNIVersion)
+ return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
}
cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil {
- return nil, fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err)
+ return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
}
- return c.addOrGetNetwork("GET", net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt)
+ return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt)
}
// DelNetwork executes the plugin with the DEL command
-func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error {
+func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error {
var cachedResult types.Result
// Cached result on DEL was added in CNI spec version 0.4.0 and higher
@@ -353,27 +388,99 @@ func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error {
} else if gtet {
cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil {
- return fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err)
+ return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
}
}
- if err := c.delNetwork(net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
+ if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
return err
}
_ = delCachedResult(net.Network.Name, rt)
return nil
}
+// ValidateNetworkList checks that a configuration is reasonably valid.
+// - all the specified plugins exist on disk
+// - every plugin supports the desired version.
+//
+// Returns a list of all capabilities supported by the configuration, or error
+func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
+ version := list.CNIVersion
+
+ // holding map for seen caps (in case of duplicates)
+ caps := map[string]interface{}{}
+
+ errs := []error{}
+ for _, net := range list.Plugins {
+ if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil {
+ errs = append(errs, err)
+ }
+ for c, enabled := range net.Network.Capabilities {
+ if !enabled {
+ continue
+ }
+ caps[c] = struct{}{}
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, fmt.Errorf("%v", errs)
+ }
+
+ // make caps list
+ cc := make([]string, 0, len(caps))
+ for c := range caps {
+ cc = append(cc, c)
+ }
+
+ return cc, nil
+}
+
+// ValidateNetwork checks that a configuration is reasonably valid.
+// It uses the same logic as ValidateNetworkList)
+// Returns a list of capabilities
+func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
+ caps := []string{}
+ for c, ok := range net.Network.Capabilities {
+ if ok {
+ caps = append(caps, c)
+ }
+ }
+ if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
+ return nil, err
+ }
+ return caps, nil
+}
+
+// validatePlugin checks that an individual plugin's configuration is sane
+func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
+ pluginPath, err := invoke.FindInPath(pluginName, c.Path)
+ if err != nil {
+ return err
+ }
+
+ vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+ if err != nil {
+ return err
+ }
+ for _, vers := range vi.SupportedVersions() {
+ if vers == expectedVersion {
+ return nil
+ }
+ }
+ return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
+}
+
// GetVersionInfo reports which versions of the CNI spec are supported by
// the given plugin.
-func (c *CNIConfig) GetVersionInfo(pluginType string) (version.PluginInfo, error) {
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
c.ensureExec()
pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
if err != nil {
return nil, err
}
- return invoke.GetVersionInfo(pluginPath, c.exec)
+ return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
}
// =====
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
index 9834d715b..ea56c509d 100644
--- a/vendor/github.com/containernetworking/cni/libcni/conf.go
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -83,10 +83,19 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
}
}
+ disableCheck := false
+ if rawDisableCheck, ok := rawList["disableCheck"]; ok {
+ disableCheck, ok = rawDisableCheck.(bool)
+ if !ok {
+ return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck)
+ }
+ }
+
list := &NetworkConfigList{
- Name: name,
- CNIVersion: cniVersion,
- Bytes: bytes,
+ Name: name,
+ DisableCheck: disableCheck,
+ CNIVersion: cniVersion,
+ Bytes: bytes,
}
var plugins []interface{}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
index 21efdf802..30b4672f1 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
@@ -15,6 +15,7 @@
package invoke
import (
+ "context"
"fmt"
"os"
"path/filepath"
@@ -22,54 +23,53 @@ import (
"github.com/containernetworking/cni/pkg/types"
)
-func delegateAddOrGet(command, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
+func delegateCommon(expectedCommand, delegatePlugin string, exec Exec) (string, Exec, error) {
if exec == nil {
exec = defaultExec
}
+ if os.Getenv("CNI_COMMAND") != expectedCommand {
+ return "", nil, fmt.Errorf("CNI_COMMAND is not " + expectedCommand)
+ }
+
paths := filepath.SplitList(os.Getenv("CNI_PATH"))
pluginPath, err := exec.FindInPath(delegatePlugin, paths)
if err != nil {
- return nil, err
+ return "", nil, err
}
- return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv(), exec)
+ return pluginPath, exec, nil
}
// DelegateAdd calls the given delegate plugin with the CNI ADD action and
// JSON configuration
-func DelegateAdd(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
- if os.Getenv("CNI_COMMAND") != "ADD" {
- return nil, fmt.Errorf("CNI_COMMAND is not ADD")
+func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
+ pluginPath, realExec, err := delegateCommon("ADD", delegatePlugin, exec)
+ if err != nil {
+ return nil, err
}
- return delegateAddOrGet("ADD", delegatePlugin, netconf, exec)
+
+ return ExecPluginWithResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
}
-// DelegateGet calls the given delegate plugin with the CNI GET action and
+// DelegateCheck calls the given delegate plugin with the CNI CHECK action and
// JSON configuration
-func DelegateGet(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) {
- if os.Getenv("CNI_COMMAND") != "GET" {
- return nil, fmt.Errorf("CNI_COMMAND is not GET")
+func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ pluginPath, realExec, err := delegateCommon("CHECK", delegatePlugin, exec)
+ if err != nil {
+ return err
}
- return delegateAddOrGet("GET", delegatePlugin, netconf, exec)
+
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
}
// DelegateDel calls the given delegate plugin with the CNI DEL action and
// JSON configuration
-func DelegateDel(delegatePlugin string, netconf []byte, exec Exec) error {
- if exec == nil {
- exec = defaultExec
- }
-
- if os.Getenv("CNI_COMMAND") != "DEL" {
- return fmt.Errorf("CNI_COMMAND is not DEL")
- }
-
- paths := filepath.SplitList(os.Getenv("CNI_PATH"))
- pluginPath, err := exec.FindInPath(delegatePlugin, paths)
+func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error {
+ pluginPath, realExec, err := delegateCommon("DEL", delegatePlugin, exec)
if err != nil {
return err
}
- return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv(), exec)
+ return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec)
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
index cf019d3a0..8e6d30b82 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
@@ -15,6 +15,7 @@
package invoke
import (
+ "context"
"fmt"
"os"
@@ -26,7 +27,7 @@ import (
// and executing a CNI plugin. Tests may provide a fake implementation
// to avoid writing fake plugins to temporary directories during the test.
type Exec interface {
- ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+ ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
FindInPath(plugin string, paths []string) (string, error)
Decode(jsonBytes []byte) (version.PluginInfo, error)
}
@@ -72,12 +73,12 @@ type Exec interface {
// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
//}
-func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
+func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
if exec == nil {
exec = defaultExec
}
- stdoutBytes, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv())
+ stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
if err != nil {
return nil, err
}
@@ -92,11 +93,11 @@ func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec
return version.NewResult(confVersion, stdoutBytes)
}
-func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
+func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
if exec == nil {
exec = defaultExec
}
- _, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv())
+ _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
return err
}
@@ -104,7 +105,7 @@ func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, ex
// For recent-enough plugins, it uses the information returned by the VERSION
// command. For older plugins which do not recognize that command, it reports
// version 0.1.0
-func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) {
+func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) {
if exec == nil {
exec = defaultExec
}
@@ -117,7 +118,7 @@ func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) {
Path: "dummy",
}
stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
- stdoutBytes, err := exec.ExecPlugin(pluginPath, stdin, args.AsEnv())
+ stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv())
if err != nil {
if err.Error() == "unknown CNI_COMMAND: VERSION" {
return version.PluginSupports("0.1.0"), nil
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
index bab5737a9..9bcfb4553 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build darwin dragonfly freebsd linux netbsd opensbd solaris
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package invoke
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
index a598f09c2..e5b86634d 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
@@ -16,6 +16,7 @@ package invoke
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"io"
@@ -28,17 +29,13 @@ type RawExec struct {
Stderr io.Writer
}
-func (e *RawExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
stdout := &bytes.Buffer{}
-
- c := exec.Cmd{
- Env: environ,
- Path: pluginPath,
- Args: []string{pluginPath},
- Stdin: bytes.NewBuffer(stdinData),
- Stdout: stdout,
- Stderr: e.Stderr,
- }
+ c := exec.CommandContext(ctx, pluginPath)
+ c.Env = environ
+ c.Stdin = bytes.NewBuffer(stdinData)
+ c.Stdout = stdout
+ c.Stderr = e.Stderr
if err := c.Run(); err != nil {
return nil, pluginErr(err, stdout.Bytes())
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
index 2833aba78..53256167f 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
@@ -17,6 +17,7 @@ package types020
import (
"encoding/json"
"fmt"
+ "io"
"net"
"os"
@@ -73,11 +74,15 @@ func (r *Result) GetAsVersion(version string) (types.Result, error) {
}
func (r *Result) Print() error {
+ return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
data, err := json.MarshalIndent(r, "", " ")
if err != nil {
return err
}
- _, err = os.Stdout.Write(data)
+ _, err = writer.Write(data)
return err
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
index 92980c1a7..7267a2e6d 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
@@ -17,6 +17,7 @@ package current
import (
"encoding/json"
"fmt"
+ "io"
"net"
"os"
@@ -75,13 +76,9 @@ func convertFrom020(result types.Result) (*Result, error) {
Gateway: oldResult.IP4.Gateway,
})
for _, route := range oldResult.IP4.Routes {
- gw := route.GW
- if gw == nil {
- gw = oldResult.IP4.Gateway
- }
newResult.Routes = append(newResult.Routes, &types.Route{
Dst: route.Dst,
- GW: gw,
+ GW: route.GW,
})
}
}
@@ -93,21 +90,13 @@ func convertFrom020(result types.Result) (*Result, error) {
Gateway: oldResult.IP6.Gateway,
})
for _, route := range oldResult.IP6.Routes {
- gw := route.GW
- if gw == nil {
- gw = oldResult.IP6.Gateway
- }
newResult.Routes = append(newResult.Routes, &types.Route{
Dst: route.Dst,
- GW: gw,
+ GW: route.GW,
})
}
}
- if len(newResult.IPs) == 0 {
- return nil, fmt.Errorf("cannot convert: no valid IP addresses")
- }
-
return newResult, nil
}
@@ -206,11 +195,15 @@ func (r *Result) GetAsVersion(version string) (types.Result, error) {
}
func (r *Result) Print() error {
+ return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
data, err := json.MarshalIndent(r, "", " ")
if err != nil {
return err
}
- _, err = os.Stdout.Write(data)
+ _, err = writer.Write(data)
return err
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
index 4684a3207..d0d11006a 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -18,6 +18,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "io"
"net"
"os"
)
@@ -65,6 +66,9 @@ type NetConf struct {
Capabilities map[string]bool `json:"capabilities,omitempty"`
IPAM IPAM `json:"ipam,omitempty"`
DNS DNS `json:"dns"`
+
+ RawPrevResult map[string]interface{} `json:"prevResult,omitempty"`
+ PrevResult Result `json:"-"`
}
type IPAM struct {
@@ -75,15 +79,16 @@ type IPAM struct {
type NetConfList struct {
CNIVersion string `json:"cniVersion,omitempty"`
- Name string `json:"name,omitempty"`
- Plugins []*NetConf `json:"plugins,omitempty"`
+ Name string `json:"name,omitempty"`
+ DisableCheck bool `json:"disableCheck,omitempty"`
+ Plugins []*NetConf `json:"plugins,omitempty"`
}
type ResultFactoryFunc func([]byte) (Result, error)
// Result is an interface that provides the result of plugin execution
type Result interface {
- // The highest CNI specification result verison the result supports
+ // The highest CNI specification result version the result supports
// without having to convert
Version() string
@@ -94,6 +99,9 @@ type Result interface {
// Prints the result in JSON format to stdout
Print() error
+ // Prints the result in JSON format to provided writer
+ PrintTo(writer io.Writer) error
+
// Returns a JSON string representation of the result
String() string
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
index 612335a81..1df427243 100644
--- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
+++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go
@@ -86,9 +86,13 @@ func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
// minor, and micro numbers or returns an error
func ParseVersion(version string) (int, int, int, error) {
var major, minor, micro int
+ if version == "" {
+ return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version)
+ }
+
parts := strings.Split(version, ".")
- if len(parts) == 0 || len(parts) >= 4 {
- return -1, -1, -1, fmt.Errorf("invalid version %q: too many or too few parts", version)
+ if len(parts) >= 4 {
+ return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version)
}
major, err := strconv.Atoi(parts[0])
@@ -114,7 +118,7 @@ func ParseVersion(version string) (int, int, int, error) {
}
// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro
-// nubmers, and compares them to determine whether the first version is greater
+// numbers, and compares them to determine whether the first version is greater
// than or equal to the second
func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) {
firstMajor, firstMinor, firstMicro, err := ParseVersion(version)
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go
index c8e46d55b..8f3508e61 100644
--- a/vendor/github.com/containernetworking/cni/pkg/version/version.go
+++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go
@@ -15,6 +15,7 @@
package version
import (
+ "encoding/json"
"fmt"
"github.com/containernetworking/cni/pkg/types"
@@ -59,3 +60,24 @@ func NewResult(version string, resultBytes []byte) (types.Result, error) {
return nil, fmt.Errorf("unsupported CNI result version %q", version)
}
+
+// ParsePrevResult parses a prevResult in a NetConf structure and sets
+// the NetConf's PrevResult member to the parsed Result object.
+func ParsePrevResult(conf *types.NetConf) error {
+ if conf.RawPrevResult == nil {
+ return nil
+ }
+
+ resultBytes, err := json.Marshal(conf.RawPrevResult)
+ if err != nil {
+ return fmt.Errorf("could not serialize prevResult: %v", err)
+ }
+
+ conf.RawPrevResult = nil
+ conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes)
+ if err != nil {
+ return fmt.Errorf("could not parse prevResult: %v", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 913a4336f..827d5a87f 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -78,21 +78,21 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
$ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x
-ctr1=`buildah from ${1:-fedora}`
+ctr1=$(buildah from "${1:-fedora}")
## Get all updates and install our minimal httpd server
-buildah run $ctr1 -- dnf update -y
-buildah run $ctr1 -- dnf install -y lighttpd
+buildah run "$ctr1" -- dnf update -y
+buildah run "$ctr1" -- dnf install -y lighttpd
## Include some buildtime annotations
-buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1
+buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"
## Run our server and expose the port
-buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" $ctr1
-buildah config --port 80 $ctr1
+buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
+buildah config --port 80 "$ctr1"
## Commit this container to an image name
-buildah commit $ctr1 ${2:-$USER/lighttpd}
+buildah commit "$ctr1" "${2:-$USER/lighttpd}"
EOF
$ chmod +x lighttpd.sh
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 6542b0377..250d75b24 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -11,8 +11,8 @@ import (
"syscall"
"time"
+ "github.com/containers/buildah/pkg/chrootuser"
"github.com/containers/buildah/util"
- "github.com/containers/libpod/pkg/chrootuser"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -32,6 +32,10 @@ type AddAndCopyOptions struct {
// If the sources include directory trees, Hasher will be passed
// tar-format archives of the directory trees.
Hasher io.Writer
+ // Excludes contents in the .dockerignore file
+ Excludes []string
+ // current directory on host
+ ContextDir string
}
// addURL copies the contents of the source URL to the destination. This is
@@ -84,6 +88,7 @@ func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer)
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
+ excludes := DockerIgnoreHelper(options.Excludes, options.ContextDir)
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
@@ -139,6 +144,71 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher)
copyWithTar := b.copyWithTar(&containerOwner, options.Hasher)
untarPath := b.untarPath(nil, options.Hasher)
+ err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// user returns the user (and group) information which the destination should belong to.
+func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
+ if userspec == "" {
+ userspec = b.User()
+ }
+
+ uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
+ u := specs.User{
+ UID: uid,
+ GID: gid,
+ Username: userspec,
+ }
+ if !strings.Contains(userspec, ":") {
+ groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
+ if err2 != nil {
+ if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
+ err = err2
+ }
+ } else {
+ u.AdditionalGids = groups
+ }
+
+ }
+ return u, err
+}
+
+// DockerIgnore struct keep info from .dockerignore
+type DockerIgnore struct {
+ ExcludePath string
+ IsExcluded bool
+}
+
+// DockerIgnoreHelper returns the lines from .dockerignore file without the comments
+// and reverses the order
+func DockerIgnoreHelper(lines []string, contextDir string) []DockerIgnore {
+ var excludes []DockerIgnore
+ // the last match of a file in the .dockerignore determines whether it is included or excluded
+ // reverse the order
+ for i := len(lines) - 1; i >= 0; i-- {
+ exclude := lines[i]
+ // ignore the comment in .dockerignore
+ if strings.HasPrefix(exclude, "#") || len(exclude) == 0 {
+ continue
+ }
+ excludeFlag := true
+ if strings.HasPrefix(exclude, "!") {
+ exclude = strings.TrimPrefix(exclude, "!")
+ excludeFlag = false
+ }
+ excludes = append(excludes, DockerIgnore{ExcludePath: filepath.Join(contextDir, exclude), IsExcluded: excludeFlag})
+ }
+ if len(excludes) != 0 {
+ excludes = append(excludes, DockerIgnore{ExcludePath: filepath.Join(contextDir, ".dockerignore"), IsExcluded: true})
+ }
+ return excludes
+}
+
+func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
for _, src := range source {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
// We assume that source is a file, and we're copying
@@ -167,6 +237,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if len(glob) == 0 {
return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
}
+ outer:
for _, gsrc := range glob {
esrc, err := filepath.EvalSymlinks(gsrc)
if err != nil {
@@ -185,11 +256,59 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
return errors.Wrapf(err, "error creating directory %q", dest)
}
logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
- if err = copyWithTar(esrc, dest); err != nil {
- return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
+ if len(excludes) == 0 {
+ if err = copyWithTar(esrc, dest); err != nil {
+ return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
+ }
+ continue
+ }
+ err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ for _, exclude := range excludes {
+ match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path))
+ if err != nil {
+ return err
+ }
+ if !match {
+ continue
+ }
+ if exclude.IsExcluded {
+ return nil
+ }
+ break
+ }
+ // combine the filename with the dest directory
+ fpath := strings.TrimPrefix(path, options.ContextDir)
+ if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
+ return errors.Wrapf(err, "error copying %q to %q", path, dest)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
}
continue
}
+
+ for _, exclude := range excludes {
+ match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), esrc)
+ if err != nil {
+ return err
+ }
+ if !match {
+ continue
+ }
+ if exclude.IsExcluded {
+ continue outer
+ }
+ break
+ }
+
if !extract || !archive.IsArchivePath(esrc) {
// This source is a file, and either it's not an
// archive, or we don't care whether or not it's an
@@ -214,29 +333,3 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
return nil
}
-
-// user returns the user (and group) information which the destination should belong to.
-func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
- if userspec == "" {
- userspec = b.User()
- }
-
- uid, gid, err := chrootuser.GetUser(mountPoint, userspec)
- u := specs.User{
- UID: uid,
- GID: gid,
- Username: userspec,
- }
- if !strings.Contains(userspec, ":") {
- groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
- if err2 != nil {
- if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
- err = err2
- }
- } else {
- u.AdditionalGids = groups
- }
-
- }
- return u, err
-}
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 8f5364632..febc3d0d1 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -336,10 +336,10 @@ type BuilderOptions struct {
// needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added.
Registry string
- // PullBlobDirectory is the name of a directory in which we'll attempt
+ // BlobDirectory is the name of a directory in which we'll attempt
// to store copies of layer blobs that we pull down, if any. It should
// already exist.
- PullBlobDirectory string
+ BlobDirectory string
// Mount signals to NewBuilder() that the container should be mounted
// immediately.
Mount bool
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index da28bea61..9ab90196c 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -114,7 +114,7 @@ type PushOptions struct {
func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) {
var imgID string
- systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ systemContext := getSystemContext(b.store, options.SystemContext, options.SignaturePolicyPath)
blocked, err := isReferenceBlocked(dest, systemContext)
if err != nil {
@@ -152,8 +152,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
- var maybeCachedSrc types.ImageReference = src
- var maybeCachedDest types.ImageReference = dest
+ var maybeCachedSrc = types.ImageReference(src)
+ var maybeCachedDest = types.ImageReference(dest)
if options.BlobDirectory != "" {
compress := types.PreserveOriginal
if options.Compression != archive.Uncompressed {
@@ -178,7 +178,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
systemContext.DirForceCompress = true
}
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
+ if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(b.store, options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
if len(options.AdditionalTags) > 0 {
@@ -230,7 +230,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Push copies the contents of the image to a new location.
func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
- systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
if options.Quiet {
options.ReportWriter = nil // Turns off logging output
@@ -256,7 +256,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
if err != nil {
return nil, "", err
}
- var maybeCachedSrc types.ImageReference = src
+ var maybeCachedSrc = types.ImageReference(src)
if options.BlobDirectory != "" {
compress := types.PreserveOriginal
if options.Compression != archive.Uncompressed {
@@ -276,7 +276,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
systemContext.DirForceCompress = true
}
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil {
+ if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.Store, options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil {
return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index e369dc407..667a1a484 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -5,9 +5,10 @@ import (
"os"
"path/filepath"
+ "github.com/containers/buildah/unshare"
cp "github.com/containers/image/copy"
"github.com/containers/image/types"
- "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/storage"
)
const (
@@ -17,33 +18,16 @@ const (
DOCKER = "docker"
)
-// userRegistriesFile is the path to the per user registry configuration file.
-var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf")
-
-func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
- sourceCtx := &types.SystemContext{}
+func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options {
+ sourceCtx := getSystemContext(store, nil, "")
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
- } else {
- if rootless.IsRootless() {
- if _, err := os.Stat(userRegistriesFile); err == nil {
- sourceCtx.SystemRegistriesConfPath = userRegistriesFile
- }
-
- }
}
- destinationCtx := &types.SystemContext{}
+ destinationCtx := getSystemContext(store, nil, "")
if destinationSystemContext != nil {
*destinationCtx = *destinationSystemContext
- } else {
- if rootless.IsRootless() {
- if _, err := os.Stat(userRegistriesFile); err == nil {
- destinationCtx.SystemRegistriesConfPath = userRegistriesFile
- }
- }
}
-
return &cp.Options{
ReportWriter: reportWriter,
SourceCtx: sourceCtx,
@@ -52,7 +36,7 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference
}
}
-func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
+func getSystemContext(store storage.Store, defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext {
sc := &types.SystemContext{}
if defaults != nil {
*sc = *defaults
@@ -60,11 +44,16 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string)
if signaturePolicyPath != "" {
sc.SignaturePolicyPath = signaturePolicyPath
}
- if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() {
- if _, err := os.Stat(userRegistriesFile); err == nil {
- sc.SystemRegistriesConfPath = userRegistriesFile
+ if store != nil {
+ if sc.BlobInfoCacheDir == "" {
+ sc.BlobInfoCacheDir = filepath.Join(store.GraphRoot(), "cache")
+ }
+ if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() {
+ userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf")
+ if _, err := os.Stat(userRegistriesFile); err == nil {
+ sc.SystemRegistriesConfPath = userRegistriesFile
+ }
}
-
}
return sc
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index f50b11f6c..b1e30ca6a 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -11,6 +11,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
+ "sort"
"strconv"
"strings"
"time"
@@ -20,6 +21,7 @@ import (
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
is "github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/transports/alltransports"
@@ -171,14 +173,12 @@ type BuildOptions struct {
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
-// interface.
+// interface. It coordinates the entire build by using one StageExecutors to
+// handle each stage of the build.
type Executor struct {
- index int
- name string
- named map[string]*Executor
+ stages map[string]*StageExecutor
store storage.Store
contextDir string
- builder *buildah.Builder
pullPolicy buildah.PullPolicy
registry string
ignoreUnrecognizedInstructions bool
@@ -196,11 +196,6 @@ type Executor struct {
err io.Writer
signaturePolicyPath string
systemContext *types.SystemContext
- mountPoint string
- preserved int
- volumes imagebuilder.VolumeSet
- volumeCache map[string]string
- volumeCacheInfo map[string]os.FileInfo
reportWriter io.Writer
isolation buildah.Isolation
namespaceOptions []buildah.NamespaceOption
@@ -217,16 +212,46 @@ type Executor struct {
onbuild []string
layers bool
topLayers []string
- noCache bool
+ useCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
- containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build
imageMap map[string]string // Used to map images that we create to handle the AS construct.
- copyFrom string // Used to keep track of the --from flag from COPY and ADD
blobDirectory string
+ excludes []string
+ unusedArgs map[string]struct{}
}
-// builtinAllowedBuildArgs is list of built-in allowed build args
+// StageExecutor bundles up what we need to know when executing one stage of a
+// (possibly multi-stage) build.
+// Each stage may need to produce an image to be used as the base in a later
+// stage (with the last stage's image being the end product of the build), and
+// it may need to leave its working container in place so that the container's
+// root filesystem's contents can be used as the source for a COPY instruction
+// in a later stage.
+// Each stage has its own base image, so it starts with its own configuration
+// and set of volumes.
+// If we're naming the result of the build, only the last stage will apply that
+// name to the image that it produces.
+type StageExecutor struct {
+ executor *Executor
+ index int
+ stages int
+ name string
+ builder *buildah.Builder
+ preserved int
+ volumes imagebuilder.VolumeSet
+ volumeCache map[string]string
+ volumeCacheInfo map[string]os.FileInfo
+ mountPoint string
+ copyFrom string // Used to keep track of the --from flag from COPY and ADD
+ output string
+ containerIDs []string
+}
+
+// builtinAllowedBuildArgs is list of built-in allowed build args. Normally we
+// complain if we're given values for arguments which have no corresponding ARG
+// instruction in the Dockerfile, since that's usually an indication of a user
+// error, but for these values we make exceptions and ignore them.
var builtinAllowedBuildArgs = map[string]bool{
"HTTP_PROXY": true,
"http_proxy": true,
@@ -238,63 +263,70 @@ var builtinAllowedBuildArgs = map[string]bool{
"no_proxy": true,
}
-// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
-func (b *Executor) withName(name string, index int, from string) *Executor {
- if b.named == nil {
- b.named = make(map[string]*Executor)
- }
- copied := *b
- copied.index = index
- copied.name = name
- child := &copied
- b.named[name] = child
- b.named[from] = child
+// startStage creates a new stage executor that will be referenced whenever a
+// COPY or ADD statement uses a --from=NAME flag.
+func (b *Executor) startStage(name string, index, stages int, from, output string) *StageExecutor {
+ if b.stages == nil {
+ b.stages = make(map[string]*StageExecutor)
+ }
+ stage := &StageExecutor{
+ executor: b,
+ index: index,
+ stages: stages,
+ name: name,
+ volumeCache: make(map[string]string),
+ volumeCacheInfo: make(map[string]os.FileInfo),
+ output: output,
+ }
+ b.stages[name] = stage
+ b.stages[from] = stage
if idx := strconv.Itoa(index); idx != name {
- b.named[idx] = child
+ b.stages[idx] = stage
}
- return child
+ return stage
}
-// Preserve informs the executor that from this point on, it needs to ensure
-// that only COPY and ADD instructions can modify the contents of this
+// Preserve informs the stage executor that from this point on, it needs to
+// ensure that only COPY and ADD instructions can modify the contents of this
// directory or anything below it.
-// The Executor handles this by caching the contents of directories which have
-// been marked this way before executing a RUN instruction, invalidating that
-// cache when an ADD or COPY instruction sets any location under the directory
-// as the destination, and using the cache to reset the contents of the
-// directory tree after processing each RUN instruction.
+// The StageExecutor handles this by caching the contents of directories which
+// have been marked this way before executing a RUN instruction, invalidating
+// that cache when an ADD or COPY instruction sets any location under the
+// directory as the destination, and using the cache to reset the contents of
+// the directory tree after processing each RUN instruction.
// It would be simpler if we could just mark the directory as a read-only bind
// mount of itself during Run(), but the directory is expected to be remain
-// writeable, even if any changes within it are ultimately discarded.
-func (b *Executor) Preserve(path string) error {
+// writeable while the RUN instruction is being handled, even if any changes
+// made within the directory are ultimately discarded.
+func (s *StageExecutor) Preserve(path string) error {
logrus.Debugf("PRESERVE %q", path)
- if b.volumes.Covers(path) {
+ if s.volumes.Covers(path) {
// This path is already a subdirectory of a volume path that
// we're already preserving, so there's nothing new to be done
// except ensure that it exists.
- archivedPath := filepath.Join(b.mountPoint, path)
+ archivedPath := filepath.Join(s.mountPoint, path)
if err := os.MkdirAll(archivedPath, 0755); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
}
- if err := b.volumeCacheInvalidate(path); err != nil {
+ if err := s.volumeCacheInvalidate(path); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
}
return nil
}
// Figure out where the cache for this volume would be stored.
- b.preserved++
- cacheDir, err := b.store.ContainerDirectory(b.builder.ContainerID)
+ s.preserved++
+ cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
if err != nil {
return errors.Errorf("unable to locate temporary directory for container")
}
- cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", b.preserved))
+ cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
// Save info about the top level of the location that we'll be archiving.
- archivedPath := filepath.Join(b.mountPoint, path)
+ archivedPath := filepath.Join(s.mountPoint, path)
// Try and resolve the symlink (if one exists)
// Set archivedPath and path based on whether a symlink is found or not
- if symLink, err := resolveSymlink(b.mountPoint, path); err == nil {
- archivedPath = filepath.Join(b.mountPoint, symLink)
+ if symLink, err := resolveSymlink(s.mountPoint, path); err == nil {
+ archivedPath = filepath.Join(s.mountPoint, symLink)
path = symLink
} else {
return errors.Wrapf(err, "error reading symbolic link to %q", path)
@@ -311,20 +343,20 @@ func (b *Executor) Preserve(path string) error {
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
return errors.Wrapf(err, "error reading info about volume path %q", archivedPath)
}
- b.volumeCacheInfo[path] = st
- if !b.volumes.Add(path) {
+ s.volumeCacheInfo[path] = st
+ if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should work.
return errors.Errorf("error adding %q to the volume cache", path)
}
- b.volumeCache[path] = cacheFile
+ s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are now supplanted by this one.
removed := []string{}
- for cachedPath := range b.volumeCache {
+ for cachedPath := range s.volumeCache {
// Walk our list of cached volumes, and check that they're
// still in the list of locations that we need to cache.
found := false
- for _, volume := range b.volumes {
+ for _, volume := range s.volumes {
if volume == cachedPath {
// We need to keep this volume's cache.
found = true
@@ -339,47 +371,47 @@ func (b *Executor) Preserve(path string) error {
}
// Actually remove the caches that we decided to remove.
for _, cachedPath := range removed {
- archivedPath := filepath.Join(b.mountPoint, cachedPath)
- logrus.Debugf("no longer need cache of %q in %q", archivedPath, b.volumeCache[cachedPath])
- if err := os.Remove(b.volumeCache[cachedPath]); err != nil {
+ archivedPath := filepath.Join(s.mountPoint, cachedPath)
+ logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath])
+ if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
- return errors.Wrapf(err, "error removing %q", b.volumeCache[cachedPath])
+ return errors.Wrapf(err, "error removing %q", s.volumeCache[cachedPath])
}
- delete(b.volumeCache, cachedPath)
+ delete(s.volumeCache, cachedPath)
}
return nil
}
// Remove any volume cache item which will need to be re-saved because we're
// writing to part of it.
-func (b *Executor) volumeCacheInvalidate(path string) error {
+func (s *StageExecutor) volumeCacheInvalidate(path string) error {
invalidated := []string{}
- for cachedPath := range b.volumeCache {
+ for cachedPath := range s.volumeCache {
if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) {
invalidated = append(invalidated, cachedPath)
}
}
for _, cachedPath := range invalidated {
- if err := os.Remove(b.volumeCache[cachedPath]); err != nil {
+ if err := os.Remove(s.volumeCache[cachedPath]); err != nil {
if os.IsNotExist(err) {
continue
}
- return errors.Wrapf(err, "error removing volume cache %q", b.volumeCache[cachedPath])
+ return errors.Wrapf(err, "error removing volume cache %q", s.volumeCache[cachedPath])
}
- archivedPath := filepath.Join(b.mountPoint, cachedPath)
- logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, b.volumeCache[cachedPath])
- delete(b.volumeCache, cachedPath)
+ archivedPath := filepath.Join(s.mountPoint, cachedPath)
+ logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, s.volumeCache[cachedPath])
+ delete(s.volumeCache, cachedPath)
}
return nil
}
// Save the contents of each of the executor's list of volumes for which we
// don't already have a cache file.
-func (b *Executor) volumeCacheSave() error {
- for cachedPath, cacheFile := range b.volumeCache {
- archivedPath := filepath.Join(b.mountPoint, cachedPath)
+func (s *StageExecutor) volumeCacheSave() error {
+ for cachedPath, cacheFile := range s.volumeCache {
+ archivedPath := filepath.Join(s.mountPoint, cachedPath)
_, err := os.Stat(cacheFile)
if err == nil {
logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
@@ -411,9 +443,9 @@ func (b *Executor) volumeCacheSave() error {
}
// Restore the contents of each of the executor's list of volumes.
-func (b *Executor) volumeCacheRestore() error {
- for cachedPath, cacheFile := range b.volumeCache {
- archivedPath := filepath.Join(b.mountPoint, cachedPath)
+func (s *StageExecutor) volumeCacheRestore() error {
+ for cachedPath, cacheFile := range s.volumeCache {
+ archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
@@ -430,7 +462,7 @@ func (b *Executor) volumeCacheRestore() error {
if err != nil {
return errors.Wrapf(err, "error extracting archive at %q", archivedPath)
}
- if st, ok := b.volumeCacheInfo[cachedPath]; ok {
+ if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
return errors.Wrapf(err, "error restoring permissions on %q", archivedPath)
}
@@ -447,10 +479,14 @@ func (b *Executor) volumeCacheRestore() error {
// Copy copies data into the working tree. The "Download" field is how
// imagebuilder tells us the instruction was "ADD" and not "COPY".
-func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
+func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
for _, copy := range copies {
- logrus.Debugf("COPY %#v, %#v", excludes, copy)
- if err := b.volumeCacheInvalidate(copy.Dest); err != nil {
+ if copy.Download {
+ logrus.Debugf("ADD %#v, %#v", excludes, copy)
+ } else {
+ logrus.Debugf("COPY %#v, %#v", excludes, copy)
+ }
+ if err := s.volumeCacheInvalidate(copy.Dest); err != nil {
return err
}
sources := []string{}
@@ -458,21 +494,23 @@ func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
sources = append(sources, src)
} else if len(copy.From) > 0 {
- if other, ok := b.named[copy.From]; ok && other.index < b.index {
+ if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
sources = append(sources, filepath.Join(other.mountPoint, src))
} else {
return errors.Errorf("the stage %q has not been built", copy.From)
}
} else {
- sources = append(sources, filepath.Join(b.contextDir, src))
+ sources = append(sources, filepath.Join(s.executor.contextDir, src))
}
}
options := buildah.AddAndCopyOptions{
- Chown: copy.Chown,
+ Chown: copy.Chown,
+ ContextDir: s.executor.contextDir,
+ Excludes: s.executor.excludes,
}
- if err := b.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
+ if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
return err
}
}
@@ -493,14 +531,14 @@ func convertMounts(mounts []Mount) []specs.Mount {
return specmounts
}
-// Run executes a RUN instruction using the working container as a root
-// directory.
-func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
+// Run executes a RUN instruction using the stage's current working container
+// as a root directory.
+func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
logrus.Debugf("RUN %#v, %#v", run, config)
- if b.builder == nil {
+ if s.builder == nil {
return errors.Errorf("no build container available")
}
- stdin := b.in
+ stdin := s.executor.in
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
@@ -511,20 +549,20 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
}
options := buildah.RunOptions{
Hostname: config.Hostname,
- Runtime: b.runtime,
- Args: b.runtimeArgs,
+ Runtime: s.executor.runtime,
+ Args: s.executor.runtimeArgs,
NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "",
- Mounts: convertMounts(b.transientMounts),
+ Mounts: convertMounts(s.executor.transientMounts),
Env: config.Env,
User: config.User,
WorkingDir: config.WorkingDir,
Entrypoint: config.Entrypoint,
Cmd: config.Cmd,
Stdin: stdin,
- Stdout: b.out,
- Stderr: b.err,
- Quiet: b.quiet,
- NamespaceOptions: b.namespaceOptions,
+ Stdout: s.executor.out,
+ Stderr: s.executor.err,
+ Quiet: s.executor.quiet,
+ NamespaceOptions: s.executor.namespaceOptions,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
@@ -536,11 +574,11 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
if run.Shell {
args = append([]string{"/bin/sh", "-c"}, args...)
}
- if err := b.volumeCacheSave(); err != nil {
+ if err := s.volumeCacheSave(); err != nil {
return err
}
- err := b.builder.Run(args, options)
- if err2 := b.volumeCacheRestore(); err2 != nil {
+ err := s.builder.Run(args, options)
+ if err2 := s.volumeCacheRestore(); err2 != nil {
if err == nil {
return err2
}
@@ -550,10 +588,10 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error {
// UnrecognizedInstruction is called when we encounter an instruction that the
// imagebuilder parser didn't understand.
-func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error {
+func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command)
err := fmt.Sprintf(errStr+"%#v", step)
- if b.ignoreUnrecognizedInstructions {
+ if s.executor.ignoreUnrecognizedInstructions {
logrus.Debugf(err)
return nil
}
@@ -572,9 +610,15 @@ func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error {
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
+ excludes, err := imagebuilder.ParseDockerignore(options.ContextDirectory)
+ if err != nil {
+ return nil, err
+ }
+
exec := Executor{
store: store,
contextDir: options.ContextDirectory,
+ excludes: excludes,
pullPolicy: options.PullPolicy,
registry: options.Registry,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
@@ -588,8 +632,6 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
additionalTags: options.AdditionalTags,
signaturePolicyPath: options.SignaturePolicyPath,
systemContext: options.SystemContext,
- volumeCache: make(map[string]string),
- volumeCacheInfo: make(map[string]os.FileInfo),
log: options.Log,
in: options.In,
out: options.Out,
@@ -608,10 +650,12 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
labels: append([]string{}, options.Labels...),
annotations: append([]string{}, options.Annotations...),
layers: options.Layers,
- noCache: options.NoCache,
+ useCache: !options.NoCache,
removeIntermediateCtrs: options.RemoveIntermediateCtrs,
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ imageMap: make(map[string]string),
blobDirectory: options.BlobDirectory,
+ unusedArgs: make(map[string]struct{}),
}
if exec.err == nil {
exec.err = os.Stderr
@@ -628,12 +672,18 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
fmt.Fprintf(exec.err, prefix+format+suffix, args...)
}
}
+ for arg := range options.Args {
+ if _, isBuiltIn := builtinAllowedBuildArgs[arg]; !isBuiltIn {
+ exec.unusedArgs[arg] = struct{}{}
+ }
+ }
return &exec, nil
}
-// Prepare creates a working container based on specified image, or if one
-// isn't specified, the first FROM instruction we can find in the parsed tree.
-func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from string) error {
+// Prepare creates a working container based on the specified image, or if one
+// isn't specified, the first argument passed to the first FROM instruction we
+// can find in the stage's parsed tree.
+func (s *StageExecutor) Prepare(ctx context.Context, stage imagebuilder.Stage, from string) error {
ib := stage.Builder
node := stage.Node
@@ -646,7 +696,8 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
from = base
}
displayFrom := from
- // stage.Name will be a string of integers for all stages without an "AS" clause
+
+ // stage.Name will be a numeric string for all stages without an "AS" clause
asImageName := stage.Name
if asImageName != "" {
if _, err := strconv.Atoi(asImageName); err != nil {
@@ -657,38 +708,36 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
}
logrus.Debugf("FROM %#v", displayFrom)
- if !b.quiet {
- b.log("FROM %s", displayFrom)
+ if !s.executor.quiet {
+ s.executor.log("FROM %s", displayFrom)
}
builderOptions := buildah.BuilderOptions{
Args: ib.Args,
FromImage: from,
- PullPolicy: b.pullPolicy,
- Registry: b.registry,
- PullBlobDirectory: b.blobDirectory,
- SignaturePolicyPath: b.signaturePolicyPath,
- ReportWriter: b.reportWriter,
- SystemContext: b.systemContext,
- Isolation: b.isolation,
- NamespaceOptions: b.namespaceOptions,
- ConfigureNetwork: b.configureNetwork,
- CNIPluginPath: b.cniPluginPath,
- CNIConfigDir: b.cniConfigDir,
- IDMappingOptions: b.idmappingOptions,
- CommonBuildOpts: b.commonBuildOptions,
- DefaultMountsFilePath: b.defaultMountsFilePath,
- Format: b.outputFormat,
- }
-
- var builder *buildah.Builder
- var err error
- // Check and see if the image was declared previously with
- // an AS clause in the Dockerfile.
- if asImageFound, ok := b.imageMap[from]; ok {
+ PullPolicy: s.executor.pullPolicy,
+ Registry: s.executor.registry,
+ BlobDirectory: s.executor.blobDirectory,
+ SignaturePolicyPath: s.executor.signaturePolicyPath,
+ ReportWriter: s.executor.reportWriter,
+ SystemContext: s.executor.systemContext,
+ Isolation: s.executor.isolation,
+ NamespaceOptions: s.executor.namespaceOptions,
+ ConfigureNetwork: s.executor.configureNetwork,
+ CNIPluginPath: s.executor.cniPluginPath,
+ CNIConfigDir: s.executor.cniConfigDir,
+ IDMappingOptions: s.executor.idmappingOptions,
+ CommonBuildOpts: s.executor.commonBuildOptions,
+ DefaultMountsFilePath: s.executor.defaultMountsFilePath,
+ Format: s.executor.outputFormat,
+ }
+
+ // Check and see if the image is a pseudonym for the end result of a
+ // previous stage, named by an AS clause in the Dockerfile.
+ if asImageFound, ok := s.executor.imageMap[from]; ok {
builderOptions.FromImage = asImageFound
}
- builder, err = buildah.NewBuilder(ctx, b.store, builderOptions)
+ builder, err := buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
return errors.Wrapf(err, "error creating build container")
}
@@ -749,45 +798,43 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
}
return errors.Wrapf(err, "error mounting new container")
}
- b.mountPoint = mountPoint
- b.builder = builder
+ s.mountPoint = mountPoint
+ s.builder = builder
// Add the top layer of this image to b.topLayers so we can keep track of them
// when building with cached images.
- b.topLayers = append(b.topLayers, builder.TopLayer)
+ s.executor.topLayers = append(s.executor.topLayers, builder.TopLayer)
logrus.Debugln("Container ID:", builder.ContainerID)
return nil
}
-// Delete deletes the working container, if we have one. The Executor object
-// should not be used to build another image, as the name of the output image
-// isn't resettable.
-func (b *Executor) Delete() (err error) {
- if b.builder != nil {
- err = b.builder.Delete()
- b.builder = nil
+// Delete deletes the stage's working container, if we have one.
+func (s *StageExecutor) Delete() (err error) {
+ if s.builder != nil {
+ err = s.builder.Delete()
+ s.builder = nil
}
return err
}
// resolveNameToImageRef creates a types.ImageReference from b.output
-func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
+func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
var (
imageRef types.ImageReference
err error
)
- if b.output != "" {
- imageRef, err = alltransports.ParseImageName(b.output)
+ if output != "" {
+ imageRef, err = alltransports.ParseImageName(output)
if err != nil {
- candidates, _, _, err := util.ResolveName(b.output, "", b.systemContext, b.store)
+ candidates, _, _, err := util.ResolveName(output, "", b.systemContext, b.store)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
+ return nil, errors.Wrapf(err, "error parsing target image name %q", output)
}
if len(candidates) == 0 {
- return nil, errors.Errorf("error parsing target image name %q", b.output)
+ return nil, errors.Errorf("error parsing target image name %q", output)
}
imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
if err2 != nil {
- return nil, errors.Wrapf(err, "error parsing target image name %q", b.output)
+ return nil, errors.Wrapf(err, "error parsing target image name %q", output)
}
return imageRef2, nil
}
@@ -800,175 +847,205 @@ func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) {
return imageRef, nil
}
-// Execute runs each of the steps in the parsed tree, in turn.
-func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error {
+// Execute runs each of the steps in the stage's parsed tree, in turn.
+func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage) (imgID string, ref reference.Canonical, err error) {
ib := stage.Builder
node := stage.Node
checkForLayers := true
children := node.Children
- commitName := b.output
- b.containerIDs = nil
+ commitName := s.output
- var leftoverArgs []string
- for arg := range b.builder.Args {
- if !builtinAllowedBuildArgs[arg] {
- leftoverArgs = append(leftoverArgs, arg)
- }
- }
for i, node := range node.Children {
+ // Resolve any arguments in this instruction so that we don't have to.
step := ib.Step()
if err := step.Resolve(node); err != nil {
- return errors.Wrapf(err, "error resolving step %+v", *node)
+ return "", nil, errors.Wrapf(err, "error resolving step %+v", *node)
}
logrus.Debugf("Parsed Step: %+v", *step)
+ if !s.executor.quiet {
+ s.executor.log("%s", step.Original)
+ }
+
+ // If this instruction declares an argument, remove it from the
+ // set of arguments that we were passed but which we haven't
+ // seen used by the Dockerfile.
if step.Command == "arg" {
- for index, arg := range leftoverArgs {
- for _, Arg := range step.Args {
- list := strings.SplitN(Arg, "=", 2)
- if arg == list[0] {
- leftoverArgs = append(leftoverArgs[:index], leftoverArgs[index+1:]...)
- }
+ for _, Arg := range step.Args {
+ list := strings.SplitN(Arg, "=", 2)
+ if _, stillUnused := s.executor.unusedArgs[list[0]]; stillUnused {
+ delete(s.executor.unusedArgs, list[0])
}
}
}
- if !b.quiet {
- b.log("%s", step.Original)
+
+ // Check if there's a --from if the step command is COPY or
+ // ADD. Set copyFrom to point to either the context directory
+ // or the root of the container from the specified stage.
+ s.copyFrom = s.executor.contextDir
+ for _, n := range step.Flags {
+ if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
+ arr := strings.Split(n, "=")
+ stage, ok := s.executor.stages[arr[1]]
+ if !ok {
+ return "", nil, errors.Errorf("%s --from=%s: no stage found with that name", step.Command, arr[1])
+ }
+ s.copyFrom = stage.mountPoint
+ break
+ }
}
- requiresStart := false
- if i < len(node.Children)-1 {
- requiresStart = ib.RequiresStart(&parser.Node{Children: node.Children[i+1:]})
+
+ // Determine if there are any RUN instructions to be run after
+ // this step. If not, we won't have to bother preserving the
+ // contents of any volumes declared between now and when we
+ // finish.
+ noRunsRemaining := false
+ if i < len(children)-1 {
+ noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
}
- if !b.layers && !b.noCache {
- err := ib.Run(step, b, requiresStart)
+ // If we're doing a single-layer build and not looking to take
+ // shortcuts using the cache, make a note of the instruction,
+ // process it, and then move on to the next instruction.
+ if !s.executor.layers && s.executor.useCache {
+ err := ib.Run(step, s, noRunsRemaining)
if err != nil {
- return errors.Wrapf(err, "error building at step %+v", *step)
+ return "", nil, errors.Wrapf(err, "error building at step %+v", *step)
}
continue
}
if i < len(children)-1 {
- b.output = ""
+ commitName = ""
} else {
- b.output = commitName
+ commitName = s.output
}
+ // TODO: this makes the tests happy, but it shouldn't be
+ // necessary unless this is the final stage.
+ commitName = s.executor.output
+
var (
cacheID string
err error
- imgID string
)
- b.copyFrom = ""
- // Check if --from exists in the step command of COPY or ADD
- // If it exists, set b.copyfrom to that value
- for _, n := range step.Flags {
- if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") {
- arr := strings.Split(n, "=")
- b.copyFrom = b.named[arr[1]].mountPoint
- break
- }
- }
-
- // checkForLayers will be true if b.layers is true and a cached intermediate image is found.
- // checkForLayers is set to false when either there is no cached image or a break occurs where
- // the instructions in the Dockerfile change from a previous build.
- // Don't check for cache if b.noCache is set to true.
- if checkForLayers && !b.noCache {
- cacheID, err = b.layerExists(ctx, node, children[:i])
+ // If we're using the cache, and we've managed to stick with
+ // cached images so far, look for one that matches what we
+ // expect to produce for this instruction.
+ if checkForLayers && s.executor.useCache {
+ cacheID, err = s.layerExists(ctx, node, children[:i])
if err != nil {
- return errors.Wrap(err, "error checking if cached image exists from a previous build")
+ return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
}
}
-
if cacheID != "" {
- fmt.Fprintf(b.out, "--> Using cache %s\n", cacheID)
+ fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID)
}
- // If a cache is found for the last step, that means nothing in the
- // Dockerfile changed. Just create a copy of the existing image and
- // save it with the new name passed in by the user.
+ // If a cache is found and we're on the last step, that means
+ // nothing in this phase changed. Just create a copy of the
+ // existing image and save it with the name that we were going
+ // to assign to the one that we were building, and make sure
+ // that the builder's root fs matches it.
if cacheID != "" && i == len(children)-1 {
- if err := b.copyExistingImage(ctx, cacheID); err != nil {
- return err
+ if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil {
+ return "", nil, err
}
- b.containerIDs = append(b.containerIDs, b.builder.ContainerID)
break
}
+ // If we didn't find a cached step that we could just reuse,
+ // process the instruction and commit the layer.
if cacheID == "" || !checkForLayers {
checkForLayers = false
- err := ib.Run(step, b, requiresStart)
+ err := ib.Run(step, s, noRunsRemaining)
if err != nil {
- return errors.Wrapf(err, "error building at step %+v", *step)
+ return "", nil, errors.Wrapf(err, "error building at step %+v", *step)
}
}
// Commit if no cache is found
if cacheID == "" {
- imgID, _, err = b.Commit(ctx, ib, getCreatedBy(node))
+ imgID, ref, err = s.Commit(ctx, ib, getCreatedBy(node), commitName)
if err != nil {
- return errors.Wrapf(err, "error committing container for step %+v", *step)
+ return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
if i == len(children)-1 {
- b.log("COMMIT %s", b.output)
+ s.executor.log("COMMIT %s", commitName)
}
} else {
- // Cache is found, assign imgID the id of the cached image so
- // it is used to create the container for the next step.
+ // If we did find a cache, reuse the cached image's ID
+ // as the basis for the container for the next step.
imgID = cacheID
}
- // Add container ID of successful intermediate container to b.containerIDs
- b.containerIDs = append(b.containerIDs, b.builder.ContainerID)
+
// Prepare for the next step with imgID as the new base image.
- if i != len(children)-1 {
- if err := b.Prepare(ctx, stage, imgID); err != nil {
- return errors.Wrap(err, "error preparing container for next step")
+ if i < len(children)-1 {
+ s.containerIDs = append(s.containerIDs, s.builder.ContainerID)
+ if err := s.Prepare(ctx, stage, imgID); err != nil {
+ return "", nil, errors.Wrap(err, "error preparing container for next step")
}
}
}
- if len(leftoverArgs) > 0 {
- fmt.Fprintf(b.out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
+
+ if s.executor.layers { // print out the final imageID if we're using layers flag
+ fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
}
- return nil
+
+ return imgID, ref, nil
}
-// copyExistingImage creates a copy of an image already in store
-func (b *Executor) copyExistingImage(ctx context.Context, cacheID string) error {
+// copyExistingImage creates a copy of an image already in the store
+func (s *StageExecutor) copyExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) {
// Get the destination Image Reference
- dest, err := b.resolveNameToImageRef()
+ dest, err := s.executor.resolveNameToImageRef(output)
if err != nil {
- return err
+ return "", nil, err
}
- policyContext, err := util.GetPolicyContext(b.systemContext)
+ policyContext, err := util.GetPolicyContext(s.executor.systemContext)
if err != nil {
- return err
+ return "", nil, err
}
defer policyContext.Destroy()
// Look up the source image, expecting it to be in local storage
- src, err := is.Transport.ParseStoreReference(b.store, cacheID)
+ src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
if err != nil {
- return errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
+ return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID)
}
- if _, err := cp.Image(ctx, policyContext, dest, src, nil); err != nil {
- return errors.Wrapf(err, "error copying image %q", cacheID)
+ manifestBytes, err := cp.Image(ctx, policyContext, dest, src, nil)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error copying image %q", cacheID)
}
- b.log("COMMIT %s", b.output)
- return nil
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID)
+ }
+ img, err := is.Transport.GetStoreImage(s.executor.store, dest)
+ if err != nil {
+ return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest))
+ }
+ s.executor.log("COMMIT %s", s.output)
+ var ref reference.Canonical
+ if dref := dest.DockerReference(); dref != nil {
+ if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
+ return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest))
+ }
+ }
+ return img.ID, ref, nil
}
// layerExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
-func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, children []*parser.Node) (string, error) {
+func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node, children []*parser.Node) (string, error) {
// Get the list of images available in the image store
- images, err := b.store.Images()
+ images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
for _, image := range images {
- layer, err := b.store.Layer(image.TopLayer)
+ layer, err := s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
@@ -976,8 +1053,8 @@ func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, child
// it means that this image is potentially a cached intermediate image from a previous
// build. Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
- if layer.Parent == b.topLayers[len(b.topLayers)-1] {
- history, err := b.getImageHistory(ctx, image.ID)
+ if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] {
+ history, err := s.executor.getImageHistory(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
}
@@ -985,7 +1062,7 @@ func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, child
if historyMatches(append(children, currNode), history) {
// This checks if the files copied during build have been changed if the node is
// a COPY or ADD command.
- filesMatch, err := b.copiedFilesMatch(currNode, history[len(history)-1].Created)
+ filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created)
if err != nil {
return "", errors.Wrapf(err, "error checking if copied files match")
}
@@ -1045,24 +1122,16 @@ func historyMatches(children []*parser.Node, history []v1.History) bool {
// getFilesToCopy goes through node to get all the src files that are copied, added or downloaded.
// It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix.
// Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character.
-func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) {
+func (s *StageExecutor) getFilesToCopy(node *parser.Node) ([]string, error) {
currNode := node.Next
var src []string
for currNode.Next != nil {
- if currNode.Next == nil {
- break
- }
if strings.HasPrefix(currNode.Value, "http://") || strings.HasPrefix(currNode.Value, "https://") {
src = append(src, currNode.Value)
currNode = currNode.Next
continue
}
- if b.copyFrom != "" {
- src = append(src, filepath.Join(b.copyFrom, currNode.Value))
- currNode = currNode.Next
- continue
- }
- matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value))
+ matches, err := filepath.Glob(filepath.Join(s.copyFrom, currNode.Value))
if err != nil {
return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value)
}
@@ -1076,12 +1145,12 @@ func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) {
// If it is either of those two it checks the timestamps on all the files copied/added
// by the dockerfile. If the host version has a time stamp greater than the time stamp
// of the build, the build will not use the cached version and will rebuild.
-func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) {
+func (s *StageExecutor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) {
if node.Value != "add" && node.Value != "copy" {
return true, nil
}
- src, err := b.getFilesToCopy(node)
+ src, err := s.getFilesToCopy(node)
if err != nil {
return false, err
}
@@ -1102,12 +1171,7 @@ func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (
// Change the time format to ensure we don't run into a parsing error when converting again from string
// to time.Time. It is a known Go issue that the conversions cause errors sometimes, so specifying a particular
// time format here when converting to a string.
- // If the COPY has --from in the command, change the rootdir to mountpoint of the container it is copying from
- rootdir := b.contextDir
- if b.copyFrom != "" {
- rootdir = b.copyFrom
- }
- timeIsGreater, err := resolveModifiedTime(rootdir, item, historyTime.Format(time.RFC3339Nano))
+ timeIsGreater, err := resolveModifiedTime(s.copyFrom, item, historyTime.Format(time.RFC3339Nano))
if err != nil {
return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item)
}
@@ -1139,43 +1203,45 @@ func urlContentModified(url string, historyTime *time.Time) (bool, error) {
// Commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise.
-func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string) (string, reference.Canonical, error) {
- imageRef, err := b.resolveNameToImageRef()
+func (s *StageExecutor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy, output string) (string, reference.Canonical, error) {
+ imageRef, err := s.executor.resolveNameToImageRef(output)
if err != nil {
return "", nil, err
}
if ib.Author != "" {
- b.builder.SetMaintainer(ib.Author)
+ s.builder.SetMaintainer(ib.Author)
}
config := ib.Config()
- b.builder.SetCreatedBy(createdBy)
- b.builder.SetHostname(config.Hostname)
- b.builder.SetDomainname(config.Domainname)
- b.builder.SetUser(config.User)
- b.builder.ClearPorts()
+ if createdBy != "" {
+ s.builder.SetCreatedBy(createdBy)
+ }
+ s.builder.SetHostname(config.Hostname)
+ s.builder.SetDomainname(config.Domainname)
+ s.builder.SetUser(config.User)
+ s.builder.ClearPorts()
for p := range config.ExposedPorts {
- b.builder.SetPort(string(p))
+ s.builder.SetPort(string(p))
}
for _, envSpec := range config.Env {
spec := strings.SplitN(envSpec, "=", 2)
- b.builder.SetEnv(spec[0], spec[1])
+ s.builder.SetEnv(spec[0], spec[1])
}
- b.builder.SetCmd(config.Cmd)
- b.builder.ClearVolumes()
+ s.builder.SetCmd(config.Cmd)
+ s.builder.ClearVolumes()
for v := range config.Volumes {
- b.builder.AddVolume(v)
+ s.builder.AddVolume(v)
}
- b.builder.ClearOnBuild()
+ s.builder.ClearOnBuild()
for _, onBuildSpec := range config.OnBuild {
- b.builder.SetOnBuild(onBuildSpec)
+ s.builder.SetOnBuild(onBuildSpec)
}
- b.builder.SetWorkDir(config.WorkingDir)
- b.builder.SetEntrypoint(config.Entrypoint)
- b.builder.SetShell(config.Shell)
- b.builder.SetStopSignal(config.StopSignal)
+ s.builder.SetWorkDir(config.WorkingDir)
+ s.builder.SetEntrypoint(config.Entrypoint)
+ s.builder.SetShell(config.Shell)
+ s.builder.SetStopSignal(config.StopSignal)
if config.Healthcheck != nil {
- b.builder.SetHealthcheck(&buildahdocker.HealthConfig{
+ s.builder.SetHealthcheck(&buildahdocker.HealthConfig{
Test: append([]string{}, config.Healthcheck.Test...),
Interval: config.Healthcheck.Interval,
Timeout: config.Healthcheck.Timeout,
@@ -1183,79 +1249,124 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created
Retries: config.Healthcheck.Retries,
})
} else {
- b.builder.SetHealthcheck(nil)
+ s.builder.SetHealthcheck(nil)
}
- b.builder.ClearLabels()
+ s.builder.ClearLabels()
for k, v := range config.Labels {
- b.builder.SetLabel(k, v)
+ s.builder.SetLabel(k, v)
}
- for _, labelSpec := range b.labels {
+ for _, labelSpec := range s.executor.labels {
label := strings.SplitN(labelSpec, "=", 2)
if len(label) > 1 {
- b.builder.SetLabel(label[0], label[1])
+ s.builder.SetLabel(label[0], label[1])
} else {
- b.builder.SetLabel(label[0], "")
+ s.builder.SetLabel(label[0], "")
}
}
- for _, annotationSpec := range b.annotations {
+ for _, annotationSpec := range s.executor.annotations {
annotation := strings.SplitN(annotationSpec, "=", 2)
if len(annotation) > 1 {
- b.builder.SetAnnotation(annotation[0], annotation[1])
+ s.builder.SetAnnotation(annotation[0], annotation[1])
} else {
- b.builder.SetAnnotation(annotation[0], "")
+ s.builder.SetAnnotation(annotation[0], "")
}
}
if imageRef != nil {
logName := transports.ImageName(imageRef)
logrus.Debugf("COMMIT %q", logName)
- if !b.quiet && !b.layers && !b.noCache {
- b.log("COMMIT %s", logName)
+ if !s.executor.quiet && !s.executor.layers && s.executor.useCache {
+ s.executor.log("COMMIT %s", logName)
}
} else {
logrus.Debugf("COMMIT")
- if !b.quiet && !b.layers && !b.noCache {
- b.log("COMMIT")
+ if !s.executor.quiet && !s.executor.layers && s.executor.useCache {
+ s.executor.log("COMMIT")
}
}
- writer := b.reportWriter
- if b.layers || b.noCache {
+ writer := s.executor.reportWriter
+ if s.executor.layers || !s.executor.useCache {
writer = nil
}
options := buildah.CommitOptions{
- Compression: b.compression,
- SignaturePolicyPath: b.signaturePolicyPath,
- AdditionalTags: b.additionalTags,
+ Compression: s.executor.compression,
+ SignaturePolicyPath: s.executor.signaturePolicyPath,
+ AdditionalTags: s.executor.additionalTags,
ReportWriter: writer,
- PreferredManifestType: b.outputFormat,
- SystemContext: b.systemContext,
- IIDFile: b.iidfile,
- Squash: b.squash,
- BlobDirectory: b.blobDirectory,
- Parent: b.builder.FromImageID,
- }
- imgID, ref, _, err := b.builder.Commit(ctx, imageRef, options)
+ PreferredManifestType: s.executor.outputFormat,
+ SystemContext: s.executor.systemContext,
+ IIDFile: s.executor.iidfile,
+ Squash: s.executor.squash,
+ BlobDirectory: s.executor.blobDirectory,
+ Parent: s.builder.FromImageID,
+ }
+ imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
return "", nil, err
}
if options.IIDFile == "" && imgID != "" {
- fmt.Fprintf(b.out, "--> %s\n", imgID)
+ fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
+ }
+ var ref reference.Canonical
+ if dref := imageRef.DockerReference(); dref != nil {
+ if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
+ return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID)
+ }
}
return imgID, ref, nil
}
// Build takes care of the details of running Prepare/Execute/Commit/Delete
// over each of the one or more parsed Dockerfiles and stages.
-func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (string, reference.Canonical, error) {
+func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
if len(stages) == 0 {
- errors.New("error building: no stages to build")
+ return "", nil, errors.New("error building: no stages to build")
}
var (
- stageExecutor *Executor
- lastErr error
+ stageExecutor *StageExecutor
+ cleanupImages []string
)
- b.imageMap = make(map[string]string)
- stageCount := 0
- for _, stage := range stages {
+ cleanupStages := make(map[int]*StageExecutor)
+
+ cleanup := func() error {
+ var lastErr error
+ // Clean up any containers associated with the final container
+ // built by a stage, for stages that succeeded, since we no
+ // longer need their filesystem contents.
+ for _, stage := range cleanupStages {
+ if err := stage.Delete(); err != nil {
+ logrus.Debugf("Failed to cleanup stage containers: %v", err)
+ lastErr = err
+ }
+ }
+ cleanupStages = nil
+ // Clean up any intermediate containers associated with stages,
+ // since we're not keeping them for debugging.
+ if b.removeIntermediateCtrs {
+ if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
+ logrus.Debugf("Failed to cleanup intermediate containers: %v", err)
+ lastErr = err
+ }
+ }
+ // Remove images from stages except the last one, since we're
+ // not going to use them as a starting point for any new
+ // stages.
+ for i := range cleanupImages {
+ removeID := cleanupImages[len(cleanupImages)-i-1]
+ if _, err := b.store.DeleteImage(removeID, true); err != nil {
+ logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err)
+ if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer {
+ lastErr = err
+ }
+ }
+ }
+ cleanupImages = nil
+ return lastErr
+ }
+ defer cleanup()
+
+ for stageIndex, stage := range stages {
+ var lastErr error
+
ib := stage.Builder
node := stage.Node
base, err := ib.From(node)
@@ -1264,82 +1375,73 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
return "", nil, err
}
- stageExecutor = b.withName(stage.Name, stage.Position, base)
+ // If this is the last stage, then the image that we produce at
+ // its end should be given the desired output name.
+ output := ""
+ if stageIndex == len(stages)-1 {
+ output = b.output
+ }
+
+ stageExecutor = b.startStage(stage.Name, stage.Position, len(stages), base, output)
if err := stageExecutor.Prepare(ctx, stage, base); err != nil {
return "", nil, err
}
+
// Always remove the intermediate/build containers, even if the build was unsuccessful.
// If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs
// is true.
- if b.forceRmIntermediateCtrs || (!b.layers && !b.noCache) {
- defer stageExecutor.Delete()
+ if b.forceRmIntermediateCtrs || !b.layers {
+ cleanupStages[stage.Position] = stageExecutor
}
- if err := stageExecutor.Execute(ctx, stage); err != nil {
+ if imageID, ref, err = stageExecutor.Execute(ctx, stage); err != nil {
lastErr = err
}
-
- // Delete the successful intermediate containers if an error in the build
- // process occurs and b.removeIntermediateCtrs is true.
if lastErr != nil {
- if b.removeIntermediateCtrs {
- stageExecutor.deleteSuccessfulIntermediateCtrs()
- }
return "", nil, lastErr
}
- b.containerIDs = append(b.containerIDs, stageExecutor.containerIDs...)
- // If we've a stage.Name with alpha and not numeric, we've an
- // AS clause in play. Create an intermediate image for this
- // stage to be used by other FROM statements that will want
- // to use it later in the Dockerfile. Note the id in our map.
+ if !b.forceRmIntermediateCtrs && b.removeIntermediateCtrs {
+ cleanupStages[stage.Position] = stageExecutor
+ }
+
+ // If this is an intermediate stage, make a note to remove its
+ // image later.
if _, err := strconv.Atoi(stage.Name); err != nil {
- imgID, _, err := stageExecutor.Commit(ctx, stages[stageCount].Builder, "")
- if err != nil {
+ if imageID, ref, err = stageExecutor.Commit(ctx, stages[stageIndex].Builder, "", output); err != nil {
return "", nil, err
}
- b.imageMap[stage.Name] = imgID
+ b.imageMap[stage.Name] = imageID
+ cleanupImages = append(cleanupImages, imageID)
}
- stageCount++
+ }
+ if len(b.unusedArgs) > 0 {
+ unusedList := make([]string, 0, len(b.unusedArgs))
+ for k := range b.unusedArgs {
+ unusedList = append(unusedList, k)
+ }
+ sort.Strings(unusedList)
+ fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList)
}
- var imageRef reference.Canonical
- imageID := ""
-
- // Check if we have a one line Dockerfile making layers irrelevant
- // or the user told us to ignore layers.
+ // Check if we have a one line Dockerfile (i.e., single phase, no
+ // actual steps) making layers irrelevant, or the user told us to
+ // ignore layers.
singleLineDockerfile := (len(stages) < 2 && len(stages[0].Node.Children) < 1)
- ignoreLayers := singleLineDockerfile || !b.layers && !b.noCache
+ ignoreLayers := singleLineDockerfile || !b.layers && b.useCache
if ignoreLayers {
- imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "")
- if err != nil {
+ if imageID, ref, err = stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "", b.output); err != nil {
return "", nil, err
}
if singleLineDockerfile {
b.log("COMMIT %s", ref)
}
- imageID = imgID
- imageRef = ref
- }
- // If building with layers and b.removeIntermediateCtrs is true
- // only remove intermediate container for each step if an error
- // during the build process doesn't occur.
- // If the build is unsuccessful, the container created at the step
- // the failure happened will persist in the container store.
- // This if condition will be false if not building with layers and
- // the removal of intermediate/build containers will be handled by the
- // defer statement above.
- if b.removeIntermediateCtrs && (b.layers || b.noCache) {
- if err := b.deleteSuccessfulIntermediateCtrs(); err != nil {
- return "", nil, errors.Errorf("Failed to cleanup intermediate containers")
- }
}
- // Remove intermediate images that we created for AS clause handling
- for _, value := range b.imageMap {
- if _, err := b.store.DeleteImage(value, true); err != nil {
- logrus.Debugf("unable to remove intermediate image %q: %v", value, err)
- }
+
+ if err := cleanup(); err != nil {
+ return "", nil, err
}
- return imageID, imageRef, nil
+
+ return imageID, ref, nil
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
@@ -1450,7 +1552,6 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
// prepending a new FROM statement the Dockerfile that do not already have a corresponding
// FROM command within them.
func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser {
-
var newDockerfiles []io.ReadCloser
// fromMap contains the names of the images seen in a FROM
// line in the Dockerfiles. The boolean value just completes the map object.
@@ -1520,23 +1621,31 @@ func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser {
return newDockerfiles
}
-// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs
-// and deletes the containers associated with that ID.
+// deleteSuccessfulIntermediateCtrs goes through the container IDs in each
+// stage's containerIDs list and deletes the containers associated with those
+// IDs.
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
var lastErr error
- for _, ctr := range b.containerIDs {
- if err := b.store.DeleteContainer(ctr); err != nil {
- logrus.Errorf("error deleting build container %q: %v\n", ctr, err)
- lastErr = err
+ for _, s := range b.stages {
+ for _, ctr := range s.containerIDs {
+ if err := b.store.DeleteContainer(ctr); err != nil {
+ logrus.Errorf("error deleting build container %q: %v\n", ctr, err)
+ lastErr = err
+ }
}
+ // The stages map includes some stages under multiple keys, so
+ // clearing their lists after we process a given stage is
+ // necessary to avoid triggering errors that would occur if we
+ // tried to delete a given stage's containers multiple times.
+ s.containerIDs = nil
}
return lastErr
}
-func (b *Executor) EnsureContainerPath(path string) error {
- _, err := os.Stat(filepath.Join(b.mountPoint, path))
+func (s *StageExecutor) EnsureContainerPath(path string) error {
+ _, err := os.Stat(filepath.Join(s.mountPoint, path))
if err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(filepath.Join(b.mountPoint, path), 0755)
+ err = os.MkdirAll(filepath.Join(s.mountPoint, path), 0755)
}
if err != nil {
return errors.Wrapf(err, "error ensuring container path %q", path)
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 4f5301b73..35dc5438a 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -111,28 +111,3 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
func InitReexec() bool {
return buildah.InitReexec()
}
-
-// ReposToMap parses the specified repotags and returns a map with repositories
-// as keys and the corresponding arrays of tags as values.
-func ReposToMap(repotags []string) map[string][]string {
- // map format is repo -> tag
- repos := make(map[string][]string)
- for _, repo := range repotags {
- var repository, tag string
- if strings.Contains(repo, ":") {
- li := strings.LastIndex(repo, ":")
- repository = repo[0:li]
- tag = repo[li+1:]
- } else if len(repo) > 0 {
- repository = repo
- tag = "<none>"
- } else {
- logrus.Warnf("Found image with empty name")
- }
- repos[repository] = append(repos[repository], tag)
- }
- if len(repos) == 0 {
- repos["<none>"] = []string{"<none>"}
- }
- return repos
-}
diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go
index f5f156be2..418487438 100644
--- a/vendor/github.com/containers/buildah/import.go
+++ b/vendor/github.com/containers/buildah/import.go
@@ -17,7 +17,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage")
}
- uidmap, gidmap := convertStorageIDMaps(storage.DefaultStoreOptions.UIDMap, storage.DefaultStoreOptions.GIDMap)
+ storeopts, err := storage.DefaultStoreOptions(false, 0)
+ if err != nil {
+ return nil, err
+ }
+ uidmap, gidmap := convertStorageIDMaps(storeopts.UIDMap, storeopts.GIDMap)
ref, err := is.Transport.ParseStoreReference(store, imageID)
if err != nil {
@@ -83,7 +87,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
return nil, err
}
- systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath)
+ systemContext := getSystemContext(store, &types.SystemContext{}, options.SignaturePolicyPath)
builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID)
if err != nil {
@@ -115,7 +119,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im
return nil, errors.Errorf("image name must be specified")
}
- systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
_, img, err := util.FindImage(store, "", systemContext, options.Image)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
index 8cd5e4438..7c73da87e 100644
--- a/vendor/github.com/containers/buildah/info.go
+++ b/vendor/github.com/containers/buildah/info.go
@@ -11,7 +11,7 @@ import (
"strings"
"time"
- "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/buildah/unshare"
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/sirupsen/logrus"
@@ -47,7 +47,7 @@ func hostInfo() (map[string]interface{}, error) {
info["os"] = runtime.GOOS
info["arch"] = runtime.GOARCH
info["cpus"] = runtime.NumCPU()
- info["rootless"] = rootless.IsRootless()
+ info["rootless"] = unshare.IsRootless()
mi, err := system.ReadMemInfo()
if err != nil {
logrus.Error(err, "err reading memory info")
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 262c90220..29546caba 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -29,7 +29,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.Ima
ReportWriter: options.ReportWriter,
Store: store,
SystemContext: options.SystemContext,
- BlobDirectory: options.PullBlobDirectory,
+ BlobDirectory: options.BlobDirectory,
}
ref, err := pullImage(ctx, store, srcRef, pullOptions, sc)
if err != nil {
@@ -244,7 +244,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = ""
}
- systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
if options.FromImage != "" && options.FromImage != "scratch" {
ref, _, img, err = resolveImage(ctx, systemContext, store, options)
diff --git a/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
index c83dcc230..c83dcc230 100644
--- a/pkg/chrootuser/user.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
diff --git a/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
index 79b0b24b5..79b0b24b5 100644
--- a/pkg/chrootuser/user_basic.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go
diff --git a/pkg/chrootuser/user_linux.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
index 583eca569..583eca569 100644
--- a/pkg/chrootuser/user_linux.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go
diff --git a/vendor/github.com/containers/buildah/pkg/formats/formats.go b/vendor/github.com/containers/buildah/pkg/formats/formats.go
index 37f9b8a20..e95c32fc3 100644
--- a/vendor/github.com/containers/buildah/pkg/formats/formats.go
+++ b/vendor/github.com/containers/buildah/pkg/formats/formats.go
@@ -111,17 +111,13 @@ func (t StdoutTemplateArray) Out() error {
if err != nil {
return errors.Wrapf(err, parsingErrorStr)
}
- for i, raw := range t.Output {
+ for _, raw := range t.Output {
basicTmpl := tmpl.Funcs(basicFunctions)
if err := basicTmpl.Execute(w, raw); err != nil {
return errors.Wrapf(err, parsingErrorStr)
}
- if i != len(t.Output)-1 {
- fmt.Fprintln(w, "")
- continue
- }
+ fmt.Fprintln(w, "")
}
- fmt.Fprintln(w, "")
return w.Flush()
}
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index c309f686a..50318315f 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -9,7 +9,6 @@ import (
"github.com/spf13/cobra"
"net"
"os"
- "os/exec"
"path/filepath"
"strconv"
"strings"
@@ -393,25 +392,11 @@ func IDMappingOptions(c *cobra.Command, isolation buildah.Isolation) (usernsOpti
gidmap = uidmap
}
- useSlirp4netns := false
-
- if isolation == buildah.IsolationOCIRootless {
- _, err := exec.LookPath("slirp4netns")
- if execerr, ok := err.(*exec.Error); ok && !strings.Contains(execerr.Error(), "not found") {
- return nil, nil, errors.Wrapf(err, "cannot lookup slirp4netns %v", execerr)
- }
- if err == nil {
- useSlirp4netns = true
- } else {
- logrus.Warningf("could not find slirp4netns. Using host network namespace")
- }
- }
-
// By default, having mappings configured means we use a user
// namespace. Otherwise, we don't.
usernsOption := buildah.NamespaceOption{
Name: string(specs.UserNamespace),
- Host: len(uidmap) == 0 && len(gidmap) == 0 && !useSlirp4netns,
+ Host: len(uidmap) == 0 && len(gidmap) == 0,
}
// If the user specifically requested that we either use or don't use
// user namespaces, override that default.
diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
index 3b64f8952..97b681125 100644
--- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
+++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
@@ -7,7 +7,6 @@ import (
"path/filepath"
"strings"
- "github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage/pkg/idtools"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -133,12 +132,12 @@ func getMountsMap(path string) (string, string, error) {
}
// SecretMounts copies, adds, and mounts the secrets to the container root filesystem
-func SecretMounts(mountLabel, containerWorkingDir, mountFile string) []rspec.Mount {
- return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0)
+func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless bool) []rspec.Mount {
+ return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless)
}
// SecretMountsWithUIDGID specifies the uid/gid of the owner
-func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int) []rspec.Mount {
+func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless bool) []rspec.Mount {
var (
secretMounts []rspec.Mount
mountFiles []string
@@ -148,17 +147,8 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre
// Note for testing purposes only
if mountFile == "" {
mountFiles = append(mountFiles, []string{OverrideMountsFile, DefaultMountsFile}...)
- if rootless.IsRootless() {
+ if rootless {
mountFiles = append([]string{UserOverrideMountsFile}, mountFiles...)
- _, err := os.Stat(UserOverrideMountsFile)
- if err != nil && os.IsNotExist(err) {
- os.MkdirAll(filepath.Dir(UserOverrideMountsFile), 0755)
- if f, err := os.Create(UserOverrideMountsFile); err != nil {
- logrus.Warnf("could not create file %s: %v", UserOverrideMountsFile, err)
- } else {
- f.Close()
- }
- }
}
} else {
mountFiles = append(mountFiles, mountFile)
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index d3c9870af..5eec1b3dd 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
+
"strings"
"github.com/containers/buildah/pkg/blobcache"
@@ -153,13 +154,13 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
// Pull copies the contents of the image from somewhere else to local storage.
func Pull(ctx context.Context, imageName string, options PullOptions) error {
- systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath)
+ systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
boptions := BuilderOptions{
FromImage: imageName,
SignaturePolicyPath: options.SignaturePolicyPath,
SystemContext: systemContext,
- PullBlobDirectory: options.BlobDirectory,
+ BlobDirectory: options.BlobDirectory,
ReportWriter: options.ReportWriter,
}
@@ -236,7 +237,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
if err != nil {
return nil, errors.Wrapf(err, "error parsing image name %q", destName)
}
- var maybeCachedDestRef types.ImageReference = destRef
+ var maybeCachedDestRef = types.ImageReference(destRef)
if options.BlobDirectory != "" {
cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
if err != nil {
@@ -262,7 +263,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}()
logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
- if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil {
+ if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(store, options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err
}
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 2fa3cd572..cd6568b66 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -2,6 +2,7 @@ package buildah
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"io"
@@ -21,6 +22,7 @@ import (
"github.com/containers/buildah/bind"
"github.com/containers/buildah/chroot"
"github.com/containers/buildah/pkg/secrets"
+ "github.com/containers/buildah/unshare"
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
@@ -416,7 +418,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
}
// Get the list of secrets mounts.
- secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID))
+ secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless())
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
@@ -1720,7 +1722,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
unix.CloseOnExec(fd)
}
- cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
+ cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
@@ -1765,7 +1767,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
var netconf, undo []*libcni.NetworkConfigList
if isolation == IsolationOCIRootless {
- if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host {
+ if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" {
return setupRootlessNetwork(pid)
}
}
@@ -1835,7 +1837,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf)
teardown = func() {
for _, nc := range undo {
- if err = cni.DelNetworkList(nc, rtconf[nc]); err != nil {
+ if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil {
logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err)
}
}
@@ -1851,7 +1853,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo
CapabilityArgs: map[string]interface{}{},
}
// Bring it up.
- _, err := cni.AddNetworkList(nc, rtconf[nc])
+ _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc])
if err != nil {
return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command)
}
diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go
index 1072c2035..91f4bb54a 100644
--- a/vendor/github.com/containers/buildah/unshare/unshare.go
+++ b/vendor/github.com/containers/buildah/unshare/unshare.go
@@ -11,6 +11,7 @@ import (
"runtime"
"strconv"
"strings"
+ "sync"
"syscall"
"github.com/containers/buildah/util"
@@ -57,8 +58,8 @@ func (c *Cmd) Start() error {
// Please the libpod "rootless" package to find the expected env variables.
if os.Geteuid() != 0 {
- c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done")
- c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid()))
+ c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
+ c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
}
// Create the pipe for reading the child's PID.
@@ -272,3 +273,36 @@ func (c *Cmd) CombinedOutput() ([]byte, error) {
func (c *Cmd) Output() ([]byte, error) {
return nil, errors.New("unshare: Output() not implemented")
}
+
+var (
+ isRootlessOnce sync.Once
+ isRootless bool
+)
+
+const (
+ // UsernsEnvName is the environment variable, if set indicates in rootless mode
+ UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode
+func IsRootless() bool {
+ isRootlessOnce.Do(func() {
+ isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != ""
+ })
+ return isRootless
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS
+func GetRootlessUID() int {
+ uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+ if uidEnv != "" {
+ u, _ := strconv.Atoi(uidEnv)
+ return u
+ }
+ return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for the rootless containers
+func RootlessEnv() []string {
+ return append(os.Environ(), UsernsEnvName+"=")
+}
diff --git a/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go b/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go
new file mode 100644
index 000000000..3336fdad9
--- /dev/null
+++ b/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go
@@ -0,0 +1,27 @@
+// +build !linux
+
+package unshare
+
+import (
+ "os"
+)
+
+const (
+ // UsernsEnvName is the environment variable, if set indicates in rootless mode
+ UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode
+func IsRootless() bool {
+ return false
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS
+func GetRootlessUID() int {
+ return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for the rootless containers
+func RootlessEnv() []string {
+ return append(os.Environ(), UsernsEnvName+"=")
+}
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index 53c2e673e..327de39b2 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -2,14 +2,13 @@ github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
-github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image v1.5
+github.com/containernetworking/cni v0.7.0-rc2
+github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
github.com/boltdb/bolt v1.3.1
-github.com/containers/libpod v1.0
-github.com/containers/storage v1.11
+github.com/containers/storage v1.12.1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
@@ -39,7 +38,7 @@ github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1
-github.com/openshift/imagebuilder 705fe9255c57f8505efb9723a9ac4082b67973bc
+github.com/openshift/imagebuilder v1.1.0
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md
index 8fd6e513e..571e8342e 100644
--- a/vendor/github.com/containers/image/README.md
+++ b/vendor/github.com/containers/image/README.md
@@ -65,7 +65,7 @@ the primary downside is that creating new signatures with the Golang-only implem
- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
and impossible to import when this build tag is in use.
-## [Contributing](CONTRIBUTING.md)**
+## [Contributing](CONTRIBUTING.md)
Information about contributing to this project.
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index ba99336aa..3ed8a2b82 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -468,7 +468,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}
data := make([]copyLayerData, numLayers)
- copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *mpb.Bar) {
+ copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
defer copySemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
@@ -483,24 +483,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
}
} else {
- cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
+ cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
}
data[index] = cld
- bar.SetTotal(srcLayer.Size, true)
}
func() { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
defer progressCleanup()
- progressBars := make([]*mpb.Bar, numLayers)
- for i, srcInfo := range srcInfos {
- progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
- }
-
for i, srcLayer := range srcInfos {
copySemaphore.Acquire(ctx, 1)
- go copyLayerHelper(i, srcLayer, progressBars[i])
+ go copyLayerHelper(i, srcLayer, progressPool)
}
// Wait for all layers to be copied
@@ -592,7 +586,7 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded
-func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar {
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
// shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12
@@ -604,11 +598,12 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
}
bar := pool.AddBar(info.Size,
+ mpb.BarClearOnComplete(),
mpb.PrependDecorators(
decor.Name(prefix),
),
mpb.AppendDecorators(
- decor.CountersKibiByte("%.1f / %.1f"),
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
),
)
if c.progressOutput == ioutil.Discard {
@@ -629,7 +624,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool, progressCleanup := c.newProgressPool(ctx)
defer progressCleanup()
- bar := c.createProgressBar(progressPool, srcInfo, "config")
+ bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
if err != nil {
return types.BlobInfo{}, err
@@ -656,7 +651,7 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
@@ -668,6 +663,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
}
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+ bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
+ bar.SetTotal(0, true)
return blobInfo, cachedDiffID, nil
}
}
@@ -679,10 +676,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
}
defer srcStream.Close()
+ bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
+
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
if err != nil {
return types.BlobInfo{}, "", err
}
+
+ diffID := cachedDiffID
if diffIDIsNeeded {
select {
case <-ctx.Done():
@@ -695,11 +696,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
- return blobInfo, diffIDResult.digest, nil
+ diffID = diffIDResult.digest
}
- } else {
- return blobInfo, cachedDiffID, nil
}
+
+ bar.SetTotal(srcInfo.Size, true)
+ return blobInfo, diffID, nil
}
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 43eb22ba2..40f11c62a 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -197,7 +197,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
registry := reference.Domain(ref.ref)
- username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref))
+ username, password, err := config.GetAuthentication(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 38500dd0e..c116cbec3 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -16,7 +16,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
- "github.com/containers/image/pkg/blobinfocache"
+ "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -129,7 +129,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
- haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
+ haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index cee60f824..351e73ea1 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -11,7 +11,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
- "github.com/containers/image/pkg/blobinfocache"
+ "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -96,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
return nil, err
}
@@ -252,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 6fe2a9a32..cdff26e06 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -7,7 +7,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
- "github.com/containers/image/pkg/blobinfocache"
+ "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -61,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
index 4ee809134..91d4e9137 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
@@ -1,4 +1,5 @@
-package blobinfocache
+// Package boltdb implements a BlobInfoCache backed by BoltDB.
+package boltdb
import (
"fmt"
@@ -7,6 +8,7 @@ import (
"time"
"github.com/boltdb/bolt"
+ "github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@@ -81,22 +83,23 @@ func unlockPath(path string) {
}
}
-// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
-type boltDBCache struct {
+type cache struct {
path string
}
-// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
-// Most users should call DefaultCache instead.
-func NewBoltDBCache(path string) types.BlobInfoCache {
- return &boltDBCache{path: path}
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) types.BlobInfoCache {
+ return &cache{path: path}
}
// view returns runs the specified fn within a read-only transaction on the database.
-func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
// a read lock, blocking any future writes.
@@ -122,7 +125,7 @@ func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
}
// update returns runs the specified fn within a read-write transaction on the database.
-func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
lockPath(bdc.path)
defer unlockPath(bdc.path)
db, err := bolt.Open(bdc.path, 0600, nil)
@@ -139,7 +142,7 @@ func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
}
// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
-func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
if b := tx.Bucket(uncompressedDigestBucket); b != nil {
if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
d, err := digest.Parse(string(uncompressedBytes))
@@ -166,7 +169,7 @@ func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest)
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
var res digest.Digest
if err := bdc.view(func(tx *bolt.Tx) error {
res = bdc.uncompressedDigest(tx, anyDigest)
@@ -182,7 +185,7 @@ func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
if err != nil {
@@ -219,7 +222,7 @@ func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
-func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
if err != nil {
@@ -248,8 +251,8 @@ func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scop
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
-// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
-func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
+// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
b := scopeBucket.Bucket([]byte(digest.String()))
if b == nil {
return candidates
@@ -259,12 +262,12 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
if err := t.UnmarshalBinary(v); err != nil {
return err
}
- candidates = append(candidates, candidateWithTime{
- candidate: types.BICReplacementCandidate{
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: types.BICReplacementCandidate{
Digest: digest,
Location: types.BICLocationReference{Opaque: string(k)},
},
- lastSeen: t,
+ LastSeen: t,
})
return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
@@ -277,8 +280,8 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
-func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
- res := []candidateWithTime{}
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []prioritize.CandidateWithTime{}
var uncompressedDigestValue digest.Digest // = ""
if err := bdc.view(func(tx *bolt.Tx) error {
scopeBucket := tx.Bucket(knownLocationsBucket)
@@ -325,5 +328,5 @@ func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope
return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
- return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+ return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
index 459ae5c06..1e6e543b2 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -5,6 +5,8 @@ import (
"os"
"path/filepath"
+ "github.com/containers/image/pkg/blobinfocache/boltdb"
+ "github.com/containers/image/pkg/blobinfocache/memory"
"github.com/containers/image/types"
"github.com/sirupsen/logrus"
)
@@ -50,14 +52,14 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
dir, err := blobInfoCacheDir(sys, os.Geteuid())
if err != nil {
logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
- return NewMemoryCache()
+ return memory.New()
}
path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil {
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
- return NewMemoryCache()
+ return memory.New()
}
logrus.Debugf("Using blob info cache at %s", path)
- return NewBoltDBCache(path)
+ return boltdb.New(path)
}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go
index 02709aa1c..5479319de 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -1,4 +1,6 @@
-package blobinfocache
+// Package prioritize provides utilities for prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize
import (
"sort"
@@ -13,16 +15,16 @@ import (
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5
-// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
-type candidateWithTime struct {
- candidate types.BICReplacementCandidate // The replacement candidate
- lastSeen time.Time // Time the candidate was last known to exist (either read or written)
+// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type CandidateWithTime struct {
+ Candidate types.BICReplacementCandidate // The replacement candidate
+ LastSeen time.Time // Time the candidate was last known to exist (either read or written)
}
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct {
- cs []candidateWithTime // The entries to sort
+ cs []CandidateWithTime // The entries to sort
primaryDigest digest.Digest // The digest the user actually asked for
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}
@@ -40,35 +42,35 @@ func (css *candidateSortState) Less(i, j int) bool {
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
// First, deal with the primaryDigest/uncompressedDigest cases:
- if xi.candidate.Digest != xj.candidate.Digest {
+ if xi.Candidate.Digest != xj.Candidate.Digest {
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
- if xi.candidate.Digest == css.primaryDigest {
+ if xi.Candidate.Digest == css.primaryDigest {
return true
}
- if xj.candidate.Digest == css.primaryDigest {
+ if xj.Candidate.Digest == css.primaryDigest {
return false
}
if css.uncompressedDigest != "" {
- if xi.candidate.Digest == css.uncompressedDigest {
+ if xi.Candidate.Digest == css.uncompressedDigest {
return false
}
- if xj.candidate.Digest == css.uncompressedDigest {
+ if xj.Candidate.Digest == css.uncompressedDigest {
return true
}
}
- } else { // xi.candidate.Digest == xj.candidate.Digest
+ } else { // xi.Candidate.Digest == xj.Candidate.Digest
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
- if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
- return xi.lastSeen.After(xj.lastSeen)
+ if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
+ return xi.LastSeen.After(xj.LastSeen)
}
}
// Neither of the digests are primaryDigest/uncompressedDigest:
- if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
- return xi.lastSeen.After(xj.lastSeen)
+ if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
+ return xi.LastSeen.After(xj.LastSeen)
}
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
- return xi.candidate.Digest < xj.candidate.Digest
+ return xi.Candidate.Digest < xj.Candidate.Digest
}
func (css *candidateSortState) Swap(i, j int) {
@@ -77,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) {
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
-func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
sort.Sort(&candidateSortState{
@@ -92,17 +94,17 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime,
}
res := make([]types.BICReplacementCandidate, resLength)
for i := range res {
- res[i] = cs[i].candidate
+ res[i] = cs[i].Candidate
}
return res
}
-// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
-func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go
index cf6ca5263..dfb338634 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go
@@ -1,11 +1,13 @@
-package blobinfocache
+// Package memory implements an in-memory BlobInfoCache.
+package memory
import (
"sync"
"time"
+ "github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@@ -16,21 +18,25 @@ type locationKey struct {
blobDigest digest.Digest
}
-// memoryCache implements an in-memory-only BlobInfoCache
-type memoryCache struct {
- mutex *sync.Mutex // synchronizes concurrent accesses
+// cache implements an in-memory-only BlobInfoCache
+type cache struct {
+ mutex sync.Mutex
+ // The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
}
-// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
-// This is primarily intended for tests, but also used as a fallback if DefaultCache
-// can’t determine, or set up, the location for a persistent cache.
-// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
-func NewMemoryCache() types.BlobInfoCache {
- return &memoryCache{
- mutex: new(sync.Mutex),
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can’t determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache. instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+ return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -40,16 +46,14 @@ func NewMemoryCache() types.BlobInfoCache {
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
mem.mutex.Lock()
defer mem.mutex.Unlock()
- return mem.uncompressedDigest(anyDigest)
+ return mem.uncompressedDigestLocked(anyDigest)
}
-// uncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest {
+// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
if d, ok := mem.uncompressedDigests[anyDigest]; ok {
return d
}
@@ -67,7 +71,7 @@ func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
@@ -85,7 +89,7 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
-func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
@@ -97,16 +101,16 @@ func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scop
locationScope[location] = time.Now() // Possibly overwriting an older entry.
}
-// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
+// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations {
- candidates = append(candidates, candidateWithTime{
- candidate: types.BICReplacementCandidate{
+ candidates = append(candidates, prioritize.CandidateWithTime{
+ Candidate: types.BICReplacementCandidate{
Digest: digest,
Location: l,
},
- lastSeen: t,
+ LastSeen: t,
})
}
return candidates
@@ -118,14 +122,14 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
-func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
mem.mutex.Lock()
defer mem.mutex.Unlock()
- res := []candidateWithTime{}
+ res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
- if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" {
+ if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest {
@@ -137,5 +141,5 @@ func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope
}
}
}
- return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+ return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go
index 5658d89ff..e5dca25ce 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/none.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go
@@ -1,4 +1,5 @@
-package blobinfocache
+// Package none implements a dummy BlobInfoCache which records no data.
+package none
import (
"github.com/containers/image/types"
@@ -11,9 +12,10 @@ type noCache struct {
// NoCache implements BlobInfoCache by not recording any data.
//
-// This exists primarily for implementations of configGetter for Manifest.Inspect,
-// because configs only have one representation.
-// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
+// This exists primarily for implementations of configGetter for
+// Manifest.Inspect, because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a
+// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache types.BlobInfoCache = noCache{}
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
index 39c0f2a55..12398e385 100644
--- a/vendor/github.com/containers/image/signature/policy_config.go
+++ b/vendor/github.com/containers/image/signature/policy_config.go
@@ -30,7 +30,7 @@ import (
// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath
-// builtinDefaultPolicyPath is the policy pat used for DefaultPolicy().
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index 67dc6142b..b39d2bcc0 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -18,7 +18,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
- "github.com/containers/image/pkg/blobinfocache"
+ "github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@@ -595,12 +595,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
- // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seeen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
@@ -732,7 +732,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if err != nil {
return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
}
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
@@ -765,14 +765,14 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if err != nil {
return errors.Wrapf(err, "error computing manifest digest")
}
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
return err
}
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
@@ -781,7 +781,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
}
// Save the signatures, if we have any.
if len(s.signatures) > 0 {
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
index 02d2f5c08..3a6be6e00 100644
--- a/vendor/github.com/containers/image/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -4,6 +4,7 @@ package storage
import (
"fmt"
+ "os"
"path/filepath"
"strings"
@@ -180,7 +181,10 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
- options := storage.DefaultStoreOptions
+ options, err := storage.DefaultStoreOptions(os.Getuid() != 0, os.Getuid())
+ if err != nil {
+ return nil, err
+ }
options.UIDMap = s.defaultUIDMap
options.GIDMap = s.defaultGIDMap
store, err := storage.GetStore(options)
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 1c5b6b378..89b29722b 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -1,7 +1,7 @@
github.com/containers/image
github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage master
+github.com/containers/storage v1.12.1
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
index 2a3bc1b5c..9915cb2fa 100644
--- a/vendor/github.com/containers/image/version/version.go
+++ b/vendor/github.com/containers/image/version/version.go
@@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 5
+ VersionPatch = 6
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = ""
+ VersionDev = "-dev"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 10d628dbe..bbac78b60 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -71,7 +71,7 @@ type Container struct {
type ContainerStore interface {
FileBasedStore
MetadataStore
- BigDataStore
+ ContainerBigDataStore
FlaggableStore
// Create creates a container that has a specified ID (or generates a
@@ -456,7 +456,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
- if r.SetBigData(id, key, data) == nil {
+ if err = r.SetBigData(id, key, data); err == nil {
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
@@ -464,6 +464,8 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
+ } else {
+ return -1, err
}
}
return -1, ErrSizeUnknown
@@ -484,7 +486,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
- if r.SetBigData(id, key, data) == nil {
+ if err = r.SetBigData(id, key, data); err == nil {
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
@@ -492,6 +494,8 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if d, ok := c.BigDataDigests[key]; ok {
return d, nil
}
+ } else {
+ return "", err
}
}
return "", ErrDigestUnknown
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index aef6becfe..40b912bb3 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: containers.go
+// source: ./containers.go
package storage
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy.go
index 2617824c5..bcbc61284 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy.go
@@ -19,6 +19,7 @@ import (
"syscall"
"time"
+ "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
@@ -212,7 +213,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
return nil
}
- if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+ if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index 93505f5fb..38b5a3ef3 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -8,7 +8,6 @@ import (
"strings"
"time"
- "github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
@@ -117,7 +116,7 @@ type ImageStore interface {
ROImageStore
RWFileBasedStore
RWMetadataStore
- RWBigDataStore
+ RWImageBigDataStore
FlaggableStore
// Create creates an image that has a specified ID (or a random one) and
@@ -272,7 +271,7 @@ func (r *imageStore) Load() error {
}
}
}
- if shouldSave && !r.IsReadWrite() {
+ if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateImageNames
}
r.images = images
@@ -291,7 +290,7 @@ func (r *imageStore) Save() error {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
}
if !r.Locked() {
- return errors.New("image store is not locked")
+ return errors.New("image store is not locked for writing")
}
rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@@ -595,15 +594,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
- if r.SetBigData(id, key, data) == nil {
- image, ok := r.lookup(id)
- if !ok {
- return -1, ErrImageUnknown
- }
- if size, ok := image.BigDataSizes[key]; ok {
- return size, nil
- }
- }
+ return int64(len(data)), nil
}
return -1, ErrSizeUnknown
}
@@ -622,17 +613,6 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
- if data, err := r.BigData(id, key); err == nil && data != nil {
- if r.SetBigData(id, key, data) == nil {
- image, ok := r.lookup(id)
- if !ok {
- return "", ErrImageUnknown
- }
- if d, ok := image.BigDataDigests[key]; ok {
- return d, nil
- }
- }
- }
return "", ErrDigestUnknown
}
@@ -655,7 +635,7 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
return modified
}
-func (r *imageStore) SetBigData(id, key string, data []byte) error {
+func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
}
@@ -672,7 +652,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
}
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
- if newDigest, err = manifest.Digest(data); err != nil {
+ if digestManifest == nil {
+ return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
+ }
+ if newDigest, err = digestManifest(data); err != nil {
return errors.Wrapf(err, "error digesting manifest")
}
} else {
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index 6b40ebd59..539acfe93 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: images.go
+// source: ./images.go
package storage
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index d612f0459..110e737b2 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -229,6 +229,7 @@ type LayerStore interface {
type layerStore struct {
lockfile Locker
+ mountsLockfile Locker
rundir string
driver drivers.Driver
layerdir string
@@ -291,7 +292,6 @@ func (r *layerStore) Load() error {
idlist := []string{}
ids := make(map[string]*Layer)
names := make(map[string]*Layer)
- mounts := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if r.lockfile.IsReadWrite() {
@@ -319,39 +319,29 @@ func (r *layerStore) Load() error {
label.ReserveLabel(layer.MountLabel)
}
}
+ err = nil
}
- if shouldSave && !r.IsReadWrite() {
+ if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateLayerNames
}
- mpath := r.mountspath()
- data, err = ioutil.ReadFile(mpath)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- layerMounts := []layerMountPoint{}
- if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
- for _, mount := range layerMounts {
- if mount.MountPoint != "" {
- if layer, ok := ids[mount.ID]; ok {
- mounts[mount.MountPoint] = layer
- layer.MountPoint = mount.MountPoint
- layer.MountCount = mount.MountCount
- }
- }
- }
- }
r.layers = layers
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
- r.bymount = mounts
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
- err = nil
+ // Load and merge information about which layers are mounted, and where.
+ if r.IsReadWrite() {
+ r.mountsLockfile.RLock()
+ defer r.mountsLockfile.Unlock()
+ if err = r.loadMounts(); err != nil {
+ return err
+ }
+ }
// Last step: if we're writable, try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to
// actually delete.
- if r.IsReadWrite() {
+ if r.IsReadWrite() && r.Locked() {
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
@@ -373,12 +363,36 @@ func (r *layerStore) Load() error {
return err
}
+func (r *layerStore) loadMounts() error {
+ mounts := make(map[string]*Layer)
+ mpath := r.mountspath()
+ data, err := ioutil.ReadFile(mpath)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ layerMounts := []layerMountPoint{}
+ if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
+ for _, mount := range layerMounts {
+ if mount.MountPoint != "" {
+ if layer, ok := r.lookup(mount.ID); ok {
+ mounts[mount.MountPoint] = layer
+ layer.MountPoint = mount.MountPoint
+ layer.MountCount = mount.MountCount
+ }
+ }
+ }
+ err = nil
+ }
+ r.bymount = mounts
+ return err
+}
+
func (r *layerStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
if !r.Locked() {
- return errors.New("layer store is not locked")
+ return errors.New("layer store is not locked for writing")
}
rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@@ -388,6 +402,25 @@ func (r *layerStore) Save() error {
if err != nil {
return err
}
+ if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
+ return err
+ }
+ if !r.IsReadWrite() {
+ return nil
+ }
+ r.mountsLockfile.Lock()
+ defer r.mountsLockfile.Unlock()
+ defer r.mountsLockfile.Touch()
+ return r.saveMounts()
+}
+
+func (r *layerStore) saveMounts() error {
+ if !r.IsReadWrite() {
+ return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+ }
+ if !r.mountsLockfile.Locked() {
+ return errors.New("layer store mount information is not locked for writing")
+ }
mpath := r.mountspath()
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
return err
@@ -406,11 +439,10 @@ func (r *layerStore) Save() error {
if err != nil {
return err
}
- if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
+ if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
return err
}
- defer r.Touch()
- return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
+ return r.loadMounts()
}
func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) {
@@ -426,16 +458,21 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap
}
lockfile.Lock()
defer lockfile.Unlock()
+ mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
+ if err != nil {
+ return nil, err
+ }
rlstore := layerStore{
- lockfile: lockfile,
- driver: driver,
- rundir: rundir,
- layerdir: layerdir,
- byid: make(map[string]*Layer),
- bymount: make(map[string]*Layer),
- byname: make(map[string]*Layer),
- uidMap: copyIDMap(uidMap),
- gidMap: copyIDMap(gidMap),
+ lockfile: lockfile,
+ mountsLockfile: mountsLockfile,
+ driver: driver,
+ rundir: rundir,
+ layerdir: layerdir,
+ byid: make(map[string]*Layer),
+ bymount: make(map[string]*Layer),
+ byname: make(map[string]*Layer),
+ uidMap: copyIDMap(uidMap),
+ gidMap: copyIDMap(gidMap),
}
if err := rlstore.Load(); err != nil {
return nil, err
@@ -451,13 +488,14 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
lockfile.Lock()
defer lockfile.Unlock()
rlstore := layerStore{
- lockfile: lockfile,
- driver: driver,
- rundir: rundir,
- layerdir: layerdir,
- byid: make(map[string]*Layer),
- bymount: make(map[string]*Layer),
- byname: make(map[string]*Layer),
+ lockfile: lockfile,
+ mountsLockfile: nil,
+ driver: driver,
+ rundir: rundir,
+ layerdir: layerdir,
+ byid: make(map[string]*Layer),
+ bymount: make(map[string]*Layer),
+ byname: make(map[string]*Layer),
}
if err := rlstore.Load(); err != nil {
return nil, err
@@ -673,6 +711,16 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
}
func (r *layerStore) Mounted(id string) (int, error) {
+ if !r.IsReadWrite() {
+ return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ }
+ r.mountsLockfile.RLock()
+ defer r.mountsLockfile.Unlock()
+ if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+ if err = r.loadMounts(); err != nil {
+ return 0, err
+ }
+ }
layer, ok := r.lookup(id)
if !ok {
return 0, ErrLayerUnknown
@@ -684,13 +732,21 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
+ r.mountsLockfile.Lock()
+ defer r.mountsLockfile.Unlock()
+ if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+ if err = r.loadMounts(); err != nil {
+ return "", err
+ }
+ }
+ defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
return "", ErrLayerUnknown
}
if layer.MountCount > 0 {
layer.MountCount++
- return layer.MountPoint, r.Save()
+ return layer.MountPoint, r.saveMounts()
}
if options.MountLabel == "" {
options.MountLabel = layer.MountLabel
@@ -709,7 +765,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
layer.MountPoint = filepath.Clean(mountpoint)
layer.MountCount++
r.bymount[layer.MountPoint] = layer
- err = r.Save()
+ err = r.saveMounts()
}
return mountpoint, err
}
@@ -718,6 +774,14 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() {
return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
+ r.mountsLockfile.Lock()
+ defer r.mountsLockfile.Unlock()
+ if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+ if err = r.loadMounts(); err != nil {
+ return false, err
+ }
+ }
+ defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -731,7 +795,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
if layer.MountCount > 1 {
layer.MountCount--
- return true, r.Save()
+ return true, r.saveMounts()
}
err := r.driver.Put(id)
if err == nil || os.IsNotExist(err) {
@@ -740,12 +804,22 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
layer.MountCount--
layer.MountPoint = ""
- return false, r.Save()
+ return false, r.saveMounts()
}
return true, err
}
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
+ if !r.IsReadWrite() {
+ return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+ }
+ r.mountsLockfile.RLock()
+ defer r.mountsLockfile.Unlock()
+ if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+ if err = r.loadMounts(); err != nil {
+ return nil, nil, err
+ }
+ }
layer, ok := r.lookup(id)
if !ok {
return nil, nil, ErrLayerUnknown
@@ -862,14 +936,23 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
- // This check is needed for idempotency of delete where the layer could have been
- // already unmounted (since c/storage gives you that API directly)
- for layer.MountCount > 0 {
+ // The layer may already have been explicitly unmounted, but if not, we
+ // should try to clean that up before we start deleting anything at the
+ // driver level.
+ mountCount, err := r.Mounted(id)
+ if err != nil {
+ return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ }
+ for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil {
return err
}
+ mountCount, err = r.Mounted(id)
+ if err != nil {
+ return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+ }
}
- err := r.driver.Remove(id)
+ err = r.driver.Remove(id)
if err == nil {
os.Remove(r.tspath(id))
delete(r.byid, id)
@@ -1235,7 +1318,20 @@ func (r *layerStore) Touch() error {
}
func (r *layerStore) Modified() (bool, error) {
- return r.lockfile.Modified()
+ var mmodified bool
+ lmodified, err := r.lockfile.Modified()
+ if err != nil {
+ return lmodified, err
+ }
+ if r.IsReadWrite() {
+ r.mountsLockfile.RLock()
+ defer r.mountsLockfile.Unlock()
+ mmodified, err = r.mountsLockfile.Modified()
+ if err != nil {
+ return lmodified, err
+ }
+ }
+ return lmodified || mmodified, nil
}
func (r *layerStore) IsReadWrite() bool {
diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go
index 7f07b9ac5..3a1befcbe 100644
--- a/vendor/github.com/containers/storage/lockfile.go
+++ b/vendor/github.com/containers/storage/lockfile.go
@@ -35,7 +35,7 @@ type Locker interface {
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool
- // Locked() checks if lock is locked
+ // Locked() checks if lock is locked for writing by a thread in this process
Locked() bool
}
@@ -66,7 +66,10 @@ func getLockfile(path string, ro bool) (Locker, error) {
if lockfiles == nil {
lockfiles = make(map[string]Locker)
}
- cleanPath := filepath.Clean(path)
+ cleanPath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
+ }
if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
diff --git a/vendor/github.com/containers/storage/lockfile_darwin.go b/vendor/github.com/containers/storage/lockfile_otherunix.go
index 041d54c05..041d54c05 100644
--- a/vendor/github.com/containers/storage/lockfile_darwin.go
+++ b/vendor/github.com/containers/storage/lockfile_otherunix.go
diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go
index 0adbc49a5..a9dc64122 100644
--- a/vendor/github.com/containers/storage/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/lockfile_unix.go
@@ -32,7 +32,7 @@ func getLockFile(path string, ro bool) (Locker, error) {
}
return &lockfile{
stateMutex: &sync.Mutex{},
- writeMutex: &sync.Mutex{},
+ rwMutex: &sync.RWMutex{},
file: path,
fd: uintptr(fd),
lw: stringid.GenerateRandomID(),
@@ -42,10 +42,10 @@ func getLockFile(path string, ro bool) (Locker, error) {
}
type lockfile struct {
- // stateMutex is used to synchronize concurrent accesses
+ // rwMutex serializes concurrent reader-writer acquisitions in the same process space
+ rwMutex *sync.RWMutex
+ // stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
- // writeMutex is used to serialize and avoid recursive writer locks
- writeMutex *sync.Mutex
counter int64
file string
fd uintptr
@@ -65,23 +65,24 @@ func (l *lockfile) lock(l_type int16) {
Len: 0,
Pid: int32(os.Getpid()),
}
- if l_type == unix.F_WRLCK {
- // If we try to lock as a writer, lock the writerMutex first to
- // avoid multiple writer acquisitions of the same process.
- // Note: it's important to lock it prior to the stateMutex to
- // avoid a deadlock.
- l.writeMutex.Lock()
+ switch l_type {
+ case unix.F_RDLCK:
+ l.rwMutex.RLock()
+ case unix.F_WRLCK:
+ l.rwMutex.Lock()
+ default:
+ panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
}
l.stateMutex.Lock()
- l.locktype = l_type
if l.counter == 0 {
// Optimization: only use the (expensive) fcntl syscall when
- // the counter is 0. If it's greater than that, we're owning
- // the lock already and can only be a reader.
+ // the counter is 0. In this case, we're either the first
+ // reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
+ l.locktype = l_type
l.locked = true
l.counter++
l.stateMutex.Unlock()
@@ -133,19 +134,28 @@ func (l *lockfile) Unlock() {
time.Sleep(10 * time.Millisecond)
}
}
- if l.locktype == unix.F_WRLCK {
- l.writeMutex.Unlock()
+ if l.locktype == unix.F_RDLCK {
+ l.rwMutex.RUnlock()
+ } else {
+ l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
-// Locked checks if lockfile is locked.
+// Locked checks if lockfile is locked for writing by a thread in this process.
func (l *lockfile) Locked() bool {
- return l.locked
+ l.stateMutex.Lock()
+ defer l.stateMutex.Unlock()
+ return l.locked && (l.locktype == unix.F_WRLCK)
}
// Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error {
+ l.stateMutex.Lock()
+ if !l.locked || (l.locktype != unix.F_WRLCK) {
+ panic("attempted to update last-writer in lockfile without the write lock")
+ }
+ l.stateMutex.Unlock()
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@@ -170,6 +180,11 @@ func (l *lockfile) Touch() error {
// was loaded.
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
+ l.stateMutex.Lock()
+ if !l.locked {
+ panic("attempted to check last-writer in lockfile without locking it first")
+ }
+ l.stateMutex.Unlock()
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
@@ -179,7 +194,7 @@ func (l *lockfile) Modified() (bool, error) {
return true, err
}
if n != len(id) {
- return true, unix.ENOSPC
+ return true, nil
}
lw := l.lw
l.lw = string(id)
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index ba1704250..9cc717e5a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -636,7 +636,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if chownOpts == nil {
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
- if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 6e33ac38d..5602c7e21 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -7,6 +7,7 @@ import (
"strings"
"syscall"
+ "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
@@ -130,7 +131,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, err
}
- if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 9c591aff8..815589382 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -7,6 +7,9 @@ import (
"sort"
"strconv"
"strings"
+ "syscall"
+
+ "github.com/pkg/errors"
)
// IDMap contains a single entry for user namespace range remapping. An array
@@ -277,3 +280,18 @@ func parseSubidFile(path, username string) (ranges, error) {
}
return rangeList, nil
}
+
+func checkChownErr(err error, name string, uid, gid int) error {
+ if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
+ return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name)
+ }
+ return err
+}
+
+func SafeChown(name string, uid, gid int) error {
+ return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
+}
+
+func SafeLchown(name string, uid, gid int) error {
+ return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index b5870506a..bdbdf1b50 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -30,7 +30,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
paths = []string{path}
} else if err == nil && chownExisting {
// short-circuit--we were called with an existing directory and chown was requested
- return os.Chown(path, ownerUID, ownerGID)
+ return SafeChown(path, ownerUID, ownerGID)
} else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested
return nil
@@ -60,7 +60,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
- if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
index 05319eacc..1ae728a61 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -3,6 +3,7 @@
package reexec
import (
+ "context"
"os/exec"
"syscall"
@@ -20,11 +21,23 @@ func Self() string {
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
- return &exec.Cmd{
- Path: Self(),
- Args: args,
- SysProcAttr: &syscall.SysProcAttr{
- Pdeathsig: unix.SIGTERM,
- },
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Pdeathsig: unix.SIGTERM,
}
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary, and also
+// sets SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Pdeathsig: unix.SIGTERM,
+ }
+ return cmd
}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
index 778a720e3..1ecaa906f 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -3,6 +3,7 @@
package reexec
import (
+ "context"
"os/exec"
)
@@ -16,8 +17,14 @@ func Self() string {
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
- return &exec.Cmd{
- Path: Self(),
- Args: args,
- }
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
index 76edd8242..9d9374268 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -3,6 +3,7 @@
package reexec
import (
+ "context"
"os/exec"
)
@@ -10,3 +11,8 @@ import (
func Command(args ...string) *exec.Cmd {
return nil
}
+
+// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
index ca871c422..673ab476a 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -3,6 +3,7 @@
package reexec
import (
+ "context"
"os/exec"
)
@@ -16,8 +17,16 @@ func Self() string {
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
- return &exec.Cmd{
- Path: Self(),
- Args: args,
- }
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
}
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index d53703d6b..7e39e3959 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -32,7 +32,7 @@ import (
var (
// DefaultStoreOptions is a reasonable default set of options.
- DefaultStoreOptions StoreOptions
+ defaultStoreOptions StoreOptions
stores []*store
storesLock sync.Mutex
)
@@ -102,19 +102,21 @@ type ROBigDataStore interface {
BigDataNames(id string) ([]string, error)
}
-// A RWBigDataStore wraps up the read-write big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type RWBigDataStore interface {
- // SetBigData stores a (potentially large) piece of data associated with this
- // ID.
- SetBigData(id, key string, data []byte) error
+// A RWImageBigDataStore wraps up how we store big-data associated with images.
+type RWImageBigDataStore interface {
+ // SetBigData stores a (potentially large) piece of data associated
+ // with this ID.
+ // Pass github.com/containers/image/manifest.Digest as digestManifest
+ // to allow ByDigest to find images by their correct digests.
+ SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
-// A BigDataStore wraps up the most common big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type BigDataStore interface {
+// A ContainerBigDataStore wraps up how we store big-data associated with containers.
+type ContainerBigDataStore interface {
ROBigDataStore
- RWBigDataStore
+ // SetBigData stores a (potentially large) piece of data associated
+ // with this ID.
+ SetBigData(id, key string, data []byte) error
}
// A FlaggableStore can have flags set and cleared on items which it manages.
@@ -352,9 +354,11 @@ type Store interface {
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
- // SetImageBigData stores a (possibly large) chunk of named data associated
- // with an image.
- SetImageBigData(id, key string, data []byte) error
+ // SetImageBigData stores a (possibly large) chunk of named data
+ // associated with an image. Pass
+ // github.com/containers/image/manifest.Digest as digestManifest to
+ // allow ImagesByDigest to find images by their correct digests.
+ SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error)
@@ -546,14 +550,22 @@ type store struct {
// }
func GetStore(options StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
- options = DefaultStoreOptions
+ options = defaultStoreOptions
}
if options.GraphRoot != "" {
- options.GraphRoot = filepath.Clean(options.GraphRoot)
+ dir, err := filepath.Abs(options.GraphRoot)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
+ }
+ options.GraphRoot = dir
}
if options.RunRoot != "" {
- options.RunRoot = filepath.Clean(options.RunRoot)
+ dir, err := filepath.Abs(options.RunRoot)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
+ }
+ options.RunRoot = dir
}
storesLock.Lock()
@@ -1321,7 +1333,7 @@ func (s *store) Metadata(id string) (string, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1343,7 +1355,7 @@ func (s *store) Metadata(id string) (string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1359,7 +1371,7 @@ func (s *store) Metadata(id string) (string, error) {
if err != nil {
return "", err
}
- cstore.Lock()
+ cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
@@ -1383,7 +1395,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1409,7 +1421,7 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1436,7 +1448,7 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
stores = append([]ROImageStore{ristore}, stores...)
for _, r := range stores {
ristore := r
- ristore.Lock()
+ ristore.RLock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
@@ -1477,7 +1489,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
return nil, ErrImageUnknown
}
-func (s *store) SetImageBigData(id, key string, data []byte) error {
+func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
ristore, err := s.ImageStore()
if err != nil {
return err
@@ -1491,7 +1503,7 @@ func (s *store) SetImageBigData(id, key string, data []byte) error {
}
}
- return ristore.SetBigData(id, key, data)
+ return ristore.SetBigData(id, key, data, digestManifest)
}
func (s *store) ImageSize(id string) (int64, error) {
@@ -1507,7 +1519,7 @@ func (s *store) ImageSize(id string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1529,7 +1541,7 @@ func (s *store) ImageSize(id string) (int64, error) {
// Look for the image's record.
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1617,7 +1629,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1641,7 +1653,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
if err != nil {
return -1, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1705,7 +1717,7 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
return nil, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1721,7 +1733,7 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
if err != nil {
return -1, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1736,7 +1748,7 @@ func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
if err != nil {
return "", err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1751,7 +1763,7 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
if err != nil {
return nil, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1787,7 +1799,7 @@ func (s *store) Exists(id string) bool {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1809,7 +1821,7 @@ func (s *store) Exists(id string) bool {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1825,7 +1837,7 @@ func (s *store) Exists(id string) bool {
if err != nil {
return false
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1912,7 +1924,7 @@ func (s *store) Names(id string) ([]string, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1934,7 +1946,7 @@ func (s *store) Names(id string) ([]string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1950,7 +1962,7 @@ func (s *store) Names(id string) ([]string, error) {
if err != nil {
return nil, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1974,7 +1986,7 @@ func (s *store) Lookup(name string) (string, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1996,7 +2008,7 @@ func (s *store) Lookup(name string) (string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2012,7 +2024,7 @@ func (s *store) Lookup(name string) (string, error) {
if err != nil {
return "", err
}
- cstore.Lock()
+ cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
@@ -2464,7 +2476,7 @@ func (s *store) Mounted(id string) (int, error) {
if err != nil {
return 0, err
}
- rlstore.Lock()
+ rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
@@ -2507,7 +2519,7 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2532,7 +2544,7 @@ func (s *store) DiffSize(from, to string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2612,7 +2624,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2659,7 +2671,7 @@ func (s *store) LayerSize(id string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2678,7 +2690,7 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
if err != nil {
return nil, nil, err
}
- rlstore.Lock()
+ rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
@@ -2700,14 +2712,14 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
if err != nil {
return nil, nil, err
}
- rlstore.Lock()
+ rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, nil, err
}
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2738,7 +2750,7 @@ func (s *store) Layers() ([]Layer, error) {
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2767,7 +2779,7 @@ func (s *store) Images() ([]Image, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2789,7 +2801,7 @@ func (s *store) Containers() ([]Container, error) {
return nil, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2811,7 +2823,7 @@ func (s *store) Layer(id string) (*Layer, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2837,7 +2849,7 @@ func (s *store) Image(id string) (*Image, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2870,7 +2882,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2903,7 +2915,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
return nil, err
}
for _, store := range append([]ROImageStore{istore}, istores...) {
- store.Lock()
+ store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2924,7 +2936,7 @@ func (s *store) Container(id string) (*Container, error) {
if err != nil {
return nil, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2940,7 +2952,7 @@ func (s *store) ContainerLayerID(id string) (string, error) {
if err != nil {
return "", err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2963,7 +2975,7 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
if err != nil {
return nil, err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2988,7 +3000,7 @@ func (s *store) ContainerDirectory(id string) (string, error) {
if err != nil {
return "", err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -3015,7 +3027,7 @@ func (s *store) ContainerRunDirectory(id string) (string, error) {
return "", err
}
- rcstore.Lock()
+ rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -3205,8 +3217,20 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
return ret
}
-// DefaultConfigFile path to the system wide storage.conf file
-const DefaultConfigFile = "/etc/containers/storage.conf"
+// defaultConfigFile path to the system wide storage.conf file
+const defaultConfigFile = "/etc/containers/storage.conf"
+
+// DefaultConfigFile returns the path to the storage config file used
+func DefaultConfigFile(rootless bool) (string, error) {
+ if rootless {
+ home, err := homeDir()
+ if err != nil {
+ return "", errors.Wrapf(err, "cannot determine users homedir")
+ }
+ return filepath.Join(home, ".config/containers/storage.conf"), nil
+ }
+ return defaultConfigFile, nil
+}
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
@@ -3346,19 +3370,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
}
func init() {
- DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
- DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
- DefaultStoreOptions.GraphDriverName = ""
+ defaultStoreOptions.RunRoot = "/var/run/containers/storage"
+ defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+ defaultStoreOptions.GraphDriverName = ""
- ReloadConfigurationFile(DefaultConfigFile, &DefaultStoreOptions)
+ ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
}
func GetDefaultMountOptions() ([]string, error) {
mountOpts := []string{
".mountopt",
- fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName),
+ fmt.Sprintf("%s.mountopt", defaultStoreOptions.GraphDriverName),
}
- for _, option := range DefaultStoreOptions.GraphDriverOptions {
+ for _, option := range defaultStoreOptions.GraphDriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
new file mode 100644
index 000000000..e74956c9e
--- /dev/null
+++ b/vendor/github.com/containers/storage/utils.go
@@ -0,0 +1,234 @@
+package storage
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "strings"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
+func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
+ options := IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ }
+ if subGIDMap == "" && subUIDMap != "" {
+ subGIDMap = subUIDMap
+ }
+ if subUIDMap == "" && subGIDMap != "" {
+ subUIDMap = subGIDMap
+ }
+ if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
+ GIDMapSlice = UIDMapSlice
+ }
+ if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
+ UIDMapSlice = GIDMapSlice
+ }
+ if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+ UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+ }
+ if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+ GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+ }
+
+ if subUIDMap != "" && subGIDMap != "" {
+ mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
+ }
+ options.UIDMap = mappings.UIDs()
+ options.GIDMap = mappings.GIDs()
+ }
+ parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
+ }
+ parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice)
+ }
+ options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+ options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+ if len(options.UIDMap) > 0 {
+ options.HostUIDMapping = false
+ }
+ if len(options.GIDMap) > 0 {
+ options.HostGIDMapping = false
+ }
+ return &options, nil
+}
+
+// GetRootlessRuntimeDir returns the runtime directory when running as non root
+func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+ if runtimeDir == "" {
+ tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
+ st, err := system.Stat(tmpDir)
+ if err == nil && int(st.UID()) == os.Getuid() && st.Mode() == 0700 {
+ return tmpDir, nil
+ }
+ }
+ tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
+ if err := os.MkdirAll(tmpDir, 0700); err != nil {
+ logrus.Errorf("failed to create %s: %v", tmpDir, err)
+ } else {
+ return tmpDir, nil
+ }
+ home, err := homeDir()
+ if err != nil {
+ return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
+ }
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", errors.Wrapf(err, "cannot resolve %s", home)
+ }
+ return filepath.Join(resolvedHome, "rundir"), nil
+}
+
+// getRootlessDirInfo returns the parent path of where the storage for containers and
+// volumes will be in rootless mode
+func getRootlessDirInfo(rootlessUid int) (string, string, error) {
+ rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)
+ if err != nil {
+ return "", "", err
+ }
+
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home, err := homeDir()
+ if err != nil {
+ return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ // runc doesn't like symlinks in the rootfs path, and at least
+ // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "cannot resolve %s", home)
+ }
+ dataDir = filepath.Join(resolvedHome, ".local", "share")
+ }
+ return dataDir, rootlessRuntime, nil
+}
+
+// getRootlessStorageOpts returns the storage opts for containers running as non root
+func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {
+ var opts StoreOptions
+
+ dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)
+ if err != nil {
+ return opts, err
+ }
+ opts.RunRoot = rootlessRuntime
+ opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
+ if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+ opts.GraphDriverName = "overlay"
+ opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
+ } else {
+ opts.GraphDriverName = "vfs"
+ }
+ return opts, nil
+}
+
+type tomlOptionsConfig struct {
+ MountProgram string `toml:"mount_program"`
+}
+
+func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
+ config := new(tomlConfig)
+
+ config.Storage.Driver = storeOptions.GraphDriverName
+ config.Storage.RunRoot = storeOptions.RunRoot
+ config.Storage.GraphRoot = storeOptions.GraphRoot
+ for _, i := range storeOptions.GraphDriverOptions {
+ s := strings.Split(i, "=")
+ if s[0] == "overlay.mount_program" {
+ config.Storage.Options.MountProgram = s[1]
+ }
+ }
+
+ return config
+}
+
+// DefaultStoreOptions returns the default storage ops for containers
+func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
+ var (
+ defaultRootlessRunRoot string
+ defaultRootlessGraphRoot string
+ err error
+ )
+ storageOpts := defaultStoreOptions
+ if rootless {
+ storageOpts, err = getRootlessStorageOpts(rootlessUid)
+ if err != nil {
+ return storageOpts, err
+ }
+ }
+
+ storageConf, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return storageOpts, err
+ }
+ if _, err = os.Stat(storageConf); err == nil {
+ defaultRootlessRunRoot = storageOpts.RunRoot
+ defaultRootlessGraphRoot = storageOpts.GraphRoot
+ storageOpts = StoreOptions{}
+ ReloadConfigurationFile(storageConf, &storageOpts)
+ }
+
+ if !os.IsNotExist(err) {
+ return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
+ }
+
+ if rootless {
+ if err == nil {
+ // If the file did not specify a graphroot or runroot,
+ // set sane defaults so we don't try and use root-owned
+ // directories
+ if storageOpts.RunRoot == "" {
+ storageOpts.RunRoot = defaultRootlessRunRoot
+ }
+ if storageOpts.GraphRoot == "" {
+ storageOpts.GraphRoot = defaultRootlessGraphRoot
+ }
+ } else {
+ if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
+ return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf))
+ }
+ file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ if err != nil {
+ return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf)
+ }
+
+ tomlConfiguration := getTomlStorage(&storageOpts)
+ defer file.Close()
+ enc := toml.NewEncoder(file)
+ if err := enc.Encode(tomlConfiguration); err != nil {
+ os.Remove(storageConf)
+
+ return storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf)
+ }
+ }
+ }
+ return storageOpts, nil
+}
+
+func homeDir() (string, error) {
+ home := os.Getenv("HOME")
+ if home == "" {
+ usr, err := user.Current()
+ if err != nil {
+ return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
+ }
+ home = usr.HomeDir
+ }
+ return home, nil
+}
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index c143b049d..62a3f98ca 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -1,18 +1,15 @@
github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
-github.com/containers/image master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
-github.com/docker/libtrust master
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/klauspost/pgzip v1.2.1
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
-github.com/opencontainers/image-spec master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux v1.1
github.com/ostreedev/ostree-go master
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index dfc216389..a08be9ecd 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -1,6 +1,7 @@
package ocicni
import (
+ "context"
"errors"
"fmt"
"net"
@@ -511,7 +512,7 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork,
netconf, cninet := network.NetworkConfig, network.CNIConfig
logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
- res, err := cninet.AddNetworkList(netconf, rt)
+ res, err := cninet.AddNetworkList(context.Background(), netconf, rt)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
@@ -529,7 +530,7 @@ func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNet
netconf, cninet := network.NetworkConfig, network.CNIConfig
logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
- err = cninet.DelNetworkList(netconf, rt)
+ err = cninet.DelNetworkList(context.Background(), netconf, rt)
if err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
diff --git a/vendor/github.com/cri-o/ocicni/vendor.conf b/vendor/github.com/cri-o/ocicni/vendor.conf
new file mode 100644
index 000000000..d769d5177
--- /dev/null
+++ b/vendor/github.com/cri-o/ocicni/vendor.conf
@@ -0,0 +1,13 @@
+github.com/containernetworking/cni fbb95fff8a5239a4295c991efa8a397d43118f7e
+github.com/fsnotify/fsnotify 1485a34d5d5723fea214f5710708e19a831720e4
+github.com/sirupsen/logrus 787e519fa85519b874dead61020de598e9a23944
+github.com/onsi/ginkgo eea6ad008b96acdaa524f5b409513bf062b500ad
+github.com/onsi/gomega 90e289841c1ed79b7a598a7cd9959750cb5e89e2
+golang.org/x/net 63eda1eb0650888965ead1296efd04d0b2b61128
+gopkg.in/yaml.v2 51d6538a90f86fe93ac480b35f37b2be17fef232
+golang.org/x/text e3703dcdd614d2d7488fff034c75c551ea25da95
+golang.org/x/sys f49334f85ddcf0f08d7fb6dd7363e9e6d6b777eb
+github.com/hpcloud/tail a1dbeea552b7c8df4b542c66073e393de198a800
+gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
+gopkg.in/fsnotify/fsnotify.v1 c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
+github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e
diff --git a/vendor/github.com/ulule/deepcopier/LICENSE b/vendor/github.com/ulule/deepcopier/LICENSE
deleted file mode 100644
index d5c4ea02c..000000000
--- a/vendor/github.com/ulule/deepcopier/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Ulule
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/ulule/deepcopier/README.md b/vendor/github.com/ulule/deepcopier/README.md
deleted file mode 100644
index 25aafae8a..000000000
--- a/vendor/github.com/ulule/deepcopier/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# Deepcopier
-
-[![Build Status](https://secure.travis-ci.org/ulule/deepcopier.png?branch=master)](http://travis-ci.org/ulule/deepcopier)
-
-This package is meant to make copying of structs to/from others structs a bit easier.
-
-## Installation
-
-```bash
-go get -u github.com/ulule/deepcopier
-```
-
-## Usage
-
-```golang
-// Deep copy instance1 into instance2
-Copy(instance1).To(instance2)
-
-// Deep copy instance1 into instance2 and passes the following context (which
-// is basically a map[string]interface{}) as first argument
-// to methods of instance2 that defined the struct tag "context".
-Copy(instance1).WithContext(map[string]interface{}{"foo": "bar"}).To(instance2)
-
-// Deep copy instance2 into instance1
-Copy(instance1).From(instance2)
-
-// Deep copy instance2 into instance1 and passes the following context (which
-// is basically a map[string]interface{}) as first argument
-// to methods of instance1 that defined the struct tag "context".
-Copy(instance1).WithContext(map[string]interface{}{"foo": "bar"}).From(instance2)
-```
-
-Available options for `deepcopier` struct tag:
-
-| Option | Description |
-| --------- | -------------------------------------------------------------------- |
-| `field` | Field or method name in source instance |
-| `skip` | Ignores the field |
-| `context` | Takes a `map[string]interface{}` as first argument (for methods) |
-| `force` | Set the value of a `sql.Null*` field (instead of copying the struct) |
-
-**Options example:**
-
-```golang
-type Source struct {
- Name string
- SkipMe string
- SQLNullStringToSQLNullString sql.NullString
- SQLNullStringToString sql.NullString
-
-}
-
-func (Source) MethodThatTakesContext(c map[string]interface{}) string {
- return "whatever"
-}
-
-type Destination struct {
- FieldWithAnotherNameInSource string `deepcopier:"field:Name"`
- SkipMe string `deepcopier:"skip"`
- MethodThatTakesContext string `deepcopier:"context"`
- SQLNullStringToSQLNullString sql.NullString
- SQLNullStringToString string `deepcopier:"force"`
-}
-
-```
-
-Example:
-
-```golang
-package main
-
-import (
- "fmt"
-
- "github.com/ulule/deepcopier"
-)
-
-// Model
-type User struct {
- // Basic string field
- Name string
- // Deepcopier supports https://golang.org/pkg/database/sql/driver/#Valuer
- Email sql.NullString
-}
-
-func (u *User) MethodThatTakesContext(ctx map[string]interface{}) string {
- // do whatever you want
- return "hello from this method"
-}
-
-// Resource
-type UserResource struct {
- DisplayName string `deepcopier:"field:Name"`
- SkipMe string `deepcopier:"skip"`
- MethodThatTakesContext string `deepcopier:"context"`
- Email string `deepcopier:"force"`
-
-}
-
-func main() {
- user := &User{
- Name: "gilles",
- Email: sql.NullString{
- Valid: true,
- String: "gilles@example.com",
- },
- }
-
- resource := &UserResource{}
-
- deepcopier.Copy(user).To(resource)
-
- fmt.Println(resource.DisplayName)
- fmt.Println(resource.Email)
-}
-```
-
-Looking for more information about the usage?
-
-We wrote [an introduction article](https://github.com/ulule/deepcopier/blob/master/examples/rest-usage/README.rst).
-Have a look and feel free to give us your feedback.
-
-## Contributing
-
-* Ping us on twitter [@oibafsellig](https://twitter.com/oibafsellig), [@thoas](https://twitter.com/thoas)
-* Fork the [project](https://github.com/ulule/deepcopier)
-* Help us improving and fixing [issues](https://github.com/ulule/deepcopier/issues)
-
-Don't hesitate ;)
diff --git a/vendor/github.com/ulule/deepcopier/deepcopier.go b/vendor/github.com/ulule/deepcopier/deepcopier.go
deleted file mode 100644
index 8a6c70b55..000000000
--- a/vendor/github.com/ulule/deepcopier/deepcopier.go
+++ /dev/null
@@ -1,362 +0,0 @@
-package deepcopier
-
-import (
- "database/sql/driver"
- "fmt"
- "reflect"
- "strings"
-)
-
-const (
- // TagName is the deepcopier struct tag name.
- TagName = "deepcopier"
- // FieldOptionName is the from field option name for struct tag.
- FieldOptionName = "field"
- // ContextOptionName is the context option name for struct tag.
- ContextOptionName = "context"
- // SkipOptionName is the skip option name for struct tag.
- SkipOptionName = "skip"
- // ForceOptionName is the skip option name for struct tag.
- ForceOptionName = "force"
-)
-
-type (
- // TagOptions is a map that contains extracted struct tag options.
- TagOptions map[string]string
-
- // Options are copier options.
- Options struct {
- // Context given to WithContext() method.
- Context map[string]interface{}
- // Reversed reverses struct tag checkings.
- Reversed bool
- }
-)
-
-// DeepCopier deep copies a struct to/from a struct.
-type DeepCopier struct {
- dst interface{}
- src interface{}
- ctx map[string]interface{}
-}
-
-// Copy sets source or destination.
-func Copy(src interface{}) *DeepCopier {
- return &DeepCopier{src: src}
-}
-
-// WithContext injects the given context into the builder instance.
-func (dc *DeepCopier) WithContext(ctx map[string]interface{}) *DeepCopier {
- dc.ctx = ctx
- return dc
-}
-
-// To sets the destination.
-func (dc *DeepCopier) To(dst interface{}) error {
- dc.dst = dst
- return process(dc.dst, dc.src, Options{Context: dc.ctx})
-}
-
-// From sets the given the source as destination and destination as source.
-func (dc *DeepCopier) From(src interface{}) error {
- dc.dst = dc.src
- dc.src = src
- return process(dc.dst, dc.src, Options{Context: dc.ctx, Reversed: true})
-}
-
-// process handles copy.
-func process(dst interface{}, src interface{}, args ...Options) error {
- var (
- options = Options{}
- srcValue = reflect.Indirect(reflect.ValueOf(src))
- dstValue = reflect.Indirect(reflect.ValueOf(dst))
- srcFieldNames = getFieldNames(src)
- srcMethodNames = getMethodNames(src)
- )
-
- if len(args) > 0 {
- options = args[0]
- }
-
- if !dstValue.CanAddr() {
- return fmt.Errorf("destination %+v is unaddressable", dstValue.Interface())
- }
-
- for _, f := range srcFieldNames {
- var (
- srcFieldValue = srcValue.FieldByName(f)
- srcFieldType, srcFieldFound = srcValue.Type().FieldByName(f)
- srcFieldName = srcFieldType.Name
- dstFieldName = srcFieldName
- tagOptions TagOptions
- )
-
- if !srcFieldFound {
- continue
- }
-
- if options.Reversed {
- tagOptions = getTagOptions(srcFieldType.Tag.Get(TagName))
- if v, ok := tagOptions[FieldOptionName]; ok && v != "" {
- dstFieldName = v
- }
- } else {
- if name, opts := getRelatedField(dst, srcFieldName); name != "" {
- dstFieldName, tagOptions = name, opts
- }
- }
-
- if _, ok := tagOptions[SkipOptionName]; ok {
- continue
- }
-
- var (
- dstFieldType, dstFieldFound = dstValue.Type().FieldByName(dstFieldName)
- dstFieldValue = dstValue.FieldByName(dstFieldName)
- )
-
- if !dstFieldFound {
- continue
- }
-
- // Force option for empty interfaces and nullable types
- _, force := tagOptions[ForceOptionName]
-
- // Valuer -> ptr
- if isNullableType(srcFieldType.Type) && dstFieldValue.Kind() == reflect.Ptr && force {
- // We have same nullable type on both sides
- if srcFieldValue.Type().AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(srcFieldValue)
- continue
- }
-
- v, _ := srcFieldValue.Interface().(driver.Valuer).Value()
- if v == nil {
- continue
- }
-
- valueType := reflect.TypeOf(v)
-
- ptr := reflect.New(valueType)
- ptr.Elem().Set(reflect.ValueOf(v))
-
- if valueType.AssignableTo(dstFieldType.Type.Elem()) {
- dstFieldValue.Set(ptr)
- }
-
- continue
- }
-
- // Valuer -> value
- if isNullableType(srcFieldType.Type) {
- // We have same nullable type on both sides
- if srcFieldValue.Type().AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(srcFieldValue)
- continue
- }
-
- if force {
- v, _ := srcFieldValue.Interface().(driver.Valuer).Value()
- if v == nil {
- continue
- }
-
- rv := reflect.ValueOf(v)
- if rv.Type().AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(rv)
- }
- }
-
- continue
- }
-
- if dstFieldValue.Kind() == reflect.Interface {
- if force {
- dstFieldValue.Set(srcFieldValue)
- }
- continue
- }
-
- // Ptr -> Value
- if srcFieldType.Type.Kind() == reflect.Ptr && !srcFieldValue.IsNil() && dstFieldType.Type.Kind() != reflect.Ptr {
- indirect := reflect.Indirect(srcFieldValue)
-
- if indirect.Type().AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(indirect)
- continue
- }
- }
-
- // Other types
- if srcFieldType.Type.AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(srcFieldValue)
- }
- }
-
- for _, m := range srcMethodNames {
- name, opts := getRelatedField(dst, m)
- if name == "" {
- continue
- }
-
- if _, ok := opts[SkipOptionName]; ok {
- continue
- }
-
- method := reflect.ValueOf(src).MethodByName(m)
- if !method.IsValid() {
- return fmt.Errorf("method %s is invalid", m)
- }
-
- var (
- dstFieldType, _ = dstValue.Type().FieldByName(name)
- dstFieldValue = dstValue.FieldByName(name)
- _, withContext = opts[ContextOptionName]
- _, force = opts[ForceOptionName]
- )
-
- args := []reflect.Value{}
- if withContext {
- args = []reflect.Value{reflect.ValueOf(options.Context)}
- }
-
- var (
- result = method.Call(args)[0]
- resultInterface = result.Interface()
- resultValue = reflect.ValueOf(resultInterface)
- resultType = resultValue.Type()
- )
-
- // Value -> Ptr
- if dstFieldValue.Kind() == reflect.Ptr && force {
- ptr := reflect.New(resultType)
- ptr.Elem().Set(resultValue)
-
- if ptr.Type().AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(ptr)
- }
-
- continue
- }
-
- // Ptr -> value
- if resultValue.Kind() == reflect.Ptr && force {
- if resultValue.Elem().Type().AssignableTo(dstFieldType.Type) {
- dstFieldValue.Set(resultValue.Elem())
- }
-
- continue
- }
-
- if resultType.AssignableTo(dstFieldType.Type) && result.IsValid() {
- dstFieldValue.Set(result)
- }
- }
-
- return nil
-}
-
-// getTagOptions parses deepcopier tag field and returns options.
-func getTagOptions(value string) TagOptions {
- options := TagOptions{}
-
- for _, opt := range strings.Split(value, ";") {
- o := strings.Split(opt, ":")
-
- // deepcopier:"keyword; without; value;"
- if len(o) == 1 {
- options[o[0]] = ""
- }
-
- // deepcopier:"key:value; anotherkey:anothervalue"
- if len(o) == 2 {
- options[strings.TrimSpace(o[0])] = strings.TrimSpace(o[1])
- }
- }
-
- return options
-}
-
-// getRelatedField returns first matching field.
-func getRelatedField(instance interface{}, name string) (string, TagOptions) {
- var (
- value = reflect.Indirect(reflect.ValueOf(instance))
- fieldName string
- tagOptions TagOptions
- )
-
- for i := 0; i < value.NumField(); i++ {
- var (
- vField = value.Field(i)
- tField = value.Type().Field(i)
- tagOptions = getTagOptions(tField.Tag.Get(TagName))
- )
-
- if tField.Type.Kind() == reflect.Struct && tField.Anonymous {
- if n, o := getRelatedField(vField.Interface(), name); n != "" {
- return n, o
- }
- }
-
- if v, ok := tagOptions[FieldOptionName]; ok && v == name {
- return tField.Name, tagOptions
- }
-
- if tField.Name == name {
- return tField.Name, tagOptions
- }
- }
-
- return fieldName, tagOptions
-}
-
-// getMethodNames returns instance's method names.
-func getMethodNames(instance interface{}) []string {
- var methods []string
-
- t := reflect.TypeOf(instance)
- for i := 0; i < t.NumMethod(); i++ {
- methods = append(methods, t.Method(i).Name)
- }
-
- return methods
-}
-
-// getFieldNames returns instance's field names.
-func getFieldNames(instance interface{}) []string {
- var (
- fields []string
- v = reflect.Indirect(reflect.ValueOf(instance))
- t = v.Type()
- )
-
- if t.Kind() != reflect.Struct {
- return nil
- }
-
- for i := 0; i < v.NumField(); i++ {
- var (
- vField = v.Field(i)
- tField = v.Type().Field(i)
- )
-
- // Is exportable?
- if tField.PkgPath != "" {
- continue
- }
-
- if tField.Type.Kind() == reflect.Struct && tField.Anonymous {
- fields = append(fields, getFieldNames(vField.Interface())...)
- continue
- }
-
- fields = append(fields, tField.Name)
- }
-
- return fields
-}
-
-// isNullableType returns true if the given type is a nullable one.
-func isNullableType(t reflect.Type) bool {
- return t.ConvertibleTo(reflect.TypeOf((*driver.Valuer)(nil)).Elem())
-}