From a5443a532b0fc6bd787cbb472c0ad2f75447c9df Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Thu, 28 Mar 2019 10:30:09 +0100 Subject: vendor buildah, image, storage, cni Signed-off-by: Valentin Rothberg --- .../github.com/containernetworking/cni/README.md | 17 +- .../containernetworking/cni/libcni/api.go | 249 +++++-- .../containernetworking/cni/libcni/conf.go | 15 +- .../containernetworking/cni/pkg/invoke/delegate.go | 48 +- .../containernetworking/cni/pkg/invoke/exec.go | 15 +- .../containernetworking/cni/pkg/invoke/os_unix.go | 2 +- .../containernetworking/cni/pkg/invoke/raw_exec.go | 17 +- .../containernetworking/cni/pkg/types/020/types.go | 7 +- .../cni/pkg/types/current/types.go | 23 +- .../containernetworking/cni/pkg/types/types.go | 14 +- .../containernetworking/cni/pkg/version/plugin.go | 10 +- .../containernetworking/cni/pkg/version/version.go | 22 + vendor/github.com/containers/buildah/README.md | 14 +- vendor/github.com/containers/buildah/add.go | 151 +++- vendor/github.com/containers/buildah/buildah.go | 4 +- vendor/github.com/containers/buildah/commit.go | 14 +- vendor/github.com/containers/buildah/common.go | 41 +- .../containers/buildah/imagebuildah/build.go | 827 ++++++++++++--------- .../containers/buildah/imagebuildah/util.go | 25 - vendor/github.com/containers/buildah/import.go | 10 +- vendor/github.com/containers/buildah/info.go | 4 +- vendor/github.com/containers/buildah/new.go | 4 +- .../containers/buildah/pkg/chrootuser/user.go | 108 +++ .../buildah/pkg/chrootuser/user_basic.go | 27 + .../buildah/pkg/chrootuser/user_linux.go | 293 ++++++++ .../containers/buildah/pkg/formats/formats.go | 8 +- .../containers/buildah/pkg/parse/parse.go | 17 +- .../containers/buildah/pkg/secrets/secrets.go | 18 +- vendor/github.com/containers/buildah/pull.go | 9 +- vendor/github.com/containers/buildah/run.go | 12 +- .../containers/buildah/unshare/unshare.go | 38 +- .../buildah/unshare/unshare_unsupported.go | 27 + 
vendor/github.com/containers/buildah/vendor.conf | 9 +- vendor/github.com/containers/image/README.md | 2 +- vendor/github.com/containers/image/copy/copy.go | 34 +- .../containers/image/docker/docker_client.go | 2 +- .../containers/image/docker/docker_image_dest.go | 4 +- .../containers/image/image/docker_schema2.go | 6 +- vendor/github.com/containers/image/image/oci.go | 4 +- .../containers/image/pkg/blobinfocache/boltdb.go | 329 -------- .../image/pkg/blobinfocache/boltdb/boltdb.go | 332 +++++++++ .../containers/image/pkg/blobinfocache/default.go | 8 +- .../internal/prioritize/prioritize.go | 110 +++ .../containers/image/pkg/blobinfocache/memory.go | 141 ---- .../image/pkg/blobinfocache/memory/memory.go | 145 ++++ .../containers/image/pkg/blobinfocache/none.go | 47 -- .../image/pkg/blobinfocache/none/none.go | 49 ++ .../image/pkg/blobinfocache/prioritize.go | 108 --- .../containers/image/signature/policy_config.go | 2 +- .../containers/image/storage/storage_image.go | 14 +- .../containers/image/storage/storage_transport.go | 6 +- vendor/github.com/containers/image/vendor.conf | 2 +- .../github.com/containers/image/version/version.go | 4 +- vendor/github.com/containers/storage/containers.go | 10 +- .../containers/storage/containers_ffjson.go | 2 +- .../containers/storage/drivers/copy/copy.go | 3 +- vendor/github.com/containers/storage/images.go | 35 +- .../github.com/containers/storage/images_ffjson.go | 2 +- vendor/github.com/containers/storage/layers.go | 198 +++-- vendor/github.com/containers/storage/lockfile.go | 7 +- .../containers/storage/lockfile_darwin.go | 19 - .../containers/storage/lockfile_otherunix.go | 19 + .../github.com/containers/storage/lockfile_unix.go | 51 +- .../containers/storage/pkg/archive/archive.go | 2 +- .../storage/pkg/archive/archive_linux.go | 3 +- .../containers/storage/pkg/idtools/idtools.go | 18 + .../containers/storage/pkg/idtools/idtools_unix.go | 4 +- .../containers/storage/pkg/reexec/command_linux.go | 25 +- 
.../containers/storage/pkg/reexec/command_unix.go | 15 +- .../storage/pkg/reexec/command_unsupported.go | 6 + .../storage/pkg/reexec/command_windows.go | 17 +- vendor/github.com/containers/storage/store.go | 164 ++-- vendor/github.com/containers/storage/utils.go | 234 ++++++ vendor/github.com/containers/storage/vendor.conf | 3 - .../github.com/cri-o/ocicni/pkg/ocicni/ocicni.go | 5 +- vendor/github.com/cri-o/ocicni/vendor.conf | 13 + 76 files changed, 2766 insertions(+), 1537 deletions(-) create mode 100644 vendor/github.com/containers/buildah/pkg/chrootuser/user.go create mode 100644 vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go create mode 100644 vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go create mode 100644 vendor/github.com/containers/buildah/unshare/unshare_unsupported.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go create mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go create mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/memory.go create mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/none.go create mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/none/none.go delete mode 100644 vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go delete mode 100644 vendor/github.com/containers/storage/lockfile_darwin.go create mode 100644 vendor/github.com/containers/storage/lockfile_otherunix.go create mode 100644 vendor/github.com/containers/storage/utils.go create mode 100644 vendor/github.com/cri-o/ocicni/vendor.conf (limited to 'vendor/github.com') diff --git a/vendor/github.com/containernetworking/cni/README.md b/vendor/github.com/containernetworking/cni/README.md index 65ccda9f9..3968d908a 100644 --- 
a/vendor/github.com/containernetworking/cni/README.md +++ b/vendor/github.com/containernetworking/cni/README.md @@ -9,9 +9,9 @@ # Community Sync Meeting -There is a community sync meeting for users and developers every 1-2 months. The next meeting will help on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (Notes from previous meeting are also in this doc). +There is a community sync meeting for users and developers every 1-2 months. The next meeting will help on a Google Hangout and the link is in the [agenda](https://docs.google.com/document/d/10ECyT2mBGewsJUcmYmS8QNo1AcNgy2ZIe2xS7lShYhE/edit?usp=sharing) (Notes from previous meeting are also in this doc). -The next meeting will be held on *Wednesday, October 4th* at *3:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2017-10-04&sln=15-16). +The next meeting will be held on *Wednesday, January 30th, 2019* at *4:00pm UTC / 11:00am EDT / 8:00am PDT* [Add to Calendar](https://www.worldtimebuddy.com/?qm=1&lid=100,5,2643743,5391959&h=100&date=2019-01-30&sln=16-17). --- @@ -38,11 +38,13 @@ To avoid duplication, we think it is prudent to define a common interface betwee ## Who is using CNI? 
### Container runtimes - [rkt - container engine](https://coreos.com/blog/rkt-cni-networking.html) -- [Kubernetes - a system to simplify container operations](http://kubernetes.io/docs/admin/network-plugins/) +- [Kubernetes - a system to simplify container operations](https://kubernetes.io/docs/admin/network-plugins/) - [OpenShift - Kubernetes with additional enterprise features](https://github.com/openshift/origin/blob/master/docs/openshift_networking_requirements.md) - [Cloud Foundry - a platform for cloud applications](https://github.com/cloudfoundry-incubator/cf-networking-release) - [Apache Mesos - a distributed systems kernel](https://github.com/apache/mesos/blob/master/docs/cni.md) - [Amazon ECS - a highly scalable, high performance container management service](https://aws.amazon.com/ecs/) +- [Singularity - container platform optimized for HPC, EPC, and AI](https://github.com/sylabs/singularity) +- [OpenSVC - orchestrator for legacy and containerized application stacks](https://docs.opensvc.com/latest/fr/agent.configure.cni.html) ### 3rd party plugins - [Project Calico - a layer 3 virtual network](https://github.com/projectcalico/calico-cni) @@ -61,6 +63,10 @@ To avoid duplication, we think it is prudent to define a common interface betwee - [Amazon ECS CNI Plugins - a collection of CNI Plugins to configure containers with Amazon EC2 elastic network interfaces (ENIs)](https://github.com/aws/amazon-ecs-cni-plugins) - [Bonding CNI - a Link aggregating plugin to address failover and high availability network](https://github.com/Intel-Corp/bond-cni) - [ovn-kubernetes - an container network plugin built on Open vSwitch (OVS) and Open Virtual Networking (OVN) with support for both Linux and Windows](https://github.com/openvswitch/ovn-kubernetes) +- [Juniper Contrail](https://www.juniper.net/cloud) / [TungstenFabric](https://tungstenfabric.io) - Provides overlay SDN solution, delivering multicloud networking, hybrid cloud networking, simultaneous overlay-underlay 
support, network policy enforcement, network isolation, service chaining and flexible load balancing +- [Knitter - a CNI plugin supporting multiple networking for Kubernetes](https://github.com/ZTE/Knitter) +- [DANM - a CNI-compliant networking solution for TelCo workloads running on Kubernetes](https://github.com/nokia/danm) +- [VMware NSX – a CNI plugin that enables automated NSX L2/L3 networking and L4/L7 Load Balancing; network isolation at the pod, node, and cluster level; and zero-trust security policy for your Kubernetes cluster.](https://docs.vmware.com/en/VMware-NSX-T/2.2/com.vmware.nsxt.ncp_kubernetes.doc/GUID-6AFA724E-BB62-4693-B95C-321E8DDEA7E1.html) The CNI team also maintains some [core plugins in a separate repository](https://github.com/containernetworking/plugins). @@ -74,7 +80,7 @@ If you intend to contribute to code or documentation, please read [CONTRIBUTING. ### Requirements -The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. Our [automated tests](https://travis-ci.org/containernetworking/cni/builds) cover Go versions 1.7 and 1.8. +The CNI spec is language agnostic. To use the Go language libraries in this repository, you'll need a recent version of Go. You can find the Go versions covered by our [automated tests](https://travis-ci.org/containernetworking/cni/builds) in [.travis.yaml](.travis.yml). 
### Reference Plugins @@ -111,6 +117,7 @@ EOF $ cat >/etc/cni/net.d/99-loopback.conf <= 0; i-- { net := list.Plugins[i] - if err := c.delNetwork(list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { return err } } @@ -314,37 +349,37 @@ func (c *CNIConfig) DelNetworkList(list *NetworkConfigList, rt *RuntimeConf) err } // AddNetwork executes the plugin with the ADD command -func (c *CNIConfig) AddNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - result, err := c.addOrGetNetwork("ADD", net.Network.Name, net.Network.CNIVersion, net, nil, rt) +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) if err != nil { return nil, err } if err = setCachedResult(result, net.Network.Name, rt); err != nil { - return nil, fmt.Errorf("failed to set network '%s' cached result: %v", net.Network.Name, err) + return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err) } return result, nil } -// GetNetwork executes the plugin with the GET command -func (c *CNIConfig) GetNetwork(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { - // GET was added in CNI spec version 0.4.0 and higher +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { - return nil, err + return err } else if !gtet { - return nil, fmt.Errorf("configuration version %q does not support the GET command", net.Network.CNIVersion) + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) } cachedResult, err := 
getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) if err != nil { - return nil, fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err) + return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) } - return c.addOrGetNetwork("GET", net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) } // DelNetwork executes the plugin with the DEL command -func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error { +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { var cachedResult types.Result // Cached result on DEL was added in CNI spec version 0.4.0 and higher @@ -353,27 +388,99 @@ func (c *CNIConfig) DelNetwork(net *NetworkConfig, rt *RuntimeConf) error { } else if gtet { cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) if err != nil { - return fmt.Errorf("failed to get network '%s' cached result: %v", net.Network.Name, err) + return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) } } - if err := c.delNetwork(net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { return err } _ = delCachedResult(net.Network.Name, rt) return nil } +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. 
+// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. +// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + pluginPath, err := invoke.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + // GetVersionInfo reports which versions of the CNI spec are supported by // the given plugin. 
-func (c *CNIConfig) GetVersionInfo(pluginType string) (version.PluginInfo, error) { +func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { c.ensureExec() pluginPath, err := c.exec.FindInPath(pluginType, c.Path) if err != nil { return nil, err } - return invoke.GetVersionInfo(pluginPath, c.exec) + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) } // ===== diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index 9834d715b..ea56c509d 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -83,10 +83,19 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } } + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + list := &NetworkConfigList{ - Name: name, - CNIVersion: cniVersion, - Bytes: bytes, + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, } var plugins []interface{} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go index 21efdf802..30b4672f1 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -15,6 +15,7 @@ package invoke import ( + "context" "fmt" "os" "path/filepath" @@ -22,54 +23,53 @@ import ( "github.com/containernetworking/cni/pkg/types" ) -func delegateAddOrGet(command, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { +func delegateCommon(expectedCommand, delegatePlugin string, exec Exec) (string, Exec, error) { if exec == nil { exec = defaultExec } + if os.Getenv("CNI_COMMAND") 
!= expectedCommand { + return "", nil, fmt.Errorf("CNI_COMMAND is not " + expectedCommand) + } + paths := filepath.SplitList(os.Getenv("CNI_PATH")) pluginPath, err := exec.FindInPath(delegatePlugin, paths) if err != nil { - return nil, err + return "", nil, err } - return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv(), exec) + return pluginPath, exec, nil } // DelegateAdd calls the given delegate plugin with the CNI ADD action and // JSON configuration -func DelegateAdd(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { - if os.Getenv("CNI_COMMAND") != "ADD" { - return nil, fmt.Errorf("CNI_COMMAND is not ADD") +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon("ADD", delegatePlugin, exec) + if err != nil { + return nil, err } - return delegateAddOrGet("ADD", delegatePlugin, netconf, exec) + + return ExecPluginWithResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec) } -// DelegateGet calls the given delegate plugin with the CNI GET action and +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and // JSON configuration -func DelegateGet(delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { - if os.Getenv("CNI_COMMAND") != "GET" { - return nil, fmt.Errorf("CNI_COMMAND is not GET") +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon("CHECK", delegatePlugin, exec) + if err != nil { + return err } - return delegateAddOrGet("GET", delegatePlugin, netconf, exec) + + return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec) } // DelegateDel calls the given delegate plugin with the CNI DEL action and // JSON configuration -func DelegateDel(delegatePlugin string, netconf []byte, exec Exec) error { - if exec == nil { - exec = defaultExec - } - - if os.Getenv("CNI_COMMAND") != 
"DEL" { - return fmt.Errorf("CNI_COMMAND is not DEL") - } - - paths := filepath.SplitList(os.Getenv("CNI_PATH")) - pluginPath, err := exec.FindInPath(delegatePlugin, paths) +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon("DEL", delegatePlugin, exec) if err != nil { return err } - return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv(), exec) + return ExecPluginWithoutResult(ctx, pluginPath, netconf, ArgsFromEnv(), realExec) } diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index cf019d3a0..8e6d30b82 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -15,6 +15,7 @@ package invoke import ( + "context" "fmt" "os" @@ -26,7 +27,7 @@ import ( // and executing a CNI plugin. Tests may provide a fake implementation // to avoid writing fake plugins to temporary directories during the test. 
type Exec interface { - ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) + ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) FindInPath(plugin string, paths []string) (string, error) Decode(jsonBytes []byte) (version.PluginInfo, error) } @@ -72,12 +73,12 @@ type Exec interface { // return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) //} -func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { if exec == nil { exec = defaultExec } - stdoutBytes, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv()) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) if err != nil { return nil, err } @@ -92,11 +93,11 @@ func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs, exec return version.NewResult(confVersion, stdoutBytes) } -func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { if exec == nil { exec = defaultExec } - _, err := exec.ExecPlugin(pluginPath, netconf, args.AsEnv()) + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) return err } @@ -104,7 +105,7 @@ func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs, ex // For recent-enough plugins, it uses the information returned by the VERSION // command. 
For older plugins which do not recognize that command, it reports // version 0.1.0 -func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) { +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { if exec == nil { exec = defaultExec } @@ -117,7 +118,7 @@ func GetVersionInfo(pluginPath string, exec Exec) (version.PluginInfo, error) { Path: "dummy", } stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) - stdoutBytes, err := exec.ExecPlugin(pluginPath, stdin, args.AsEnv()) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) if err != nil { if err.Error() == "unknown CNI_COMMAND: VERSION" { return version.PluginSupports("0.1.0"), nil diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go index bab5737a9..9bcfb4553 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build darwin dragonfly freebsd linux netbsd opensbd solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris package invoke diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go index a598f09c2..e5b86634d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -16,6 +16,7 @@ package invoke import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -28,17 +29,13 @@ type RawExec struct { Stderr io.Writer } -func (e *RawExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { stdout := &bytes.Buffer{} - - c := exec.Cmd{ - Env: environ, - Path: pluginPath, - Args: []string{pluginPath}, - Stdin: bytes.NewBuffer(stdinData), - Stdout: stdout, - Stderr: e.Stderr, - } + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = e.Stderr if err := c.Run(); err != nil { return nil, pluginErr(err, stdout.Bytes()) } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go index 2833aba78..53256167f 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -17,6 +17,7 @@ package types020 import ( "encoding/json" "fmt" + "io" "net" "os" @@ -73,11 +74,15 @@ func (r *Result) GetAsVersion(version string) (types.Result, error) { } func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { data, err := json.MarshalIndent(r, "", " ") if err != nil { return err } - _, err = os.Stdout.Write(data) + _, err = 
writer.Write(data) return err } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go index 92980c1a7..7267a2e6d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go @@ -17,6 +17,7 @@ package current import ( "encoding/json" "fmt" + "io" "net" "os" @@ -75,13 +76,9 @@ func convertFrom020(result types.Result) (*Result, error) { Gateway: oldResult.IP4.Gateway, }) for _, route := range oldResult.IP4.Routes { - gw := route.GW - if gw == nil { - gw = oldResult.IP4.Gateway - } newResult.Routes = append(newResult.Routes, &types.Route{ Dst: route.Dst, - GW: gw, + GW: route.GW, }) } } @@ -93,21 +90,13 @@ func convertFrom020(result types.Result) (*Result, error) { Gateway: oldResult.IP6.Gateway, }) for _, route := range oldResult.IP6.Routes { - gw := route.GW - if gw == nil { - gw = oldResult.IP6.Gateway - } newResult.Routes = append(newResult.Routes, &types.Route{ Dst: route.Dst, - GW: gw, + GW: route.GW, }) } } - if len(newResult.IPs) == 0 { - return nil, fmt.Errorf("cannot convert: no valid IP addresses") - } - return newResult, nil } @@ -206,11 +195,15 @@ func (r *Result) GetAsVersion(version string) (types.Result, error) { } func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { data, err := json.MarshalIndent(r, "", " ") if err != nil { return err } - _, err = os.Stdout.Write(data) + _, err = writer.Write(data) return err } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index 4684a3207..d0d11006a 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -18,6 +18,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net" "os" ) @@ 
-65,6 +66,9 @@ type NetConf struct { Capabilities map[string]bool `json:"capabilities,omitempty"` IPAM IPAM `json:"ipam,omitempty"` DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` } type IPAM struct { @@ -75,15 +79,16 @@ type IPAM struct { type NetConfList struct { CNIVersion string `json:"cniVersion,omitempty"` - Name string `json:"name,omitempty"` - Plugins []*NetConf `json:"plugins,omitempty"` + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` } type ResultFactoryFunc func([]byte) (Result, error) // Result is an interface that provides the result of plugin execution type Result interface { - // The highest CNI specification result verison the result supports + // The highest CNI specification result version the result supports // without having to convert Version() string @@ -94,6 +99,9 @@ type Result interface { // Prints the result in JSON format to stdout Print() error + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error + // Returns a JSON string representation of the result String() string } diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go index 612335a81..1df427243 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -86,9 +86,13 @@ func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { // minor, and micro numbers or returns an error func ParseVersion(version string) (int, int, int, error) { var major, minor, micro int + if version == "" { + return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) + } + parts := strings.Split(version, ".") - if len(parts) == 0 || len(parts) >= 4 { - return -1, -1, -1, fmt.Errorf("invalid version %q: 
too many or too few parts", version) + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) } major, err := strconv.Atoi(parts[0]) @@ -114,7 +118,7 @@ func ParseVersion(version string) (int, int, int, error) { } // GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro -// nubmers, and compares them to determine whether the first version is greater +// numbers, and compares them to determine whether the first version is greater // than or equal to the second func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { firstMajor, firstMinor, firstMicro, err := ParseVersion(version) diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go index c8e46d55b..8f3508e61 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -15,6 +15,7 @@ package version import ( + "encoding/json" "fmt" "github.com/containernetworking/cni/pkg/types" @@ -59,3 +60,24 @@ func NewResult(version string, resultBytes []byte) (types.Result, error) { return nil, fmt.Errorf("unsupported CNI result version %q", version) } + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. 
+func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %v", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %v", err) + } + + return nil +} diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index 913a4336f..827d5a87f 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -78,21 +78,21 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh): $ cat > lighttpd.sh <<"EOF" #!/bin/bash -x -ctr1=`buildah from ${1:-fedora}` +ctr1=$(buildah from "${1:-fedora}") ## Get all updates and install our minimal httpd server -buildah run $ctr1 -- dnf update -y -buildah run $ctr1 -- dnf install -y lighttpd +buildah run "$ctr1" -- dnf update -y +buildah run "$ctr1" -- dnf install -y lighttpd ## Include some buildtime annotations -buildah config --annotation "com.example.build.host=$(uname -n)" $ctr1 +buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1" ## Run our server and expose the port -buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" $ctr1 -buildah config --port 80 $ctr1 +buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1" +buildah config --port 80 "$ctr1" ## Commit this container to an image name -buildah commit $ctr1 ${2:-$USER/lighttpd} +buildah commit "$ctr1" "${2:-$USER/lighttpd}" EOF $ chmod +x lighttpd.sh diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index 6542b0377..250d75b24 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -11,8 +11,8 @@ import ( "syscall" "time" + 
"github.com/containers/buildah/pkg/chrootuser" "github.com/containers/buildah/util" - "github.com/containers/libpod/pkg/chrootuser" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/runtime-spec/specs-go" @@ -32,6 +32,10 @@ type AddAndCopyOptions struct { // If the sources include directory trees, Hasher will be passed // tar-format archives of the directory trees. Hasher io.Writer + // Exludes contents in the .dockerignore file + Excludes []string + // current directory on host + ContextDir string } // addURL copies the contents of the source URL to the destination. This is @@ -84,6 +88,7 @@ func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer) // filesystem, optionally extracting contents of local files that look like // non-empty archives. func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error { + excludes := DockerIgnoreHelper(options.Excludes, options.ContextDir) mountPoint, err := b.Mount(b.MountLabel) if err != nil { return err @@ -139,6 +144,71 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption copyFileWithTar := b.copyFileWithTar(&containerOwner, options.Hasher) copyWithTar := b.copyWithTar(&containerOwner, options.Hasher) untarPath := b.untarPath(nil, options.Hasher) + err = addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...) + if err != nil { + return err + } + return nil +} + +// user returns the user (and group) information which the destination should belong to. 
+func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) { + if userspec == "" { + userspec = b.User() + } + + uid, gid, err := chrootuser.GetUser(mountPoint, userspec) + u := specs.User{ + UID: uid, + GID: gid, + Username: userspec, + } + if !strings.Contains(userspec, ":") { + groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) + if err2 != nil { + if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { + err = err2 + } + } else { + u.AdditionalGids = groups + } + + } + return u, err +} + +// DockerIgnore struct keep info from .dockerignore +type DockerIgnore struct { + ExcludePath string + IsExcluded bool +} + +// DockerIgnoreHelper returns the lines from .dockerignore file without the comments +// and reverses the order +func DockerIgnoreHelper(lines []string, contextDir string) []DockerIgnore { + var excludes []DockerIgnore + // the last match of a file in the .dockerignmatches determines whether it is included or excluded + // reverse the order + for i := len(lines) - 1; i >= 0; i-- { + exclude := lines[i] + // ignore the comment in .dockerignore + if strings.HasPrefix(exclude, "#") || len(exclude) == 0 { + continue + } + excludeFlag := true + if strings.HasPrefix(exclude, "!") { + exclude = strings.TrimPrefix(exclude, "!") + excludeFlag = false + } + excludes = append(excludes, DockerIgnore{ExcludePath: filepath.Join(contextDir, exclude), IsExcluded: excludeFlag}) + } + if len(excludes) != 0 { + excludes = append(excludes, DockerIgnore{ExcludePath: filepath.Join(contextDir, ".dockerignore"), IsExcluded: true}) + } + return excludes +} + +func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error { for _, src := range source { if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { // We assume that source 
is a file, and we're copying @@ -167,6 +237,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if len(glob) == 0 { return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src) } + outer: for _, gsrc := range glob { esrc, err := filepath.EvalSymlinks(gsrc) if err != nil { @@ -185,11 +256,59 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption return errors.Wrapf(err, "error creating directory %q", dest) } logrus.Debugf("copying %q to %q", esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*") - if err = copyWithTar(esrc, dest); err != nil { - return errors.Wrapf(err, "error copying %q to %q", esrc, dest) + if len(excludes) == 0 { + if err = copyWithTar(esrc, dest); err != nil { + return errors.Wrapf(err, "error copying %q to %q", esrc, dest) + } + continue + } + err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + for _, exclude := range excludes { + match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path)) + if err != nil { + return err + } + if !match { + continue + } + if exclude.IsExcluded { + return nil + } + break + } + // combine the filename with the dest directory + fpath := strings.TrimPrefix(path, options.ContextDir) + if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil { + return errors.Wrapf(err, "error copying %q to %q", path, dest) + } + return nil + }) + if err != nil { + return err } continue } + + for _, exclude := range excludes { + match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), esrc) + if err != nil { + return err + } + if !match { + continue + } + if exclude.IsExcluded { + continue outer + } + break + } + if !extract || !archive.IsArchivePath(esrc) { // This source is a file, and either it's not an // archive, or we don't care whether or not it's an @@ -214,29 +333,3 @@ func (b *Builder) 
Add(destination string, extract bool, options AddAndCopyOption } return nil } - -// user returns the user (and group) information which the destination should belong to. -func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) { - if userspec == "" { - userspec = b.User() - } - - uid, gid, err := chrootuser.GetUser(mountPoint, userspec) - u := specs.User{ - UID: uid, - GID: gid, - Username: userspec, - } - if !strings.Contains(userspec, ":") { - groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID)) - if err2 != nil { - if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil { - err = err2 - } - } else { - u.AdditionalGids = groups - } - - } - return u, err -} diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 8f5364632..febc3d0d1 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -336,10 +336,10 @@ type BuilderOptions struct { // needs to be pulled and the image name alone can not be resolved to a // reference to a source image. No separator is implicitly added. Registry string - // PullBlobDirectory is the name of a directory in which we'll attempt + // BlobDirectory is the name of a directory in which we'll attempt // to store copies of layer blobs that we pull down, if any. It should // already exist. - PullBlobDirectory string + BlobDirectory string // Mount signals to NewBuilder() that the container should be mounted // immediately. 
Mount bool diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index da28bea61..9ab90196c 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -114,7 +114,7 @@ type PushOptions struct { func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) { var imgID string - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) + systemContext := getSystemContext(b.store, options.SystemContext, options.SignaturePolicyPath) blocked, err := isReferenceBlocked(dest, systemContext) if err != nil { @@ -152,8 +152,8 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options if err != nil { return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) } - var maybeCachedSrc types.ImageReference = src - var maybeCachedDest types.ImageReference = dest + var maybeCachedSrc = types.ImageReference(src) + var maybeCachedDest = types.ImageReference(dest) if options.BlobDirectory != "" { compress := types.PreserveOriginal if options.Compression != archive.Uncompressed { @@ -178,7 +178,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options systemContext.DirForceCompress = true } var manifestBytes []byte - if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil { + if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(b.store, options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil { return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID) } if 
len(options.AdditionalTags) > 0 { @@ -230,7 +230,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options // Push copies the contents of the image to a new location. func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) { - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) + systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath) if options.Quiet { options.ReportWriter = nil // Turns off logging output @@ -256,7 +256,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options if err != nil { return nil, "", err } - var maybeCachedSrc types.ImageReference = src + var maybeCachedSrc = types.ImageReference(src) if options.BlobDirectory != "" { compress := types.PreserveOriginal if options.Compression != archive.Uncompressed { @@ -276,7 +276,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options systemContext.DirForceCompress = true } var manifestBytes []byte - if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil { + if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.Store, options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil { return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest)) } if options.ReportWriter != nil { diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go index e369dc407..667a1a484 100644 --- a/vendor/github.com/containers/buildah/common.go +++ b/vendor/github.com/containers/buildah/common.go @@ -5,9 +5,10 @@ import ( "os" "path/filepath" 
+ "github.com/containers/buildah/unshare" cp "github.com/containers/image/copy" "github.com/containers/image/types" - "github.com/containers/libpod/pkg/rootless" + "github.com/containers/storage" ) const ( @@ -17,33 +18,16 @@ const ( DOCKER = "docker" ) -// userRegistriesFile is the path to the per user registry configuration file. -var userRegistriesFile = filepath.Join(os.Getenv("HOME"), ".config/containers/registries.conf") - -func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options { - sourceCtx := &types.SystemContext{} +func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceReference types.ImageReference, sourceSystemContext *types.SystemContext, destinationReference types.ImageReference, destinationSystemContext *types.SystemContext, manifestType string) *cp.Options { + sourceCtx := getSystemContext(store, nil, "") if sourceSystemContext != nil { *sourceCtx = *sourceSystemContext - } else { - if rootless.IsRootless() { - if _, err := os.Stat(userRegistriesFile); err == nil { - sourceCtx.SystemRegistriesConfPath = userRegistriesFile - } - - } } - destinationCtx := &types.SystemContext{} + destinationCtx := getSystemContext(store, nil, "") if destinationSystemContext != nil { *destinationCtx = *destinationSystemContext - } else { - if rootless.IsRootless() { - if _, err := os.Stat(userRegistriesFile); err == nil { - destinationCtx.SystemRegistriesConfPath = userRegistriesFile - } - } } - return &cp.Options{ ReportWriter: reportWriter, SourceCtx: sourceCtx, @@ -52,7 +36,7 @@ func getCopyOptions(reportWriter io.Writer, sourceReference types.ImageReference } } -func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) *types.SystemContext { +func getSystemContext(store storage.Store, defaults *types.SystemContext, signaturePolicyPath 
string) *types.SystemContext { sc := &types.SystemContext{} if defaults != nil { *sc = *defaults @@ -60,11 +44,16 @@ func getSystemContext(defaults *types.SystemContext, signaturePolicyPath string) if signaturePolicyPath != "" { sc.SignaturePolicyPath = signaturePolicyPath } - if sc.SystemRegistriesConfPath == "" && rootless.IsRootless() { - if _, err := os.Stat(userRegistriesFile); err == nil { - sc.SystemRegistriesConfPath = userRegistriesFile + if store != nil { + if sc.BlobInfoCacheDir == "" { + sc.BlobInfoCacheDir = filepath.Join(store.GraphRoot(), "cache") + } + if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() { + userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf") + if _, err := os.Stat(userRegistriesFile); err == nil { + sc.SystemRegistriesConfPath = userRegistriesFile + } } - } return sc } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index f50b11f6c..b1e30ca6a 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -11,6 +11,7 @@ import ( "os/exec" "path/filepath" "regexp" + "sort" "strconv" "strings" "time" @@ -20,6 +21,7 @@ import ( "github.com/containers/buildah/util" cp "github.com/containers/image/copy" "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" is "github.com/containers/image/storage" "github.com/containers/image/transports" "github.com/containers/image/transports/alltransports" @@ -171,14 +173,12 @@ type BuildOptions struct { } // Executor is a buildah-based implementation of the imagebuilder.Executor -// interface. +// interface. It coordinates the entire build by using one StageExecutors to +// handle each stage of the build. 
type Executor struct { - index int - name string - named map[string]*Executor + stages map[string]*StageExecutor store storage.Store contextDir string - builder *buildah.Builder pullPolicy buildah.PullPolicy registry string ignoreUnrecognizedInstructions bool @@ -196,11 +196,6 @@ type Executor struct { err io.Writer signaturePolicyPath string systemContext *types.SystemContext - mountPoint string - preserved int - volumes imagebuilder.VolumeSet - volumeCache map[string]string - volumeCacheInfo map[string]os.FileInfo reportWriter io.Writer isolation buildah.Isolation namespaceOptions []buildah.NamespaceOption @@ -217,16 +212,46 @@ type Executor struct { onbuild []string layers bool topLayers []string - noCache bool + useCache bool removeIntermediateCtrs bool forceRmIntermediateCtrs bool - containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build imageMap map[string]string // Used to map images that we create to handle the AS construct. - copyFrom string // Used to keep track of the --from flag from COPY and ADD blobDirectory string + excludes []string + unusedArgs map[string]struct{} } -// builtinAllowedBuildArgs is list of built-in allowed build args +// StageExecutor bundles up what we need to know when executing one stage of a +// (possibly multi-stage) build. +// Each stage may need to produce an image to be used as the base in a later +// stage (with the last stage's image being the end product of the build), and +// it may need to leave its working container in place so that the container's +// root filesystem's contents can be used as the source for a COPY instruction +// in a later stage. +// Each stage has its own base image, so it starts with its own configuration +// and set of volumes. +// If we're naming the result of the build, only the last stage will apply that +// name to the image that it produces. 
+type StageExecutor struct { + executor *Executor + index int + stages int + name string + builder *buildah.Builder + preserved int + volumes imagebuilder.VolumeSet + volumeCache map[string]string + volumeCacheInfo map[string]os.FileInfo + mountPoint string + copyFrom string // Used to keep track of the --from flag from COPY and ADD + output string + containerIDs []string +} + +// builtinAllowedBuildArgs is list of built-in allowed build args. Normally we +// complain if we're given values for arguments which have no corresponding ARG +// instruction in the Dockerfile, since that's usually an indication of a user +// error, but for these values we make exceptions and ignore them. var builtinAllowedBuildArgs = map[string]bool{ "HTTP_PROXY": true, "http_proxy": true, @@ -238,63 +263,70 @@ var builtinAllowedBuildArgs = map[string]bool{ "no_proxy": true, } -// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME. -func (b *Executor) withName(name string, index int, from string) *Executor { - if b.named == nil { - b.named = make(map[string]*Executor) - } - copied := *b - copied.index = index - copied.name = name - child := &copied - b.named[name] = child - b.named[from] = child +// startStage creates a new stage executor that will be referenced whenever a +// COPY or ADD statement uses a --from=NAME flag. 
+func (b *Executor) startStage(name string, index, stages int, from, output string) *StageExecutor { + if b.stages == nil { + b.stages = make(map[string]*StageExecutor) + } + stage := &StageExecutor{ + executor: b, + index: index, + stages: stages, + name: name, + volumeCache: make(map[string]string), + volumeCacheInfo: make(map[string]os.FileInfo), + output: output, + } + b.stages[name] = stage + b.stages[from] = stage if idx := strconv.Itoa(index); idx != name { - b.named[idx] = child + b.stages[idx] = stage } - return child + return stage } -// Preserve informs the executor that from this point on, it needs to ensure -// that only COPY and ADD instructions can modify the contents of this +// Preserve informs the stage executor that from this point on, it needs to +// ensure that only COPY and ADD instructions can modify the contents of this // directory or anything below it. -// The Executor handles this by caching the contents of directories which have -// been marked this way before executing a RUN instruction, invalidating that -// cache when an ADD or COPY instruction sets any location under the directory -// as the destination, and using the cache to reset the contents of the -// directory tree after processing each RUN instruction. +// The StageExecutor handles this by caching the contents of directories which +// have been marked this way before executing a RUN instruction, invalidating +// that cache when an ADD or COPY instruction sets any location under the +// directory as the destination, and using the cache to reset the contents of +// the directory tree after processing each RUN instruction. // It would be simpler if we could just mark the directory as a read-only bind // mount of itself during Run(), but the directory is expected to be remain -// writeable, even if any changes within it are ultimately discarded. 
-func (b *Executor) Preserve(path string) error { +// writeable while the RUN instruction is being handled, even if any changes +// made within the directory are ultimately discarded. +func (s *StageExecutor) Preserve(path string) error { logrus.Debugf("PRESERVE %q", path) - if b.volumes.Covers(path) { + if s.volumes.Covers(path) { // This path is already a subdirectory of a volume path that // we're already preserving, so there's nothing new to be done // except ensure that it exists. - archivedPath := filepath.Join(b.mountPoint, path) + archivedPath := filepath.Join(s.mountPoint, path) if err := os.MkdirAll(archivedPath, 0755); err != nil { return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath) } - if err := b.volumeCacheInvalidate(path); err != nil { + if err := s.volumeCacheInvalidate(path); err != nil { return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath) } return nil } // Figure out where the cache for this volume would be stored. - b.preserved++ - cacheDir, err := b.store.ContainerDirectory(b.builder.ContainerID) + s.preserved++ + cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID) if err != nil { return errors.Errorf("unable to locate temporary directory for container") } - cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", b.preserved)) + cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved)) // Save info about the top level of the location that we'll be archiving. 
- archivedPath := filepath.Join(b.mountPoint, path) + archivedPath := filepath.Join(s.mountPoint, path) // Try and resolve the symlink (if one exists) // Set archivedPath and path based on whether a symlink is found or not - if symLink, err := resolveSymlink(b.mountPoint, path); err == nil { - archivedPath = filepath.Join(b.mountPoint, symLink) + if symLink, err := resolveSymlink(s.mountPoint, path); err == nil { + archivedPath = filepath.Join(s.mountPoint, symLink) path = symLink } else { return errors.Wrapf(err, "error reading symbolic link to %q", path) @@ -311,20 +343,20 @@ func (b *Executor) Preserve(path string) error { logrus.Debugf("error reading info about %q: %v", archivedPath, err) return errors.Wrapf(err, "error reading info about volume path %q", archivedPath) } - b.volumeCacheInfo[path] = st - if !b.volumes.Add(path) { + s.volumeCacheInfo[path] = st + if !s.volumes.Add(path) { // This path is not a subdirectory of a volume path that we're // already preserving, so adding it to the list should work. return errors.Errorf("error adding %q to the volume cache", path) } - b.volumeCache[path] = cacheFile + s.volumeCache[path] = cacheFile // Now prune cache files for volumes that are now supplanted by this one. removed := []string{} - for cachedPath := range b.volumeCache { + for cachedPath := range s.volumeCache { // Walk our list of cached volumes, and check that they're // still in the list of locations that we need to cache. found := false - for _, volume := range b.volumes { + for _, volume := range s.volumes { if volume == cachedPath { // We need to keep this volume's cache. found = true @@ -339,47 +371,47 @@ func (b *Executor) Preserve(path string) error { } // Actually remove the caches that we decided to remove. 
for _, cachedPath := range removed { - archivedPath := filepath.Join(b.mountPoint, cachedPath) - logrus.Debugf("no longer need cache of %q in %q", archivedPath, b.volumeCache[cachedPath]) - if err := os.Remove(b.volumeCache[cachedPath]); err != nil { + archivedPath := filepath.Join(s.mountPoint, cachedPath) + logrus.Debugf("no longer need cache of %q in %q", archivedPath, s.volumeCache[cachedPath]) + if err := os.Remove(s.volumeCache[cachedPath]); err != nil { if os.IsNotExist(err) { continue } - return errors.Wrapf(err, "error removing %q", b.volumeCache[cachedPath]) + return errors.Wrapf(err, "error removing %q", s.volumeCache[cachedPath]) } - delete(b.volumeCache, cachedPath) + delete(s.volumeCache, cachedPath) } return nil } // Remove any volume cache item which will need to be re-saved because we're // writing to part of it. -func (b *Executor) volumeCacheInvalidate(path string) error { +func (s *StageExecutor) volumeCacheInvalidate(path string) error { invalidated := []string{} - for cachedPath := range b.volumeCache { + for cachedPath := range s.volumeCache { if strings.HasPrefix(path, cachedPath+string(os.PathSeparator)) { invalidated = append(invalidated, cachedPath) } } for _, cachedPath := range invalidated { - if err := os.Remove(b.volumeCache[cachedPath]); err != nil { + if err := os.Remove(s.volumeCache[cachedPath]); err != nil { if os.IsNotExist(err) { continue } - return errors.Wrapf(err, "error removing volume cache %q", b.volumeCache[cachedPath]) + return errors.Wrapf(err, "error removing volume cache %q", s.volumeCache[cachedPath]) } - archivedPath := filepath.Join(b.mountPoint, cachedPath) - logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, b.volumeCache[cachedPath]) - delete(b.volumeCache, cachedPath) + archivedPath := filepath.Join(s.mountPoint, cachedPath) + logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, s.volumeCache[cachedPath]) + delete(s.volumeCache, cachedPath) } return nil } // Save the 
contents of each of the executor's list of volumes for which we // don't already have a cache file. -func (b *Executor) volumeCacheSave() error { - for cachedPath, cacheFile := range b.volumeCache { - archivedPath := filepath.Join(b.mountPoint, cachedPath) +func (s *StageExecutor) volumeCacheSave() error { + for cachedPath, cacheFile := range s.volumeCache { + archivedPath := filepath.Join(s.mountPoint, cachedPath) _, err := os.Stat(cacheFile) if err == nil { logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile) @@ -411,9 +443,9 @@ func (b *Executor) volumeCacheSave() error { } // Restore the contents of each of the executor's list of volumes. -func (b *Executor) volumeCacheRestore() error { - for cachedPath, cacheFile := range b.volumeCache { - archivedPath := filepath.Join(b.mountPoint, cachedPath) +func (s *StageExecutor) volumeCacheRestore() error { + for cachedPath, cacheFile := range s.volumeCache { + archivedPath := filepath.Join(s.mountPoint, cachedPath) logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile) cache, err := os.Open(cacheFile) if err != nil { @@ -430,7 +462,7 @@ func (b *Executor) volumeCacheRestore() error { if err != nil { return errors.Wrapf(err, "error extracting archive at %q", archivedPath) } - if st, ok := b.volumeCacheInfo[cachedPath]; ok { + if st, ok := s.volumeCacheInfo[cachedPath]; ok { if err := os.Chmod(archivedPath, st.Mode()); err != nil { return errors.Wrapf(err, "error restoring permissions on %q", archivedPath) } @@ -447,10 +479,14 @@ func (b *Executor) volumeCacheRestore() error { // Copy copies data into the working tree. The "Download" field is how // imagebuilder tells us the instruction was "ADD" and not "COPY". 
-func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error { +func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { for _, copy := range copies { - logrus.Debugf("COPY %#v, %#v", excludes, copy) - if err := b.volumeCacheInvalidate(copy.Dest); err != nil { + if copy.Download { + logrus.Debugf("ADD %#v, %#v", excludes, copy) + } else { + logrus.Debugf("COPY %#v, %#v", excludes, copy) + } + if err := s.volumeCacheInvalidate(copy.Dest); err != nil { return err } sources := []string{} @@ -458,21 +494,23 @@ func (b *Executor) Copy(excludes []string, copies ...imagebuilder.Copy) error { if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { sources = append(sources, src) } else if len(copy.From) > 0 { - if other, ok := b.named[copy.From]; ok && other.index < b.index { + if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index { sources = append(sources, filepath.Join(other.mountPoint, src)) } else { return errors.Errorf("the stage %q has not been built", copy.From) } } else { - sources = append(sources, filepath.Join(b.contextDir, src)) + sources = append(sources, filepath.Join(s.executor.contextDir, src)) } } options := buildah.AddAndCopyOptions{ - Chown: copy.Chown, + Chown: copy.Chown, + ContextDir: s.executor.contextDir, + Excludes: s.executor.excludes, } - if err := b.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil { + if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil { return err } } @@ -493,14 +531,14 @@ func convertMounts(mounts []Mount) []specs.Mount { return specmounts } -// Run executes a RUN instruction using the working container as a root -// directory. -func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error { +// Run executes a RUN instruction using the stage's current working container +// as a root directory. 
+func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { logrus.Debugf("RUN %#v, %#v", run, config) - if b.builder == nil { + if s.builder == nil { return errors.Errorf("no build container available") } - stdin := b.in + stdin := s.executor.in if stdin == nil { devNull, err := os.Open(os.DevNull) if err != nil { @@ -511,20 +549,20 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error { } options := buildah.RunOptions{ Hostname: config.Hostname, - Runtime: b.runtime, - Args: b.runtimeArgs, + Runtime: s.executor.runtime, + Args: s.executor.runtimeArgs, NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "", - Mounts: convertMounts(b.transientMounts), + Mounts: convertMounts(s.executor.transientMounts), Env: config.Env, User: config.User, WorkingDir: config.WorkingDir, Entrypoint: config.Entrypoint, Cmd: config.Cmd, Stdin: stdin, - Stdout: b.out, - Stderr: b.err, - Quiet: b.quiet, - NamespaceOptions: b.namespaceOptions, + Stdout: s.executor.out, + Stderr: s.executor.err, + Quiet: s.executor.quiet, + NamespaceOptions: s.executor.namespaceOptions, } if config.NetworkDisabled { options.ConfigureNetwork = buildah.NetworkDisabled @@ -536,11 +574,11 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error { if run.Shell { args = append([]string{"/bin/sh", "-c"}, args...) } - if err := b.volumeCacheSave(); err != nil { + if err := s.volumeCacheSave(); err != nil { return err } - err := b.builder.Run(args, options) - if err2 := b.volumeCacheRestore(); err2 != nil { + err := s.builder.Run(args, options) + if err2 := s.volumeCacheRestore(); err2 != nil { if err == nil { return err2 } @@ -550,10 +588,10 @@ func (b *Executor) Run(run imagebuilder.Run, config docker.Config) error { // UnrecognizedInstruction is called when we encounter an instruction that the // imagebuilder parser didn't understand. 
-func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error { +func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command) err := fmt.Sprintf(errStr+"%#v", step) - if b.ignoreUnrecognizedInstructions { + if s.executor.ignoreUnrecognizedInstructions { logrus.Debugf(err) return nil } @@ -572,9 +610,15 @@ func (b *Executor) UnrecognizedInstruction(step *imagebuilder.Step) error { // NewExecutor creates a new instance of the imagebuilder.Executor interface. func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { + excludes, err := imagebuilder.ParseDockerignore(options.ContextDirectory) + if err != nil { + return nil, err + } + exec := Executor{ store: store, contextDir: options.ContextDirectory, + excludes: excludes, pullPolicy: options.PullPolicy, registry: options.Registry, ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, @@ -588,8 +632,6 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { additionalTags: options.AdditionalTags, signaturePolicyPath: options.SignaturePolicyPath, systemContext: options.SystemContext, - volumeCache: make(map[string]string), - volumeCacheInfo: make(map[string]os.FileInfo), log: options.Log, in: options.In, out: options.Out, @@ -608,10 +650,12 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { labels: append([]string{}, options.Labels...), annotations: append([]string{}, options.Annotations...), layers: options.Layers, - noCache: options.NoCache, + useCache: !options.NoCache, removeIntermediateCtrs: options.RemoveIntermediateCtrs, forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, + imageMap: make(map[string]string), blobDirectory: options.BlobDirectory, + unusedArgs: make(map[string]struct{}), } if exec.err == nil { exec.err = os.Stderr @@ -628,12 +672,18 @@ func NewExecutor(store storage.Store, options 
BuildOptions) (*Executor, error) { fmt.Fprintf(exec.err, prefix+format+suffix, args...) } } + for arg := range options.Args { + if _, isBuiltIn := builtinAllowedBuildArgs[arg]; !isBuiltIn { + exec.unusedArgs[arg] = struct{}{} + } + } return &exec, nil } -// Prepare creates a working container based on specified image, or if one -// isn't specified, the first FROM instruction we can find in the parsed tree. -func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from string) error { +// Prepare creates a working container based on the specified image, or if one +// isn't specified, the first argument passed to the first FROM instruction we +// can find in the stage's parsed tree. +func (s *StageExecutor) Prepare(ctx context.Context, stage imagebuilder.Stage, from string) error { ib := stage.Builder node := stage.Node @@ -646,7 +696,8 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s from = base } displayFrom := from - // stage.Name will be a string of integers for all stages without an "AS" clause + + // stage.Name will be a numeric string for all stages without an "AS" clause asImageName := stage.Name if asImageName != "" { if _, err := strconv.Atoi(asImageName); err != nil { @@ -657,38 +708,36 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s } logrus.Debugf("FROM %#v", displayFrom) - if !b.quiet { - b.log("FROM %s", displayFrom) + if !s.executor.quiet { + s.executor.log("FROM %s", displayFrom) } builderOptions := buildah.BuilderOptions{ Args: ib.Args, FromImage: from, - PullPolicy: b.pullPolicy, - Registry: b.registry, - PullBlobDirectory: b.blobDirectory, - SignaturePolicyPath: b.signaturePolicyPath, - ReportWriter: b.reportWriter, - SystemContext: b.systemContext, - Isolation: b.isolation, - NamespaceOptions: b.namespaceOptions, - ConfigureNetwork: b.configureNetwork, - CNIPluginPath: b.cniPluginPath, - CNIConfigDir: b.cniConfigDir, - IDMappingOptions: b.idmappingOptions, - 
CommonBuildOpts: b.commonBuildOptions, - DefaultMountsFilePath: b.defaultMountsFilePath, - Format: b.outputFormat, - } - - var builder *buildah.Builder - var err error - // Check and see if the image was declared previously with - // an AS clause in the Dockerfile. - if asImageFound, ok := b.imageMap[from]; ok { + PullPolicy: s.executor.pullPolicy, + Registry: s.executor.registry, + BlobDirectory: s.executor.blobDirectory, + SignaturePolicyPath: s.executor.signaturePolicyPath, + ReportWriter: s.executor.reportWriter, + SystemContext: s.executor.systemContext, + Isolation: s.executor.isolation, + NamespaceOptions: s.executor.namespaceOptions, + ConfigureNetwork: s.executor.configureNetwork, + CNIPluginPath: s.executor.cniPluginPath, + CNIConfigDir: s.executor.cniConfigDir, + IDMappingOptions: s.executor.idmappingOptions, + CommonBuildOpts: s.executor.commonBuildOptions, + DefaultMountsFilePath: s.executor.defaultMountsFilePath, + Format: s.executor.outputFormat, + } + + // Check and see if the image is a pseudonym for the end result of a + // previous stage, named by an AS clause in the Dockerfile. + if asImageFound, ok := s.executor.imageMap[from]; ok { builderOptions.FromImage = asImageFound } - builder, err = buildah.NewBuilder(ctx, b.store, builderOptions) + builder, err := buildah.NewBuilder(ctx, s.executor.store, builderOptions) if err != nil { return errors.Wrapf(err, "error creating build container") } @@ -749,45 +798,43 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s } return errors.Wrapf(err, "error mounting new container") } - b.mountPoint = mountPoint - b.builder = builder + s.mountPoint = mountPoint + s.builder = builder // Add the top layer of this image to b.topLayers so we can keep track of them // when building with cached images. 
- b.topLayers = append(b.topLayers, builder.TopLayer) + s.executor.topLayers = append(s.executor.topLayers, builder.TopLayer) logrus.Debugln("Container ID:", builder.ContainerID) return nil } -// Delete deletes the working container, if we have one. The Executor object -// should not be used to build another image, as the name of the output image -// isn't resettable. -func (b *Executor) Delete() (err error) { - if b.builder != nil { - err = b.builder.Delete() - b.builder = nil +// Delete deletes the stage's working container, if we have one. +func (s *StageExecutor) Delete() (err error) { + if s.builder != nil { + err = s.builder.Delete() + s.builder = nil } return err } // resolveNameToImageRef creates a types.ImageReference from b.output -func (b *Executor) resolveNameToImageRef() (types.ImageReference, error) { +func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) { var ( imageRef types.ImageReference err error ) - if b.output != "" { - imageRef, err = alltransports.ParseImageName(b.output) + if output != "" { + imageRef, err = alltransports.ParseImageName(output) if err != nil { - candidates, _, _, err := util.ResolveName(b.output, "", b.systemContext, b.store) + candidates, _, _, err := util.ResolveName(output, "", b.systemContext, b.store) if err != nil { - return nil, errors.Wrapf(err, "error parsing target image name %q", b.output) + return nil, errors.Wrapf(err, "error parsing target image name %q", output) } if len(candidates) == 0 { - return nil, errors.Errorf("error parsing target image name %q", b.output) + return nil, errors.Errorf("error parsing target image name %q", output) } imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0]) if err2 != nil { - return nil, errors.Wrapf(err, "error parsing target image name %q", b.output) + return nil, errors.Wrapf(err, "error parsing target image name %q", output) } return imageRef2, nil } @@ -800,175 +847,205 @@ func (b *Executor) resolveNameToImageRef() 
(types.ImageReference, error) { return imageRef, nil } -// Execute runs each of the steps in the parsed tree, in turn. -func (b *Executor) Execute(ctx context.Context, stage imagebuilder.Stage) error { +// Execute runs each of the steps in the stage's parsed tree, in turn. +func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage) (imgID string, ref reference.Canonical, err error) { ib := stage.Builder node := stage.Node checkForLayers := true children := node.Children - commitName := b.output - b.containerIDs = nil + commitName := s.output - var leftoverArgs []string - for arg := range b.builder.Args { - if !builtinAllowedBuildArgs[arg] { - leftoverArgs = append(leftoverArgs, arg) - } - } for i, node := range node.Children { + // Resolve any arguments in this instruction so that we don't have to. step := ib.Step() if err := step.Resolve(node); err != nil { - return errors.Wrapf(err, "error resolving step %+v", *node) + return "", nil, errors.Wrapf(err, "error resolving step %+v", *node) } logrus.Debugf("Parsed Step: %+v", *step) + if !s.executor.quiet { + s.executor.log("%s", step.Original) + } + + // If this instruction declares an argument, remove it from the + // set of arguments that we were passed but which we haven't + // seen used by the Dockerfile. if step.Command == "arg" { - for index, arg := range leftoverArgs { - for _, Arg := range step.Args { - list := strings.SplitN(Arg, "=", 2) - if arg == list[0] { - leftoverArgs = append(leftoverArgs[:index], leftoverArgs[index+1:]...) - } + for _, Arg := range step.Args { + list := strings.SplitN(Arg, "=", 2) + if _, stillUnused := s.executor.unusedArgs[list[0]]; stillUnused { + delete(s.executor.unusedArgs, list[0]) } } } - if !b.quiet { - b.log("%s", step.Original) + + // Check if there's a --from if the step command is COPY or + // ADD. Set copyFrom to point to either the context directory + // or the root of the container from the specified stage. 
+ s.copyFrom = s.executor.contextDir + for _, n := range step.Flags { + if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") { + arr := strings.Split(n, "=") + stage, ok := s.executor.stages[arr[1]] + if !ok { + return "", nil, errors.Errorf("%s --from=%s: no stage found with that name", step.Command, arr[1]) + } + s.copyFrom = stage.mountPoint + break + } } - requiresStart := false - if i < len(node.Children)-1 { - requiresStart = ib.RequiresStart(&parser.Node{Children: node.Children[i+1:]}) + + // Determine if there are any RUN instructions to be run after + // this step. If not, we won't have to bother preserving the + // contents of any volumes declared between now and when we + // finish. + noRunsRemaining := false + if i < len(children)-1 { + noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]}) } - if !b.layers && !b.noCache { - err := ib.Run(step, b, requiresStart) + // If we're doing a single-layer build and not looking to take + // shortcuts using the cache, make a note of the instruction, + // process it, and then move on to the next instruction. + if !s.executor.layers && s.executor.useCache { + err := ib.Run(step, s, noRunsRemaining) if err != nil { - return errors.Wrapf(err, "error building at step %+v", *step) + return "", nil, errors.Wrapf(err, "error building at step %+v", *step) } continue } if i < len(children)-1 { - b.output = "" + commitName = "" } else { - b.output = commitName + commitName = s.output } + // TODO: this makes the tests happy, but it shouldn't be + // necessary unless this is the final stage. 
+ commitName = s.executor.output + var ( cacheID string err error - imgID string ) - b.copyFrom = "" - // Check if --from exists in the step command of COPY or ADD - // If it exists, set b.copyfrom to that value - for _, n := range step.Flags { - if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") { - arr := strings.Split(n, "=") - b.copyFrom = b.named[arr[1]].mountPoint - break - } - } - - // checkForLayers will be true if b.layers is true and a cached intermediate image is found. - // checkForLayers is set to false when either there is no cached image or a break occurs where - // the instructions in the Dockerfile change from a previous build. - // Don't check for cache if b.noCache is set to true. - if checkForLayers && !b.noCache { - cacheID, err = b.layerExists(ctx, node, children[:i]) + // If we're using the cache, and we've managed to stick with + // cached images so far, look for one that matches what we + // expect to produce for this instruction. + if checkForLayers && s.executor.useCache { + cacheID, err = s.layerExists(ctx, node, children[:i]) if err != nil { - return errors.Wrap(err, "error checking if cached image exists from a previous build") + return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") } } - if cacheID != "" { - fmt.Fprintf(b.out, "--> Using cache %s\n", cacheID) + fmt.Fprintf(s.executor.out, "--> Using cache %s\n", cacheID) } - // If a cache is found for the last step, that means nothing in the - // Dockerfile changed. Just create a copy of the existing image and - // save it with the new name passed in by the user. + // If a cache is found and we're on the last step, that means + // nothing in this phase changed. Just create a copy of the + // existing image and save it with the name that we were going + // to assign to the one that we were building, and make sure + // that the builder's root fs matches it. 
if cacheID != "" && i == len(children)-1 { - if err := b.copyExistingImage(ctx, cacheID); err != nil { - return err + if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil { + return "", nil, err } - b.containerIDs = append(b.containerIDs, b.builder.ContainerID) break } + // If we didn't find a cached step that we could just reuse, + // process the instruction and commit the layer. if cacheID == "" || !checkForLayers { checkForLayers = false - err := ib.Run(step, b, requiresStart) + err := ib.Run(step, s, noRunsRemaining) if err != nil { - return errors.Wrapf(err, "error building at step %+v", *step) + return "", nil, errors.Wrapf(err, "error building at step %+v", *step) } } // Commit if no cache is found if cacheID == "" { - imgID, _, err = b.Commit(ctx, ib, getCreatedBy(node)) + imgID, ref, err = s.Commit(ctx, ib, getCreatedBy(node), commitName) if err != nil { - return errors.Wrapf(err, "error committing container for step %+v", *step) + return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) } if i == len(children)-1 { - b.log("COMMIT %s", b.output) + s.executor.log("COMMIT %s", commitName) } } else { - // Cache is found, assign imgID the id of the cached image so - // it is used to create the container for the next step. + // If we did find a cache, reuse the cached image's ID + // as the basis for the container for the next step. imgID = cacheID } - // Add container ID of successful intermediate container to b.containerIDs - b.containerIDs = append(b.containerIDs, b.builder.ContainerID) + // Prepare for the next step with imgID as the new base image. 
- if i != len(children)-1 { - if err := b.Prepare(ctx, stage, imgID); err != nil { - return errors.Wrap(err, "error preparing container for next step") + if i < len(children)-1 { + s.containerIDs = append(s.containerIDs, s.builder.ContainerID) + if err := s.Prepare(ctx, stage, imgID); err != nil { + return "", nil, errors.Wrap(err, "error preparing container for next step") } } } - if len(leftoverArgs) > 0 { - fmt.Fprintf(b.out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + + if s.executor.layers { // print out the final imageID if we're using layers flag + fmt.Fprintf(s.executor.out, "--> %s\n", imgID) } - return nil + + return imgID, ref, nil } -// copyExistingImage creates a copy of an image already in store -func (b *Executor) copyExistingImage(ctx context.Context, cacheID string) error { +// copyExistingImage creates a copy of an image already in the store +func (s *StageExecutor) copyExistingImage(ctx context.Context, cacheID, output string) (string, reference.Canonical, error) { // Get the destination Image Reference - dest, err := b.resolveNameToImageRef() + dest, err := s.executor.resolveNameToImageRef(output) if err != nil { - return err + return "", nil, err } - policyContext, err := util.GetPolicyContext(b.systemContext) + policyContext, err := util.GetPolicyContext(s.executor.systemContext) if err != nil { - return err + return "", nil, err } defer policyContext.Destroy() // Look up the source image, expecting it to be in local storage - src, err := is.Transport.ParseStoreReference(b.store, cacheID) + src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID) if err != nil { - return errors.Wrapf(err, "error getting source imageReference for %q", cacheID) + return "", nil, errors.Wrapf(err, "error getting source imageReference for %q", cacheID) } - if _, err := cp.Image(ctx, policyContext, dest, src, nil); err != nil { - return errors.Wrapf(err, "error copying image %q", cacheID) + manifestBytes, err := 
cp.Image(ctx, policyContext, dest, src, nil) + if err != nil { + return "", nil, errors.Wrapf(err, "error copying image %q", cacheID) } - b.log("COMMIT %s", b.output) - return nil + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return "", nil, errors.Wrapf(err, "error computing digest of manifest for image %q", cacheID) + } + img, err := is.Transport.GetStoreImage(s.executor.store, dest) + if err != nil { + return "", nil, errors.Wrapf(err, "error locating new copy of image %q (i.e., %q)", cacheID, transports.ImageName(dest)) + } + s.executor.log("COMMIT %s", s.output) + var ref reference.Canonical + if dref := dest.DockerReference(); dref != nil { + if ref, err = reference.WithDigest(dref, manifestDigest); err != nil { + return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q (i.e., %q)", cacheID, transports.ImageName(dest)) + } + } + return img.ID, ref, nil } // layerExists returns true if an intermediate image of currNode exists in the image store from a previous build. // It verifies this by checking the parent of the top layer of the image and the history. 
-func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, children []*parser.Node) (string, error) { +func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node, children []*parser.Node) (string, error) { // Get the list of images available in the image store - images, err := b.store.Images() + images, err := s.executor.store.Images() if err != nil { return "", errors.Wrap(err, "error getting image list from store") } for _, image := range images { - layer, err := b.store.Layer(image.TopLayer) + layer, err := s.executor.store.Layer(image.TopLayer) if err != nil { return "", errors.Wrapf(err, "error getting top layer info") } @@ -976,8 +1053,8 @@ func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, child // it means that this image is potentially a cached intermediate image from a previous // build. Next we double check that the history of this image is equivalent to the previous // lines in the Dockerfile up till the point we are at in the build. - if layer.Parent == b.topLayers[len(b.topLayers)-1] { - history, err := b.getImageHistory(ctx, image.ID) + if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] { + history, err := s.executor.getImageHistory(ctx, image.ID) if err != nil { return "", errors.Wrapf(err, "error getting history of %q", image.ID) } @@ -985,7 +1062,7 @@ func (b *Executor) layerExists(ctx context.Context, currNode *parser.Node, child if historyMatches(append(children, currNode), history) { // This checks if the files copied during build have been changed if the node is // a COPY or ADD command. 
- filesMatch, err := b.copiedFilesMatch(currNode, history[len(history)-1].Created) + filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created) if err != nil { return "", errors.Wrapf(err, "error checking if copied files match") } @@ -1045,24 +1122,16 @@ func historyMatches(children []*parser.Node, history []v1.History) bool { // getFilesToCopy goes through node to get all the src files that are copied, added or downloaded. // It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix. // Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character. -func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) { +func (s *StageExecutor) getFilesToCopy(node *parser.Node) ([]string, error) { currNode := node.Next var src []string for currNode.Next != nil { - if currNode.Next == nil { - break - } if strings.HasPrefix(currNode.Value, "http://") || strings.HasPrefix(currNode.Value, "https://") { src = append(src, currNode.Value) currNode = currNode.Next continue } - if b.copyFrom != "" { - src = append(src, filepath.Join(b.copyFrom, currNode.Value)) - currNode = currNode.Next - continue - } - matches, err := filepath.Glob(filepath.Join(b.contextDir, currNode.Value)) + matches, err := filepath.Glob(filepath.Join(s.copyFrom, currNode.Value)) if err != nil { return nil, errors.Wrapf(err, "error finding match for pattern %q", currNode.Value) } @@ -1076,12 +1145,12 @@ func (b *Executor) getFilesToCopy(node *parser.Node) ([]string, error) { // If it is either of those two it checks the timestamps on all the files copied/added // by the dockerfile. If the host version has a time stamp greater than the time stamp // of the build, the build will not use the cached version and will rebuild. 
-func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) { +func (s *StageExecutor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) (bool, error) { if node.Value != "add" && node.Value != "copy" { return true, nil } - src, err := b.getFilesToCopy(node) + src, err := s.getFilesToCopy(node) if err != nil { return false, err } @@ -1102,12 +1171,7 @@ func (b *Executor) copiedFilesMatch(node *parser.Node, historyTime *time.Time) ( // Change the time format to ensure we don't run into a parsing error when converting again from string // to time.Time. It is a known Go issue that the conversions cause errors sometimes, so specifying a particular // time format here when converting to a string. - // If the COPY has --from in the command, change the rootdir to mountpoint of the container it is copying from - rootdir := b.contextDir - if b.copyFrom != "" { - rootdir = b.copyFrom - } - timeIsGreater, err := resolveModifiedTime(rootdir, item, historyTime.Format(time.RFC3339Nano)) + timeIsGreater, err := resolveModifiedTime(s.copyFrom, item, historyTime.Format(time.RFC3339Nano)) if err != nil { return false, errors.Wrapf(err, "error resolving symlinks and comparing modified times: %q", item) } @@ -1139,43 +1203,45 @@ func urlContentModified(url string, historyTime *time.Time) (bool, error) { // Commit writes the container's contents to an image, using a passed-in tag as // the name if there is one, generating a unique ID-based one otherwise. 
-func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string) (string, reference.Canonical, error) { - imageRef, err := b.resolveNameToImageRef() +func (s *StageExecutor) Commit(ctx context.Context, ib *imagebuilder.Builder, createdBy, output string) (string, reference.Canonical, error) { + imageRef, err := s.executor.resolveNameToImageRef(output) if err != nil { return "", nil, err } if ib.Author != "" { - b.builder.SetMaintainer(ib.Author) + s.builder.SetMaintainer(ib.Author) } config := ib.Config() - b.builder.SetCreatedBy(createdBy) - b.builder.SetHostname(config.Hostname) - b.builder.SetDomainname(config.Domainname) - b.builder.SetUser(config.User) - b.builder.ClearPorts() + if createdBy != "" { + s.builder.SetCreatedBy(createdBy) + } + s.builder.SetHostname(config.Hostname) + s.builder.SetDomainname(config.Domainname) + s.builder.SetUser(config.User) + s.builder.ClearPorts() for p := range config.ExposedPorts { - b.builder.SetPort(string(p)) + s.builder.SetPort(string(p)) } for _, envSpec := range config.Env { spec := strings.SplitN(envSpec, "=", 2) - b.builder.SetEnv(spec[0], spec[1]) + s.builder.SetEnv(spec[0], spec[1]) } - b.builder.SetCmd(config.Cmd) - b.builder.ClearVolumes() + s.builder.SetCmd(config.Cmd) + s.builder.ClearVolumes() for v := range config.Volumes { - b.builder.AddVolume(v) + s.builder.AddVolume(v) } - b.builder.ClearOnBuild() + s.builder.ClearOnBuild() for _, onBuildSpec := range config.OnBuild { - b.builder.SetOnBuild(onBuildSpec) + s.builder.SetOnBuild(onBuildSpec) } - b.builder.SetWorkDir(config.WorkingDir) - b.builder.SetEntrypoint(config.Entrypoint) - b.builder.SetShell(config.Shell) - b.builder.SetStopSignal(config.StopSignal) + s.builder.SetWorkDir(config.WorkingDir) + s.builder.SetEntrypoint(config.Entrypoint) + s.builder.SetShell(config.Shell) + s.builder.SetStopSignal(config.StopSignal) if config.Healthcheck != nil { - b.builder.SetHealthcheck(&buildahdocker.HealthConfig{ + 
s.builder.SetHealthcheck(&buildahdocker.HealthConfig{ Test: append([]string{}, config.Healthcheck.Test...), Interval: config.Healthcheck.Interval, Timeout: config.Healthcheck.Timeout, @@ -1183,79 +1249,124 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created Retries: config.Healthcheck.Retries, }) } else { - b.builder.SetHealthcheck(nil) + s.builder.SetHealthcheck(nil) } - b.builder.ClearLabels() + s.builder.ClearLabels() for k, v := range config.Labels { - b.builder.SetLabel(k, v) + s.builder.SetLabel(k, v) } - for _, labelSpec := range b.labels { + for _, labelSpec := range s.executor.labels { label := strings.SplitN(labelSpec, "=", 2) if len(label) > 1 { - b.builder.SetLabel(label[0], label[1]) + s.builder.SetLabel(label[0], label[1]) } else { - b.builder.SetLabel(label[0], "") + s.builder.SetLabel(label[0], "") } } - for _, annotationSpec := range b.annotations { + for _, annotationSpec := range s.executor.annotations { annotation := strings.SplitN(annotationSpec, "=", 2) if len(annotation) > 1 { - b.builder.SetAnnotation(annotation[0], annotation[1]) + s.builder.SetAnnotation(annotation[0], annotation[1]) } else { - b.builder.SetAnnotation(annotation[0], "") + s.builder.SetAnnotation(annotation[0], "") } } if imageRef != nil { logName := transports.ImageName(imageRef) logrus.Debugf("COMMIT %q", logName) - if !b.quiet && !b.layers && !b.noCache { - b.log("COMMIT %s", logName) + if !s.executor.quiet && !s.executor.layers && s.executor.useCache { + s.executor.log("COMMIT %s", logName) } } else { logrus.Debugf("COMMIT") - if !b.quiet && !b.layers && !b.noCache { - b.log("COMMIT") + if !s.executor.quiet && !s.executor.layers && s.executor.useCache { + s.executor.log("COMMIT") } } - writer := b.reportWriter - if b.layers || b.noCache { + writer := s.executor.reportWriter + if s.executor.layers || !s.executor.useCache { writer = nil } options := buildah.CommitOptions{ - Compression: b.compression, - SignaturePolicyPath: 
b.signaturePolicyPath, - AdditionalTags: b.additionalTags, + Compression: s.executor.compression, + SignaturePolicyPath: s.executor.signaturePolicyPath, + AdditionalTags: s.executor.additionalTags, ReportWriter: writer, - PreferredManifestType: b.outputFormat, - SystemContext: b.systemContext, - IIDFile: b.iidfile, - Squash: b.squash, - BlobDirectory: b.blobDirectory, - Parent: b.builder.FromImageID, - } - imgID, ref, _, err := b.builder.Commit(ctx, imageRef, options) + PreferredManifestType: s.executor.outputFormat, + SystemContext: s.executor.systemContext, + IIDFile: s.executor.iidfile, + Squash: s.executor.squash, + BlobDirectory: s.executor.blobDirectory, + Parent: s.builder.FromImageID, + } + imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { return "", nil, err } if options.IIDFile == "" && imgID != "" { - fmt.Fprintf(b.out, "--> %s\n", imgID) + fmt.Fprintf(s.executor.out, "--> %s\n", imgID) + } + var ref reference.Canonical + if dref := imageRef.DockerReference(); dref != nil { + if ref, err = reference.WithDigest(dref, manifestDigest); err != nil { + return "", nil, errors.Wrapf(err, "error computing canonical reference for new image %q", imgID) + } } return imgID, ref, nil } // Build takes care of the details of running Prepare/Execute/Commit/Delete // over each of the one or more parsed Dockerfiles and stages. 
-func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (string, reference.Canonical, error) { +func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) { if len(stages) == 0 { - errors.New("error building: no stages to build") + return "", nil, errors.New("error building: no stages to build") } var ( - stageExecutor *Executor - lastErr error + stageExecutor *StageExecutor + cleanupImages []string ) - b.imageMap = make(map[string]string) - stageCount := 0 - for _, stage := range stages { + cleanupStages := make(map[int]*StageExecutor) + + cleanup := func() error { + var lastErr error + // Clean up any containers associated with the final container + // built by a stage, for stages that succeeded, since we no + // longer need their filesystem contents. + for _, stage := range cleanupStages { + if err := stage.Delete(); err != nil { + logrus.Debugf("Failed to cleanup stage containers: %v", err) + lastErr = err + } + } + cleanupStages = nil + // Clean up any intermediate containers associated with stages, + // since we're not keeping them for debugging. + if b.removeIntermediateCtrs { + if err := b.deleteSuccessfulIntermediateCtrs(); err != nil { + logrus.Debugf("Failed to cleanup intermediate containers: %v", err) + lastErr = err + } + } + // Remove images from stages except the last one, since we're + // not going to use them as a starting point for any new + // stages. 
+ for i := range cleanupImages { + removeID := cleanupImages[len(cleanupImages)-i-1] + if _, err := b.store.DeleteImage(removeID, true); err != nil { + logrus.Debugf("failed to remove intermediate image %q: %v", removeID, err) + if b.forceRmIntermediateCtrs || errors.Cause(err) != storage.ErrImageUsedByContainer { + lastErr = err + } + } + } + cleanupImages = nil + return lastErr + } + defer cleanup() + + for stageIndex, stage := range stages { + var lastErr error + ib := stage.Builder node := stage.Node base, err := ib.From(node) @@ -1264,82 +1375,73 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin return "", nil, err } - stageExecutor = b.withName(stage.Name, stage.Position, base) + // If this is the last stage, then the image that we produce at + // its end should be given the desired output name. + output := "" + if stageIndex == len(stages)-1 { + output = b.output + } + + stageExecutor = b.startStage(stage.Name, stage.Position, len(stages), base, output) if err := stageExecutor.Prepare(ctx, stage, base); err != nil { return "", nil, err } + // Always remove the intermediate/build containers, even if the build was unsuccessful. // If building with layers, remove all intermediate/build containers if b.forceRmIntermediateCtrs // is true. - if b.forceRmIntermediateCtrs || (!b.layers && !b.noCache) { - defer stageExecutor.Delete() + if b.forceRmIntermediateCtrs || !b.layers { + cleanupStages[stage.Position] = stageExecutor } - if err := stageExecutor.Execute(ctx, stage); err != nil { + if imageID, ref, err = stageExecutor.Execute(ctx, stage); err != nil { lastErr = err } - - // Delete the successful intermediate containers if an error in the build - // process occurs and b.removeIntermediateCtrs is true. if lastErr != nil { - if b.removeIntermediateCtrs { - stageExecutor.deleteSuccessfulIntermediateCtrs() - } return "", nil, lastErr } - b.containerIDs = append(b.containerIDs, stageExecutor.containerIDs...) 
- // If we've a stage.Name with alpha and not numeric, we've an - // AS clause in play. Create an intermediate image for this - // stage to be used by other FROM statements that will want - // to use it later in the Dockerfile. Note the id in our map. + if !b.forceRmIntermediateCtrs && b.removeIntermediateCtrs { + cleanupStages[stage.Position] = stageExecutor + } + + // If this is an intermediate stage, make a note to remove its + // image later. if _, err := strconv.Atoi(stage.Name); err != nil { - imgID, _, err := stageExecutor.Commit(ctx, stages[stageCount].Builder, "") - if err != nil { + if imageID, ref, err = stageExecutor.Commit(ctx, stages[stageIndex].Builder, "", output); err != nil { return "", nil, err } - b.imageMap[stage.Name] = imgID + b.imageMap[stage.Name] = imageID + cleanupImages = append(cleanupImages, imageID) } - stageCount++ + } + if len(b.unusedArgs) > 0 { + unusedList := make([]string, 0, len(b.unusedArgs)) + for k := range b.unusedArgs { + unusedList = append(unusedList, k) + } + sort.Strings(unusedList) + fmt.Fprintf(b.out, "[Warning] one or more build args were not consumed: %v\n", unusedList) } - var imageRef reference.Canonical - imageID := "" - - // Check if we have a one line Dockerfile making layers irrelevant - // or the user told us to ignore layers. + // Check if we have a one line Dockerfile (i.e., single phase, no + // actual steps) making layers irrelevant, or the user told us to + // ignore layers. 
singleLineDockerfile := (len(stages) < 2 && len(stages[0].Node.Children) < 1) - ignoreLayers := singleLineDockerfile || !b.layers && !b.noCache + ignoreLayers := singleLineDockerfile || !b.layers && b.useCache if ignoreLayers { - imgID, ref, err := stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "") - if err != nil { + if imageID, ref, err = stageExecutor.Commit(ctx, stages[len(stages)-1].Builder, "", b.output); err != nil { return "", nil, err } if singleLineDockerfile { b.log("COMMIT %s", ref) } - imageID = imgID - imageRef = ref - } - // If building with layers and b.removeIntermediateCtrs is true - // only remove intermediate container for each step if an error - // during the build process doesn't occur. - // If the build is unsuccessful, the container created at the step - // the failure happened will persist in the container store. - // This if condition will be false if not building with layers and - // the removal of intermediate/build containers will be handled by the - // defer statement above. - if b.removeIntermediateCtrs && (b.layers || b.noCache) { - if err := b.deleteSuccessfulIntermediateCtrs(); err != nil { - return "", nil, errors.Errorf("Failed to cleanup intermediate containers") - } } - // Remove intermediate images that we created for AS clause handling - for _, value := range b.imageMap { - if _, err := b.store.DeleteImage(value, true); err != nil { - logrus.Debugf("unable to remove intermediate image %q: %v", value, err) - } + + if err := cleanup(); err != nil { + return "", nil, err } - return imageID, imageRef, nil + + return imageID, ref, nil } // BuildDockerfiles parses a set of one or more Dockerfiles (which may be @@ -1450,7 +1552,6 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt // prepending a new FROM statement the Dockerfile that do not already have a corresponding // FROM command within them. 
func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser { - var newDockerfiles []io.ReadCloser // fromMap contains the names of the images seen in a FROM // line in the Dockerfiles. The boolean value just completes the map object. @@ -1520,23 +1621,31 @@ func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser { return newDockerfiles } -// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs -// and deletes the containers associated with that ID. +// deleteSuccessfulIntermediateCtrs goes through the container IDs in each +// stage's containerIDs list and deletes the containers associated with those +// IDs. func (b *Executor) deleteSuccessfulIntermediateCtrs() error { var lastErr error - for _, ctr := range b.containerIDs { - if err := b.store.DeleteContainer(ctr); err != nil { - logrus.Errorf("error deleting build container %q: %v\n", ctr, err) - lastErr = err + for _, s := range b.stages { + for _, ctr := range s.containerIDs { + if err := b.store.DeleteContainer(ctr); err != nil { + logrus.Errorf("error deleting build container %q: %v\n", ctr, err) + lastErr = err + } } + // The stages map includes some stages under multiple keys, so + // clearing their lists after we process a given stage is + // necessary to avoid triggering errors that would occur if we + // tried to delete a given stage's containers multiple times. 
+ s.containerIDs = nil } return lastErr } -func (b *Executor) EnsureContainerPath(path string) error { - _, err := os.Stat(filepath.Join(b.mountPoint, path)) +func (s *StageExecutor) EnsureContainerPath(path string) error { + _, err := os.Stat(filepath.Join(s.mountPoint, path)) if err != nil && os.IsNotExist(err) { - err = os.MkdirAll(filepath.Join(b.mountPoint, path), 0755) + err = os.MkdirAll(filepath.Join(s.mountPoint, path), 0755) } if err != nil { return errors.Wrapf(err, "error ensuring container path %q", path) diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go index 4f5301b73..35dc5438a 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/util.go +++ b/vendor/github.com/containers/buildah/imagebuildah/util.go @@ -111,28 +111,3 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err func InitReexec() bool { return buildah.InitReexec() } - -// ReposToMap parses the specified repotags and returns a map with repositories -// as keys and the corresponding arrays of tags as values. 
-func ReposToMap(repotags []string) map[string][]string { - // map format is repo -> tag - repos := make(map[string][]string) - for _, repo := range repotags { - var repository, tag string - if strings.Contains(repo, ":") { - li := strings.LastIndex(repo, ":") - repository = repo[0:li] - tag = repo[li+1:] - } else if len(repo) > 0 { - repository = repo - tag = "" - } else { - logrus.Warnf("Found image with empty name") - } - repos[repository] = append(repos[repository], tag) - } - if len(repos) == 0 { - repos[""] = []string{""} - } - return repos -} diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index f5f156be2..418487438 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -17,7 +17,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system return nil, errors.Errorf("Internal error: imageID is empty in importBuilderDataFromImage") } - uidmap, gidmap := convertStorageIDMaps(storage.DefaultStoreOptions.UIDMap, storage.DefaultStoreOptions.GIDMap) + storeopts, err := storage.DefaultStoreOptions(false, 0) + if err != nil { + return nil, err + } + uidmap, gidmap := convertStorageIDMaps(storeopts.UIDMap, storeopts.GIDMap) ref, err := is.Transport.ParseStoreReference(store, imageID) if err != nil { @@ -83,7 +87,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio return nil, err } - systemContext := getSystemContext(&types.SystemContext{}, options.SignaturePolicyPath) + systemContext := getSystemContext(store, &types.SystemContext{}, options.SignaturePolicyPath) builder, err := importBuilderDataFromImage(ctx, store, systemContext, c.ImageID, options.Container, c.ID) if err != nil { @@ -115,7 +119,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im return nil, errors.Errorf("image name must be specified") } - systemContext := 
getSystemContext(options.SystemContext, options.SignaturePolicyPath) + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) _, img, err := util.FindImage(store, "", systemContext, options.Image) if err != nil { diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index 8cd5e4438..7c73da87e 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/containers/libpod/pkg/rootless" + "github.com/containers/buildah/unshare" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/sirupsen/logrus" @@ -47,7 +47,7 @@ func hostInfo() (map[string]interface{}, error) { info["os"] = runtime.GOOS info["arch"] = runtime.GOARCH info["cpus"] = runtime.NumCPU() - info["rootless"] = rootless.IsRootless() + info["rootless"] = unshare.IsRootless() mi, err := system.ReadMemInfo() if err != nil { logrus.Error(err, "err reading memory info") diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 262c90220..29546caba 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -29,7 +29,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.Ima ReportWriter: options.ReportWriter, Store: store, SystemContext: options.SystemContext, - BlobDirectory: options.PullBlobDirectory, + BlobDirectory: options.BlobDirectory, } ref, err := pullImage(ctx, store, srcRef, pullOptions, sc) if err != nil { @@ -244,7 +244,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions options.FromImage = "" } - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) if options.FromImage != "" && 
options.FromImage != "scratch" { ref, _, img, err = resolveImage(ctx, systemContext, store, options) diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go new file mode 100644 index 000000000..c83dcc230 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go @@ -0,0 +1,108 @@ +package chrootuser + +import ( + "os/user" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchUser indicates that the user provided by the caller does not + // exist in /etc/passws + ErrNoSuchUser = errors.New("user does not exist in /etc/passwd") +) + +// GetUser will return the uid, gid of the user specified in the userspec +// it will use the /etc/passwd and /etc/group files inside of the rootdir +// to return this information. +// userspec format [user | user:group | uid | uid:gid | user:gid | uid:group ] +func GetUser(rootdir, userspec string) (uint32, uint32, error) { + var gid64 uint64 + var gerr error = user.UnknownGroupError("error looking up group") + + spec := strings.SplitN(userspec, ":", 2) + userspec = spec[0] + groupspec := "" + if userspec == "" { + return 0, 0, nil + } + if len(spec) > 1 { + groupspec = spec[1] + } + + uid64, uerr := strconv.ParseUint(userspec, 10, 32) + if uerr == nil && groupspec == "" { + // We parsed the user name as a number, and there's no group + // component, so try to look up the primary GID of the user who + // has this UID. + var name string + name, gid64, gerr = lookupGroupForUIDInContainer(rootdir, uid64) + if gerr == nil { + userspec = name + } else { + // Leave userspec alone, but swallow the error and just + // use GID 0. + gid64 = 0 + gerr = nil + } + } + if uerr != nil { + // The user ID couldn't be parsed as a number, so try to look + // up the user's UID and primary GID. 
+ uid64, gid64, uerr = lookupUserInContainer(rootdir, userspec) + gerr = uerr + } + + if groupspec != "" { + // We have a group name or number, so parse it. + gid64, gerr = strconv.ParseUint(groupspec, 10, 32) + if gerr != nil { + // The group couldn't be parsed as a number, so look up + // the group's GID. + gid64, gerr = lookupGroupInContainer(rootdir, groupspec) + } + } + + if uerr == nil && gerr == nil { + return uint32(uid64), uint32(gid64), nil + } + + err := errors.Wrapf(uerr, "error determining run uid") + if uerr == nil { + err = errors.Wrapf(gerr, "error determining run gid") + } + return 0, 0, err +} + +// GetGroup returns the gid by looking it up in the /etc/group file +// groupspec format [ group | gid ] +func GetGroup(rootdir, groupspec string) (uint32, error) { + gid64, gerr := strconv.ParseUint(groupspec, 10, 32) + if gerr != nil { + // The group couldn't be parsed as a number, so look up + // the group's GID. + gid64, gerr = lookupGroupInContainer(rootdir, groupspec) + } + if gerr != nil { + return 0, errors.Wrapf(gerr, "error looking up group for gid %q", groupspec) + } + return uint32(gid64), nil +} + +// GetAdditionalGroupsForUser returns a list of gids that userid is associated with +func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) { + gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid) + if err != nil { + return nil, errors.Wrapf(err, "error looking up supplemental groups for uid %d", userid) + } + return gids, nil +} + +// LookupUIDInContainer returns username and gid associated with a UID in a container +// it will use the /etc/passwd files inside of the rootdir +// to return this information. 
+func LookupUIDInContainer(rootdir string, uid uint64) (user string, gid uint64, err error) { + return lookupUIDInContainer(rootdir, uid) +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go new file mode 100644 index 000000000..79b0b24b5 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_basic.go @@ -0,0 +1,27 @@ +// +build !linux + +package chrootuser + +import ( + "github.com/pkg/errors" +) + +func lookupUserInContainer(rootdir, username string) (uint64, uint64, error) { + return 0, 0, errors.New("user lookup not supported") +} + +func lookupGroupInContainer(rootdir, groupname string) (uint64, error) { + return 0, errors.New("group lookup not supported") +} + +func lookupGroupForUIDInContainer(rootdir string, userid uint64) (string, uint64, error) { + return "", 0, errors.New("primary group lookup by uid not supported") +} + +func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) { + return nil, errors.New("supplemental groups list lookup by uid not supported") +} + +func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) { + return "", 0, errors.New("UID lookup not supported") +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go new file mode 100644 index 000000000..583eca569 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_linux.go @@ -0,0 +1,293 @@ +// +build linux + +package chrootuser + +import ( + "bufio" + "flag" + "fmt" + "io" + "os" + "os/exec" + "os/user" + "strconv" + "strings" + "sync" + + "github.com/containers/storage/pkg/reexec" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + openChrootedCommand = "chrootuser-open" +) + +func init() { + reexec.Register(openChrootedCommand, openChrootedFileMain) +} + +func 
openChrootedFileMain() { + status := 0 + flag.Parse() + if len(flag.Args()) < 1 { + os.Exit(1) + } + // Our first parameter is the directory to chroot into. + if err := unix.Chdir(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chdir(): %v", err) + os.Exit(1) + } + if err := unix.Chroot(flag.Arg(0)); err != nil { + fmt.Fprintf(os.Stderr, "chroot(): %v", err) + os.Exit(1) + } + // Anything else is a file we want to dump out. + for _, filename := range flag.Args()[1:] { + f, err := os.Open(filename) + if err != nil { + fmt.Fprintf(os.Stderr, "open(%q): %v", filename, err) + status = 1 + continue + } + _, err = io.Copy(os.Stdout, f) + if err != nil { + fmt.Fprintf(os.Stderr, "read(%q): %v", filename, err) + } + f.Close() + } + os.Exit(status) +} + +func openChrootedFile(rootdir, filename string) (*exec.Cmd, io.ReadCloser, error) { + // The child process expects a chroot and one or more filenames that + // will be consulted relative to the chroot directory and concatenated + // to its stdout. Start it up. + cmd := reexec.Command(openChrootedCommand, rootdir, filename) + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, nil, err + } + err = cmd.Start() + if err != nil { + return nil, nil, err + } + // Hand back the child's stdout for reading, and the child to reap. + return cmd, stdout, nil +} + +var ( + lookupUser, lookupGroup sync.Mutex +) + +type lookupPasswdEntry struct { + name string + uid uint64 + gid uint64 +} +type lookupGroupEntry struct { + name string + gid uint64 + user string +} + +func readWholeLine(rc *bufio.Reader) ([]byte, error) { + line, isPrefix, err := rc.ReadLine() + if err != nil { + return nil, err + } + for isPrefix { + // We didn't get a whole line. Keep reading chunks until we find an end of line, and discard them. 
+ for isPrefix { + logrus.Debugf("discarding partial line %q", string(line)) + _, isPrefix, err = rc.ReadLine() + if err != nil { + return nil, err + } + } + // That last read was the end of a line, so now we try to read the (beginning of?) the next line. + line, isPrefix, err = rc.ReadLine() + if err != nil { + return nil, err + } + } + return line, nil +} + +func parseNextPasswd(rc *bufio.Reader) *lookupPasswdEntry { + line, err := readWholeLine(rc) + if err != nil { + return nil + } + fields := strings.Split(string(line), ":") + if len(fields) < 7 { + return nil + } + uid, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil + } + gid, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil + } + return &lookupPasswdEntry{ + name: fields[0], + uid: uid, + gid: gid, + } +} + +func parseNextGroup(rc *bufio.Reader) *lookupGroupEntry { + line, err := readWholeLine(rc) + if err != nil { + return nil + } + fields := strings.Split(string(line), ":") + if len(fields) < 4 { + return nil + } + gid, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil + } + return &lookupGroupEntry{ + name: fields[0], + gid: gid, + user: fields[3], + } +} + +func lookupUserInContainer(rootdir, username string) (uid uint64, gid uint64, err error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return 0, 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewReader(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.name != username { + pwd = parseNextPasswd(rc) + continue + } + return pwd.uid, pwd.gid, nil + } + + return 0, 0, user.UnknownUserError(fmt.Sprintf("error looking up user %q", username)) +} + +func lookupGroupForUIDInContainer(rootdir string, userid uint64) (username string, gid uint64, err error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return "", 0, err + } 
+ defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewReader(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.uid != userid { + pwd = parseNextPasswd(rc) + continue + } + return pwd.name, pwd.gid, nil + } + + return "", 0, ErrNoSuchUser +} + +func lookupAdditionalGroupsForUIDInContainer(rootdir string, userid uint64) (gid []uint32, err error) { + // Get the username associated with userid + username, _, err := lookupGroupForUIDInContainer(rootdir, userid) + if err != nil { + return nil, err + } + + cmd, f, err := openChrootedFile(rootdir, "/etc/group") + if err != nil { + return nil, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewReader(f) + defer f.Close() + + lookupGroup.Lock() + defer lookupGroup.Unlock() + + grp := parseNextGroup(rc) + for grp != nil { + if strings.Contains(grp.user, username) { + gid = append(gid, uint32(grp.gid)) + } + grp = parseNextGroup(rc) + } + return gid, nil +} + +func lookupGroupInContainer(rootdir, groupname string) (gid uint64, err error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/group") + if err != nil { + return 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewReader(f) + defer f.Close() + + lookupGroup.Lock() + defer lookupGroup.Unlock() + + grp := parseNextGroup(rc) + for grp != nil { + if grp.name != groupname { + grp = parseNextGroup(rc) + continue + } + return grp.gid, nil + } + + return 0, user.UnknownGroupError(fmt.Sprintf("error looking up group %q", groupname)) +} + +func lookupUIDInContainer(rootdir string, uid uint64) (string, uint64, error) { + cmd, f, err := openChrootedFile(rootdir, "/etc/passwd") + if err != nil { + return "", 0, err + } + defer func() { + _ = cmd.Wait() + }() + rc := bufio.NewReader(f) + defer f.Close() + + lookupUser.Lock() + defer lookupUser.Unlock() + + pwd := parseNextPasswd(rc) + for pwd != nil { + if pwd.uid != uid { + pwd = parseNextPasswd(rc) + continue + } + 
return pwd.name, pwd.gid, nil + } + + return "", 0, user.UnknownUserError(fmt.Sprintf("error looking up uid %q", uid)) +} diff --git a/vendor/github.com/containers/buildah/pkg/formats/formats.go b/vendor/github.com/containers/buildah/pkg/formats/formats.go index 37f9b8a20..e95c32fc3 100644 --- a/vendor/github.com/containers/buildah/pkg/formats/formats.go +++ b/vendor/github.com/containers/buildah/pkg/formats/formats.go @@ -111,17 +111,13 @@ func (t StdoutTemplateArray) Out() error { if err != nil { return errors.Wrapf(err, parsingErrorStr) } - for i, raw := range t.Output { + for _, raw := range t.Output { basicTmpl := tmpl.Funcs(basicFunctions) if err := basicTmpl.Execute(w, raw); err != nil { return errors.Wrapf(err, parsingErrorStr) } - if i != len(t.Output)-1 { - fmt.Fprintln(w, "") - continue - } + fmt.Fprintln(w, "") } - fmt.Fprintln(w, "") return w.Flush() } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index c309f686a..50318315f 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -9,7 +9,6 @@ import ( "github.com/spf13/cobra" "net" "os" - "os/exec" "path/filepath" "strconv" "strings" @@ -393,25 +392,11 @@ func IDMappingOptions(c *cobra.Command, isolation buildah.Isolation) (usernsOpti gidmap = uidmap } - useSlirp4netns := false - - if isolation == buildah.IsolationOCIRootless { - _, err := exec.LookPath("slirp4netns") - if execerr, ok := err.(*exec.Error); ok && !strings.Contains(execerr.Error(), "not found") { - return nil, nil, errors.Wrapf(err, "cannot lookup slirp4netns %v", execerr) - } - if err == nil { - useSlirp4netns = true - } else { - logrus.Warningf("could not find slirp4netns. Using host network namespace") - } - } - // By default, having mappings configured means we use a user // namespace. Otherwise, we don't. 
usernsOption := buildah.NamespaceOption{ Name: string(specs.UserNamespace), - Host: len(uidmap) == 0 && len(gidmap) == 0 && !useSlirp4netns, + Host: len(uidmap) == 0 && len(gidmap) == 0, } // If the user specifically requested that we either use or don't use // user namespaces, override that default. diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go index 3b64f8952..97b681125 100644 --- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go @@ -7,7 +7,6 @@ import ( "path/filepath" "strings" - "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage/pkg/idtools" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" @@ -133,12 +132,12 @@ func getMountsMap(path string) (string, string, error) { } // SecretMounts copies, adds, and mounts the secrets to the container root filesystem -func SecretMounts(mountLabel, containerWorkingDir, mountFile string) []rspec.Mount { - return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0) +func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless bool) []rspec.Mount { + return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless) } // SecretMountsWithUIDGID specifies the uid/gid of the owner -func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int) []rspec.Mount { +func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless bool) []rspec.Mount { var ( secretMounts []rspec.Mount mountFiles []string @@ -148,17 +147,8 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre // Note for testing purposes only if mountFile == "" { mountFiles = append(mountFiles, 
[]string{OverrideMountsFile, DefaultMountsFile}...) - if rootless.IsRootless() { + if rootless { mountFiles = append([]string{UserOverrideMountsFile}, mountFiles...) - _, err := os.Stat(UserOverrideMountsFile) - if err != nil && os.IsNotExist(err) { - os.MkdirAll(filepath.Dir(UserOverrideMountsFile), 0755) - if f, err := os.Create(UserOverrideMountsFile); err != nil { - logrus.Warnf("could not create file %s: %v", UserOverrideMountsFile, err) - } else { - f.Close() - } - } } } else { mountFiles = append(mountFiles, mountFile) diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index d3c9870af..5eec1b3dd 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "strings" "github.com/containers/buildah/pkg/blobcache" @@ -153,13 +154,13 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef // Pull copies the contents of the image from somewhere else to local storage. 
func Pull(ctx context.Context, imageName string, options PullOptions) error { - systemContext := getSystemContext(options.SystemContext, options.SignaturePolicyPath) + systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath) boptions := BuilderOptions{ FromImage: imageName, SignaturePolicyPath: options.SignaturePolicyPath, SystemContext: systemContext, - PullBlobDirectory: options.BlobDirectory, + BlobDirectory: options.BlobDirectory, ReportWriter: options.ReportWriter, } @@ -236,7 +237,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer if err != nil { return nil, errors.Wrapf(err, "error parsing image name %q", destName) } - var maybeCachedDestRef types.ImageReference = destRef + var maybeCachedDestRef = types.ImageReference(destRef) if options.BlobDirectory != "" { cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal) if err != nil { @@ -262,7 +263,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer }() logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName) - if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil { + if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(store, options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil { logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err) return nil, err } diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 2fa3cd572..cd6568b66 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -2,6 +2,7 @@ package buildah import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -21,6 +22,7 @@ import ( 
"github.com/containers/buildah/bind" "github.com/containers/buildah/chroot" "github.com/containers/buildah/pkg/secrets" + "github.com/containers/buildah/unshare" "github.com/containers/buildah/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" @@ -416,7 +418,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st } // Get the list of secrets mounts. - secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID)) + secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless()) // Add temporary copies of the contents of volume locations at the // volume locations, unless we already have something there. @@ -1720,7 +1722,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) { unix.CloseOnExec(fd) } - cmd := exec.Command(slirp4netns, "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0") + cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0") cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} @@ -1765,7 +1767,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo var netconf, undo []*libcni.NetworkConfigList if isolation == IsolationOCIRootless { - if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host { + if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { return setupRootlessNetwork(pid) } } @@ -1835,7 +1837,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) teardown = func() { for _, nc := range undo { - if err = cni.DelNetworkList(nc, rtconf[nc]); err != nil { + if err = cni.DelNetworkList(context.Background(), nc, 
rtconf[nc]); err != nil { logrus.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) } } @@ -1851,7 +1853,7 @@ func runConfigureNetwork(isolation Isolation, options RunOptions, configureNetwo CapabilityArgs: map[string]interface{}{}, } // Bring it up. - _, err := cni.AddNetworkList(nc, rtconf[nc]) + _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc]) if err != nil { return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) } diff --git a/vendor/github.com/containers/buildah/unshare/unshare.go b/vendor/github.com/containers/buildah/unshare/unshare.go index 1072c2035..91f4bb54a 100644 --- a/vendor/github.com/containers/buildah/unshare/unshare.go +++ b/vendor/github.com/containers/buildah/unshare/unshare.go @@ -11,6 +11,7 @@ import ( "runtime" "strconv" "strings" + "sync" "syscall" "github.com/containers/buildah/util" @@ -57,8 +58,8 @@ func (c *Cmd) Start() error { // Please the libpod "rootless" package to find the expected env variables. if os.Geteuid() != 0 { - c.Env = append(c.Env, "_LIBPOD_USERNS_CONFIGURED=done") - c.Env = append(c.Env, fmt.Sprintf("_LIBPOD_ROOTLESS_UID=%d", os.Geteuid())) + c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done") + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid())) } // Create the pipe for reading the child's PID. 
@@ -272,3 +273,36 @@ func (c *Cmd) CombinedOutput() ([]byte, error) { func (c *Cmd) Output() ([]byte, error) { return nil, errors.New("unshare: Output() not implemented") } + +var ( + isRootlessOnce sync.Once + isRootless bool +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + isRootlessOnce.Do(func() { + isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != "" + }) + return isRootless +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} diff --git a/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go b/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go new file mode 100644 index 000000000..3336fdad9 --- /dev/null +++ b/vendor/github.com/containers/buildah/unshare/unshare_unsupported.go @@ -0,0 +1,27 @@ +// +build !linux + +package unshare + +import ( + "os" +) + +const ( + // UsernsEnvName is the environment variable, if set indicates in rootless mode + UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + return false +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return os.Getuid() +} + +// RootlessEnv returns the environment settings for the rootless containers +func RootlessEnv() []string { + return append(os.Environ(), UsernsEnvName+"=") +} diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf 
index 53c2e673e..327de39b2 100644 --- a/vendor/github.com/containers/buildah/vendor.conf +++ b/vendor/github.com/containers/buildah/vendor.conf @@ -2,14 +2,13 @@ github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 github.com/blang/semver v3.5.0 github.com/BurntSushi/toml v0.2.0 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d -github.com/containernetworking/cni v0.7.0-alpha1 -github.com/containers/image v1.5 +github.com/containernetworking/cni v0.7.0-rc2 +github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185 github.com/vbauerster/mpb v3.3.4 github.com/mattn/go-isatty v0.0.4 github.com/VividCortex/ewma v1.1.1 github.com/boltdb/bolt v1.3.1 -github.com/containers/libpod v1.0 -github.com/containers/storage v1.11 +github.com/containers/storage v1.12.1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83 github.com/docker/docker-credential-helpers v0.6.1 @@ -39,7 +38,7 @@ github.com/opencontainers/runc v1.0.0-rc6 github.com/opencontainers/runtime-spec v1.0.0 github.com/opencontainers/runtime-tools v0.8.0 github.com/opencontainers/selinux v1.1 -github.com/openshift/imagebuilder 705fe9255c57f8505efb9723a9ac4082b67973bc +github.com/openshift/imagebuilder v1.1.0 github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b github.com/pkg/errors v0.8.1 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md index 8fd6e513e..571e8342e 100644 --- a/vendor/github.com/containers/image/README.md +++ b/vendor/github.com/containers/image/README.md @@ -65,7 +65,7 @@ the primary downside is that creating new signatures with the Golang-only implem - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport 
is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled and impossible to import when this build tag is in use. -## [Contributing](CONTRIBUTING.md)** +## [Contributing](CONTRIBUTING.md) Information about contributing to this project. diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index ba99336aa..3ed8a2b82 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -468,7 +468,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { } data := make([]copyLayerData, numLayers) - copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *mpb.Bar) { + copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) { defer copySemaphore.Release(1) defer copyGroup.Done() cld := copyLayerData{} @@ -483,24 +483,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } } else { - cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar) + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool) } data[index] = cld - bar.SetTotal(srcLayer.Size, true) } func() { // A scope for defer progressPool, progressCleanup := ic.c.newProgressPool(ctx) defer progressCleanup() - progressBars := make([]*mpb.Bar, numLayers) - for i, srcInfo := range srcInfos { - progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob") - } - for i, srcLayer := range srcInfos { copySemaphore.Acquire(ctx, 1) - go copyLayerHelper(i, srcLayer, progressBars[i]) + go copyLayerHelper(i, srcLayer, progressPool) } // Wait for all layers to be copied @@ -592,7 +586,7 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) { // createProgressBar creates a 
mpb.Bar in pool. Note that if the copier's reportWriter // is ioutil.Discard, the progress bar's output will be discarded -func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar { +func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { // shortDigestLen is the length of the digest used for blobs. const shortDigestLen = 12 @@ -604,11 +598,12 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind } bar := pool.AddBar(info.Size, + mpb.BarClearOnComplete(), mpb.PrependDecorators( decor.Name(prefix), ), mpb.AppendDecorators( - decor.CountersKibiByte("%.1f / %.1f"), + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete), ), ) if c.progressOutput == ioutil.Discard { @@ -629,7 +624,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { destInfo, err := func() (types.BlobInfo, error) { // A scope for defer progressPool, progressCleanup := c.newProgressPool(ctx) defer progressCleanup() - bar := c.createProgressBar(progressPool, srcInfo, "config") + bar := c.createProgressBar(progressPool, srcInfo, "config", "done") destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar) if err != nil { return types.BlobInfo{}, err @@ -656,7 +651,7 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) { +func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) { cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" 
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" @@ -668,6 +663,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba } if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) + bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists") + bar.SetTotal(0, true) return blobInfo, cachedDiffID, nil } } @@ -679,10 +676,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba } defer srcStream.Close() + bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done") + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar) if err != nil { return types.BlobInfo{}, "", err } + + diffID := cachedDiffID if diffIDIsNeeded { select { case <-ctx.Done(): @@ -695,11 +696,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) - return blobInfo, diffIDResult.digest, nil + diffID = diffIDResult.digest } - } else { - return blobInfo, cachedDiffID, nil } + + bar.SetTotal(srcInfo.Size, true) + return blobInfo, diffID, nil } // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. 
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 43eb22ba2..40f11c62a 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -197,7 +197,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) - username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref)) + username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 38500dd0e..c116cbec3 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -16,7 +16,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache" + "github.com/containers/image/pkg/blobinfocache/none" "github.com/containers/image/types" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" @@ -129,7 +129,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. - haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false) + haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) if err != nil { return types.BlobInfo{}, err } diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index cee60f824..351e73ea1 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -11,7 +11,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache" + "github.com/containers/image/pkg/blobinfocache/none" "github.com/containers/image/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -96,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache) + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) if err != nil { return nil, err } @@ -252,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ logrus.Debugf("Uploading empty layer during conversion to schema 1") // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. 
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false) + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) if err != nil { return nil, errors.Wrap(err, "Error uploading empty layer") } diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 6fe2a9a32..cdff26e06 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -7,7 +7,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache" + "github.com/containers/image/pkg/blobinfocache/none" "github.com/containers/image/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -61,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache) + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go deleted file mode 100644 index 4ee809134..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go +++ /dev/null @@ -1,329 +0,0 @@ -package blobinfocache - -import ( - "fmt" - "os" - "sync" - "time" - - "github.com/boltdb/bolt" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - 
-var ( - // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade - // we can simply start over with a different filename; update blobInfoCacheFilename. - - // FIXME: For CRI-O, does this need to hide information between different users? - - // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. - uncompressedDigestBucket = []byte("uncompressedDigest") - // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest - // (as a set of key=digest, value="" pairs) - digestByUncompressedBucket = []byte("digestByUncompressed") - // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing - // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value). - knownLocationsBucket = []byte("knownLocations") -) - -// Concurrency: -// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely -// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time. - -// pathLock contains a lock for a specific BoltDB database path. -type pathLock struct { - refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below! - mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database. -} - -var ( - // pathLocks contains a lock for each currently open file. - // This must be global so that independently created instances of boltDBCache exclude each other. - // The map is protected by pathLocksMutex. - // FIXME? Should this be based on device:inode numbers instead of paths instead? - pathLocks = map[string]*pathLock{} - pathLocksMutex = sync.Mutex{} -) - -// lockPath obtains the pathLock for path. 
-// The caller must call unlockPath eventually. -func lockPath(path string) { - pl := func() *pathLock { // A scope for defer - pathLocksMutex.Lock() - defer pathLocksMutex.Unlock() - pl, ok := pathLocks[path] - if ok { - pl.refCount++ - } else { - pl = &pathLock{refCount: 1, mutex: sync.Mutex{}} - pathLocks[path] = pl - } - return pl - }() - pl.mutex.Lock() -} - -// unlockPath releases the pathLock for path. -func unlockPath(path string) { - pathLocksMutex.Lock() - defer pathLocksMutex.Unlock() - pl, ok := pathLocks[path] - if !ok { - // Should this return an error instead? BlobInfoCache ultimately ignores errors… - panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path)) - } - pl.mutex.Unlock() - pl.refCount-- - if pl.refCount == 0 { - delete(pathLocks, path) - } -} - -// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path. -// -// Note that we don’t keep the database open across operations, because that would lock the file and block any other -// users; instead, we need to open/close it for every single write or lookup. -type boltDBCache struct { - path string -} - -// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path. -// Most users should call DefaultCache instead. -func NewBoltDBCache(path string) types.BlobInfoCache { - return &boltDBCache{path: path} -} - -// view returns runs the specified fn within a read-only transaction on the database. -func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) { - // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, - // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding - // a read lock, blocking any future writes. - // Hence this preliminary check, which is RACY: Another process could remove the file - // between the Lstat call and opening the database. 
- if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) { - return err - } - - lockPath(bdc.path) - defer unlockPath(bdc.path) - db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer func() { - if err := db.Close(); retErr == nil && err != nil { - retErr = err - } - }() - - return db.View(fn) -} - -// update returns runs the specified fn within a read-write transaction on the database. -func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) { - lockPath(bdc.path) - defer unlockPath(bdc.path) - db, err := bolt.Open(bdc.path, 0600, nil) - if err != nil { - return err - } - defer func() { - if err := db.Close(); retErr == nil && err != nil { - retErr = err - } - }() - - return db.Update(fn) -} - -// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction. -func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest { - if b := tx.Bucket(uncompressedDigestBucket); b != nil { - if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil { - d, err := digest.Parse(string(uncompressedBytes)) - if err == nil { - return d - } - // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - } - // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest. - // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings - // when we already record a (compressed, uncompressed) pair. - if b := tx.Bucket(digestByUncompressedBucket); b != nil { - if b = b.Bucket([]byte(anyDigest.String())); b != nil { - c := b.Cursor() - if k, _ := c.First(); k != nil { // The bucket is non-empty - return anyDigest - } - } - } - return "" -} - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. 
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - var res digest.Digest - if err := bdc.view(func(tx *bolt.Tx) error { - res = bdc.uncompressedDigest(tx, anyDigest) - return nil - }); err != nil { // Including os.IsNotExist(err) - return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - return res -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
-func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) - if err != nil { - return err - } - key := []byte(anyDigest.String()) - if previousBytes := b.Get(key); previousBytes != nil { - previous, err := digest.Parse(string(previousBytes)) - if err != nil { - return err - } - if previous != uncompressed { - logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) - } - } - if err := b.Put(key, []byte(uncompressed.String())); err != nil { - return err - } - - b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) - if err != nil { - return err - } - if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. - return err - } - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. 
-func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) - if err != nil { - return err - } - value, err := time.Now().MarshalBinary() - if err != nil { - return err - } - if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. - return err - } - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? -} - -// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. -func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime { - b := scopeBucket.Bucket([]byte(digest.String())) - if b == nil { - return candidates - } - _ = b.ForEach(func(k, v []byte) error { - t := time.Time{} - if err := t.UnmarshalBinary(v); err != nil { - return err - } - candidates = append(candidates, candidateWithTime{ - candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: types.BICLocationReference{Opaque: string(k)}, - }, - lastSeen: t, - }) - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? - return candidates -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). 
-// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. -func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - res := []candidateWithTime{} - var uncompressedDigestValue digest.Digest // = "" - if err := bdc.view(func(tx *bolt.Tx) error { - scopeBucket := tx.Bucket(knownLocationsBucket) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) - if scopeBucket == nil { - return nil - } - - res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest) - if canSubstitute { - if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { - b := tx.Bucket(digestByUncompressedBucket) - if b != nil { - b = b.Bucket([]byte(uncompressedDigestValue.String())) - if b != nil { - if err := b.ForEach(func(k, _ []byte) error { - d, err := digest.Parse(string(k)) - if err != nil { - return err - } - if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, d) - } - return nil - }); err != nil { - return err - } - } - } - if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue) - } - } - } - return nil - }); err != nil { // Including os.IsNotExist(err) - return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? 
- } - - return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go new file mode 100644 index 000000000..91d4e9137 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go @@ -0,0 +1,332 @@ +// Package boltdb implements a BlobInfoCache backed by BoltDB. +package boltdb + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/containers/image/pkg/blobinfocache/internal/prioritize" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +var ( + // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade + // we can simply start over with a different filename; update blobInfoCacheFilename. + + // FIXME: For CRI-O, does this need to hide information between different users? + + // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. + uncompressedDigestBucket = []byte("uncompressedDigest") + // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest + // (as a set of key=digest, value="" pairs) + digestByUncompressedBucket = []byte("digestByUncompressed") + // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing + // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value). + knownLocationsBucket = []byte("knownLocations") +) + +// Concurrency: +// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely +// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. 
So, we punt and only allow one at a time. + +// pathLock contains a lock for a specific BoltDB database path. +type pathLock struct { + refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below! + mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database. +} + +var ( + // pathLocks contains a lock for each currently open file. + // This must be global so that independently created instances of boltDBCache exclude each other. + // The map is protected by pathLocksMutex. + // FIXME? Should this be based on device:inode numbers instead of paths instead? + pathLocks = map[string]*pathLock{} + pathLocksMutex = sync.Mutex{} +) + +// lockPath obtains the pathLock for path. +// The caller must call unlockPath eventually. +func lockPath(path string) { + pl := func() *pathLock { // A scope for defer + pathLocksMutex.Lock() + defer pathLocksMutex.Unlock() + pl, ok := pathLocks[path] + if ok { + pl.refCount++ + } else { + pl = &pathLock{refCount: 1, mutex: sync.Mutex{}} + pathLocks[path] = pl + } + return pl + }() + pl.mutex.Lock() +} + +// unlockPath releases the pathLock for path. +func unlockPath(path string) { + pathLocksMutex.Lock() + defer pathLocksMutex.Unlock() + pl, ok := pathLocks[path] + if !ok { + // Should this return an error instead? BlobInfoCache ultimately ignores errors… + panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path)) + } + pl.mutex.Unlock() + pl.refCount-- + if pl.refCount == 0 { + delete(pathLocks, path) + } +} + +// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path. +// +// Note that we don’t keep the database open across operations, because that would lock the file and block any other +// users; instead, we need to open/close it for every single write or lookup. 
+type cache struct { + path string +} + +// New returns a BlobInfoCache implementation which uses a BoltDB file at path. +// +// Most users should call blobinfocache.DefaultCache instead. +func New(path string) types.BlobInfoCache { + return &cache{path: path} +} + +// view returns runs the specified fn within a read-only transaction on the database. +func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) { + // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, + // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding + // a read lock, blocking any future writes. + // Hence this preliminary check, which is RACY: Another process could remove the file + // between the Lstat call and opening the database. + if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) { + return err + } + + lockPath(bdc.path) + defer unlockPath(bdc.path) + db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) + if err != nil { + return err + } + defer func() { + if err := db.Close(); retErr == nil && err != nil { + retErr = err + } + }() + + return db.View(fn) +} + +// update returns runs the specified fn within a read-write transaction on the database. +func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) { + lockPath(bdc.path) + defer unlockPath(bdc.path) + db, err := bolt.Open(bdc.path, 0600, nil) + if err != nil { + return err + } + defer func() { + if err := db.Close(); retErr == nil && err != nil { + retErr = err + } + }() + + return db.Update(fn) +} + +// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction. 
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest { + if b := tx.Bucket(uncompressedDigestBucket); b != nil { + if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil { + d, err := digest.Parse(string(uncompressedBytes)) + if err == nil { + return d + } + // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + } + // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest. + // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. + if b := tx.Bucket(digestByUncompressedBucket); b != nil { + if b = b.Bucket([]byte(anyDigest.String())); b != nil { + c := b.Cursor() + if k, _ := c.First(); k != nil { // The bucket is non-empty + return anyDigest + } + } + } + return "" +} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + var res digest.Digest + if err := bdc.view(func(tx *bolt.Tx) error { + res = bdc.uncompressedDigest(tx, anyDigest) + return nil + }); err != nil { // Including os.IsNotExist(err) + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. 
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) + if err != nil { + return err + } + key := []byte(anyDigest.String()) + if previousBytes := b.Get(key); previousBytes != nil { + previous, err := digest.Parse(string(previousBytes)) + if err != nil { + return err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + } + if err := b.Put(key, []byte(uncompressed.String())); err != nil { + return err + } + + b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) + if err != nil { + return err + } + if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. 
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + _ = bdc.update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) + if err != nil { + return err + } + b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) + if err != nil { + return err + } + value, err := time.Now().MarshalBinary() + if err != nil { + return err + } + if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. + return err + } + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + +// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. +func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime { + b := scopeBucket.Bucket([]byte(digest.String())) + if b == nil { + return candidates + } + _ = b.ForEach(func(k, v []byte) error { + t := time.Time{} + if err := t.UnmarshalBinary(v); err != nil { + return err + } + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: types.BICReplacementCandidate{ + Digest: digest, + Location: types.BICLocationReference{Opaque: string(k)}, + }, + LastSeen: t, + }) + return nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). 
+// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + res := []prioritize.CandidateWithTime{} + var uncompressedDigestValue digest.Digest // = "" + if err := bdc.view(func(tx *bolt.Tx) error { + scopeBucket := tx.Bucket(knownLocationsBucket) + if scopeBucket == nil { + return nil + } + scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) + if scopeBucket == nil { + return nil + } + scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) + if scopeBucket == nil { + return nil + } + + res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest) + if canSubstitute { + if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { + b := tx.Bucket(digestByUncompressedBucket) + if b != nil { + b = b.Bucket([]byte(uncompressedDigestValue.String())) + if b != nil { + if err := b.ForEach(func(k, _ []byte) error { + d, err := digest.Parse(string(k)) + if err != nil { + return err + } + if d != primaryDigest && d != uncompressedDigestValue { + res = bdc.appendReplacementCandidates(res, scopeBucket, d) + } + return nil + }); err != nil { + return err + } + } + } + if uncompressedDigestValue != primaryDigest { + res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue) + } + } + } + return nil + }); err != nil { // Including os.IsNotExist(err) + return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? 
+ } + + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go index 459ae5c06..1e6e543b2 100644 --- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go +++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go @@ -5,6 +5,8 @@ import ( "os" "path/filepath" + "github.com/containers/image/pkg/blobinfocache/boltdb" + "github.com/containers/image/pkg/blobinfocache/memory" "github.com/containers/image/types" "github.com/sirupsen/logrus" ) @@ -50,14 +52,14 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { dir, err := blobInfoCacheDir(sys, os.Geteuid()) if err != nil { logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) - return NewMemoryCache() + return memory.New() } path := filepath.Join(dir, blobInfoCacheFilename) if err := os.MkdirAll(dir, 0700); err != nil { logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) - return NewMemoryCache() + return memory.New() } logrus.Debugf("Using blob info cache at %s", path) - return NewBoltDBCache(path) + return boltdb.New(path) } diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go new file mode 100644 index 000000000..5479319de --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -0,0 +1,110 @@ +// Package prioritize provides utilities for prioritizing locations in +// types.BlobInfoCache.CandidateLocations. 
+package prioritize + +import ( + "sort" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" +) + +// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, +// and therefore ultimately by types.BlobInfoCache.CandidateLocations. +// This is a heuristic/guess, and could well use a different value. +const replacementAttempts = 5 + +// CandidateWithTime is the input to types.BICReplacementCandidate prioritization. +type CandidateWithTime struct { + Candidate types.BICReplacementCandidate // The replacement candidate + LastSeen time.Time // Time the candidate was last known to exist (either read or written) +} + +// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, +// along with the specially-treated digest values for the implementation of sort.Interface.Less +type candidateSortState struct { + cs []CandidateWithTime // The entries to sort + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest +} + +func (css *candidateSortState) Len() int { + return len(css.cs) +} + +func (css *candidateSortState) Less(i, j int) bool { + xi := css.cs[i] + xj := css.cs[j] + + // primaryDigest entries come first, more recent first. + // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. 
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) + + // First, deal with the primaryDigest/uncompressedDigest cases: + if xi.Candidate.Digest != xj.Candidate.Digest { + // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter + if xi.Candidate.Digest == css.primaryDigest { + return true + } + if xj.Candidate.Digest == css.primaryDigest { + return false + } + if css.uncompressedDigest != "" { + if xi.Candidate.Digest == css.uncompressedDigest { + return false + } + if xj.Candidate.Digest == css.uncompressedDigest { + return true + } + } + } else { // xi.Candidate.Digest == xj.Candidate.Digest + // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time + if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { + return xi.LastSeen.After(xj.LastSeen) + } + } + + // Neither of the digests are primaryDigest/uncompressedDigest: + if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time + return xi.LastSeen.After(xj.LastSeen) + } + // Fall back to digest, if timestamps end up _exactly_ the same (how?!) + return xi.Candidate.Digest < xj.Candidate.Digest +} + +func (css *candidateSortState) Swap(i, j int) { + css.cs[i], css.cs[j] = css.cs[j], css.cs[i] +} + +// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the +// number of entries to limit, only to make testing simpler. +func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { + // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should + // compare equal. 
+ sort.Sort(&candidateSortState{ + cs: cs, + primaryDigest: primaryDigest, + uncompressedDigest: uncompressedDigest, + }) + + resLength := len(cs) + if resLength > maxCandidates { + resLength = maxCandidates + } + res := make([]types.BICReplacementCandidate, resLength) + for i := range res { + res[i] = cs[i].Candidate + } + return res +} + +// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, +// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), +// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. +// +// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course +// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) +func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { + return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go deleted file mode 100644 index cf6ca5263..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go +++ /dev/null @@ -1,141 +0,0 @@ -package blobinfocache - -import ( - "sync" - "time" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -// locationKey only exists to make lookup in knownLocations easier. 
-type locationKey struct { - transport string - scope types.BICTransportScope - blobDigest digest.Digest -} - -// memoryCache implements an in-memory-only BlobInfoCache -type memoryCache struct { - mutex *sync.Mutex // synchronizes concurrent accesses - uncompressedDigests map[digest.Digest]digest.Digest - digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest - knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference -} - -// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only. -// This is primarily intended for tests, but also used as a fallback if DefaultCache -// can’t determine, or set up, the location for a persistent cache. -// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache. -func NewMemoryCache() types.BlobInfoCache { - return &memoryCache{ - mutex: new(sync.Mutex), - uncompressedDigests: map[digest.Digest]digest.Digest{}, - digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, - knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, - } -} - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. -// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - mem.mutex.Lock() - defer mem.mutex.Unlock() - return mem.uncompressedDigest(anyDigest) -} - -// uncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. -// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). 
-func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest { - if d, ok := mem.uncompressedDigests[anyDigest]; ok { - return d - } - // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. - // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings - // when we already record a (compressed, uncompressed) pair. - if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { - return anyDigest - } - return "" -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { - mem.mutex.Lock() - defer mem.mutex.Unlock() - if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { - logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) - } - mem.uncompressedDigests[anyDigest] = uncompressed - - anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] - if !ok { - anyDigestSet = map[digest.Digest]struct{}{} - mem.digestsByUncompressed[uncompressed] = anyDigestSet - } - anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. 
-func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { - mem.mutex.Lock() - defer mem.mutex.Unlock() - key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} - locationScope, ok := mem.knownLocations[key] - if !ok { - locationScope = map[types.BICLocationReference]time.Time{} - mem.knownLocations[key] = locationScope - } - locationScope[location] = time.Now() // Possibly overwriting an older entry. -} - -// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. -func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime { - locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present - for l, t := range locations { - candidates = append(candidates, candidateWithTime{ - candidate: types.BICReplacementCandidate{ - Digest: digest, - Location: l, - }, - lastSeen: t, - }) - } - return candidates -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. 
-func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - mem.mutex.Lock() - defer mem.mutex.Unlock() - res := []candidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) - var uncompressedDigest digest.Digest // = "" - if canSubstitute { - if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" { - otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map - for d := range otherDigests { - if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d) - } - } - if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) - } - } - } - return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go new file mode 100644 index 000000000..dfb338634 --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go @@ -0,0 +1,145 @@ +// Package memory implements an in-memory BlobInfoCache. +package memory + +import ( + "sync" + "time" + + "github.com/containers/image/pkg/blobinfocache/internal/prioritize" + "github.com/containers/image/types" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// locationKey only exists to make lookup in knownLocations easier. +type locationKey struct { + transport string + scope types.BICTransportScope + blobDigest digest.Digest +} + +// cache implements an in-memory-only BlobInfoCache +type cache struct { + mutex sync.Mutex + // The following fields can only be accessed with mutex held. 
+ uncompressedDigests map[digest.Digest]digest.Digest + digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest + knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference +} + +// New returns a BlobInfoCache implementation which is in-memory only. +// +// This is primarily intended for tests, but also used as a fallback +// if blobinfocache.DefaultCache can’t determine, or set up, the +// location for a persistent cache. Most users should use +// blobinfocache.DefaultCache. instead of calling this directly. +// Manual users of types.{ImageSource,ImageDestination} might also use +// this instead of a persistent cache. +func New() types.BlobInfoCache { + return &cache{ + uncompressedDigests: map[digest.Digest]digest.Digest{}, + digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, + knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + } +} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + mem.mutex.Lock() + defer mem.mutex.Unlock() + return mem.uncompressedDigestLocked(anyDigest) +} + +// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held. +func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest { + if d, ok := mem.uncompressedDigests[anyDigest]; ok { + return d + } + // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. 
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings + // when we already record a (compressed, uncompressed) pair. + if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { + return anyDigest + } + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) + } + mem.uncompressedDigests[anyDigest] = uncompressed + + anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] + if !ok { + anyDigestSet = map[digest.Digest]struct{}{} + mem.digestsByUncompressed[uncompressed] = anyDigestSet + } + anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. 
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} + locationScope, ok := mem.knownLocations[key] + if !ok { + locationScope = map[types.BICLocationReference]time.Time{} + mem.knownLocations[key] = locationScope + } + locationScope[location] = time.Now() // Possibly overwriting an older entry. +} + +// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates. +func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime { + locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present + for l, t := range locations { + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: types.BICReplacementCandidate{ + Digest: digest, + Location: l, + }, + LastSeen: t, + }) + } + return candidates +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + mem.mutex.Lock() + defer mem.mutex.Unlock() + res := []prioritize.CandidateWithTime{} + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) + var uncompressedDigest digest.Digest // = "" + if canSubstitute { + if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { + otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map + for d := range otherDigests { + if d != primaryDigest && d != uncompressedDigest { + res = mem.appendReplacementCandidates(res, transport, scope, d) + } + } + if uncompressedDigest != primaryDigest { + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) + } + } + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none.go deleted file mode 100644 index 5658d89ff..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/none.go +++ /dev/null @@ -1,47 +0,0 @@ -package blobinfocache - -import ( - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -// noCache implements a dummy BlobInfoCache which records no data. -type noCache struct { -} - -// NoCache implements BlobInfoCache by not recording any data. -// -// This exists primarily for implementations of configGetter for Manifest.Inspect, -// because configs only have one representation. -// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache. -var NoCache types.BlobInfoCache = noCache{} - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. 
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - return "" -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. 
-func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - return nil -} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go new file mode 100644 index 000000000..e5dca25ce --- /dev/null +++ b/vendor/github.com/containers/image/pkg/blobinfocache/none/none.go @@ -0,0 +1,49 @@ +// Package none implements a dummy BlobInfoCache which records no data. +package none + +import ( + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" +) + +// noCache implements a dummy BlobInfoCache which records no data. +type noCache struct { +} + +// NoCache implements BlobInfoCache by not recording any data. +// +// This exists primarily for implementations of configGetter for +// Manifest.Inspect, because configs only have one representation. +// Any use of BlobInfoCache with blobs should usually use at least a +// short-lived cache, ideally blobinfocache.DefaultCache. +var NoCache types.BlobInfoCache = noCache{} + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. 
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return nil +} diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go deleted file mode 100644 index 02709aa1c..000000000 --- a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go +++ /dev/null @@ -1,108 +0,0 @@ -package blobinfocache - -import ( - "sort" - "time" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, -// and therefore ultimately by types.BlobInfoCache.CandidateLocations. -// This is a heuristic/guess, and could well use a different value. 
-const replacementAttempts = 5 - -// candidateWithTime is the input to types.BICReplacementCandidate prioritization. -type candidateWithTime struct { - candidate types.BICReplacementCandidate // The replacement candidate - lastSeen time.Time // Time the candidate was last known to exist (either read or written) -} - -// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, -// along with the specially-treated digest values for the implementation of sort.Interface.Less -type candidateSortState struct { - cs []candidateWithTime // The entries to sort - primaryDigest digest.Digest // The digest the user actually asked for - uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest -} - -func (css *candidateSortState) Len() int { - return len(css.cs) -} - -func (css *candidateSortState) Less(i, j int) bool { - xi := css.cs[i] - xj := css.cs[j] - - // primaryDigest entries come first, more recent first. - // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. 
- // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) - - // First, deal with the primaryDigest/uncompressedDigest cases: - if xi.candidate.Digest != xj.candidate.Digest { - // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter - if xi.candidate.Digest == css.primaryDigest { - return true - } - if xj.candidate.Digest == css.primaryDigest { - return false - } - if css.uncompressedDigest != "" { - if xi.candidate.Digest == css.uncompressedDigest { - return false - } - if xj.candidate.Digest == css.uncompressedDigest { - return true - } - } - } else { // xi.candidate.Digest == xj.candidate.Digest - // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time - if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { - return xi.lastSeen.After(xj.lastSeen) - } - } - - // Neither of the digests are primaryDigest/uncompressedDigest: - if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time - return xi.lastSeen.After(xj.lastSeen) - } - // Fall back to digest, if timestamps end up _exactly_ the same (how?!) - return xi.candidate.Digest < xj.candidate.Digest -} - -func (css *candidateSortState) Swap(i, j int) { - css.cs[i], css.cs[j] = css.cs[j], css.cs[i] -} - -// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the -// number of entries to limit, only to make testing simpler. -func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { - // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should - // compare equal. 
- sort.Sort(&candidateSortState{ - cs: cs, - primaryDigest: primaryDigest, - uncompressedDigest: uncompressedDigest, - }) - - resLength := len(cs) - if resLength > maxCandidates { - resLength = maxCandidates - } - res := make([]types.BICReplacementCandidate, resLength) - for i := range res { - res[i] = cs[i].candidate - } - return res -} - -// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, -// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), -// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. -// -// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course -// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) -func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { - return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) -} diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go index 39c0f2a55..12398e385 100644 --- a/vendor/github.com/containers/image/signature/policy_config.go +++ b/vendor/github.com/containers/image/signature/policy_config.go @@ -30,7 +30,7 @@ import ( // -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' var systemDefaultPolicyPath = builtinDefaultPolicyPath -// builtinDefaultPolicyPath is the policy pat used for DefaultPolicy(). +// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). 
// DO NOT change this, instead see systemDefaultPolicyPath above. const builtinDefaultPolicyPath = "/etc/containers/policy.json" diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 67dc6142b..b39d2bcc0 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -18,7 +18,7 @@ import ( "github.com/containers/image/image" "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/blobinfocache" + "github.com/containers/image/pkg/blobinfocache/none" "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" @@ -595,12 +595,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if !haveDiffID { // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), // or to even check if we had it. - // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller // that relies on using a blob digest that has never been seeen by the store had better call // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). 
logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false) + has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) if err != nil { return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) } @@ -732,7 +732,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -765,14 +765,14 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "error computing manifest digest") } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) return err } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -781,7 +781,7 @@ func (s 
*storageImageDestination) Commit(ctx context.Context) error { } // Save the signatures, if we have any. if len(s.signatures) > 0 { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 02d2f5c08..3a6be6e00 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -4,6 +4,7 @@ package storage import ( "fmt" + "os" "path/filepath" "strings" @@ -180,7 +181,10 @@ func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. 
if s.store == nil { - options := storage.DefaultStoreOptions + options, err := storage.DefaultStoreOptions(os.Getuid() != 0, os.Getuid()) + if err != nil { + return nil, err + } options.UIDMap = s.defaultUIDMap options.GIDMap = s.defaultGIDMap store, err := storage.GetStore(options) diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index 1c5b6b378..89b29722b 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,7 +1,7 @@ github.com/containers/image github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage master +github.com/containers/storage v1.12.1 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go index 2a3bc1b5c..9915cb2fa 100644 --- a/vendor/github.com/containers/image/version/version.go +++ b/vendor/github.com/containers/image/version/version.go @@ -8,10 +8,10 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 5 + VersionPatch = 6 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. 
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index 10d628dbe..bbac78b60 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -71,7 +71,7 @@ type Container struct { type ContainerStore interface { FileBasedStore MetadataStore - BigDataStore + ContainerBigDataStore FlaggableStore // Create creates a container that has a specified ID (or generates a @@ -456,7 +456,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) { return size, nil } if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { + if err = r.SetBigData(id, key, data); err == nil { c, ok := r.lookup(id) if !ok { return -1, ErrContainerUnknown @@ -464,6 +464,8 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) { if size, ok := c.BigDataSizes[key]; ok { return size, nil } + } else { + return -1, err } } return -1, ErrSizeUnknown @@ -484,7 +486,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { return d, nil } if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { + if err = r.SetBigData(id, key, data); err == nil { c, ok := r.lookup(id) if !ok { return "", ErrContainerUnknown @@ -492,6 +494,8 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { if d, ok := c.BigDataDigests[key]; ok { return d, nil } + } else { + return "", err } } return "", ErrDigestUnknown diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go index aef6becfe..40b912bb3 100644 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. 
-// source: containers.go +// source: ./containers.go package storage diff --git a/vendor/github.com/containers/storage/drivers/copy/copy.go b/vendor/github.com/containers/storage/drivers/copy/copy.go index 2617824c5..bcbc61284 100644 --- a/vendor/github.com/containers/storage/drivers/copy/copy.go +++ b/vendor/github.com/containers/storage/drivers/copy/copy.go @@ -19,6 +19,7 @@ import ( "syscall" "time" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" rsystem "github.com/opencontainers/runc/libcontainer/system" @@ -212,7 +213,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { return nil } - if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { return err } diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 93505f5fb..38b5a3ef3 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -8,7 +8,6 @@ import ( "strings" "time" - "github.com/containers/image/manifest" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/truncindex" @@ -117,7 +116,7 @@ type ImageStore interface { ROImageStore RWFileBasedStore RWMetadataStore - RWBigDataStore + RWImageBigDataStore FlaggableStore // Create creates an image that has a specified ID (or a random one) and @@ -272,7 +271,7 @@ func (r *imageStore) Load() error { } } } - if shouldSave && !r.IsReadWrite() { + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { return ErrDuplicateImageNames } r.images = images @@ -291,7 +290,7 @@ func (r *imageStore) Save() error { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) } if !r.Locked() { - return errors.New("image store is not 
locked") + return errors.New("image store is not locked for writing") } rpath := r.imagespath() if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { @@ -595,15 +594,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) { return size, nil } if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - image, ok := r.lookup(id) - if !ok { - return -1, ErrImageUnknown - } - if size, ok := image.BigDataSizes[key]; ok { - return size, nil - } - } + return int64(len(data)), nil } return -1, ErrSizeUnknown } @@ -622,17 +613,6 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { if d, ok := image.BigDataDigests[key]; ok { return d, nil } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - image, ok := r.lookup(id) - if !ok { - return "", ErrImageUnknown - } - if d, ok := image.BigDataDigests[key]; ok { - return d, nil - } - } - } return "", ErrDigestUnknown } @@ -655,7 +635,7 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { return modified } -func (r *imageStore) SetBigData(id, key string, data []byte) error { +func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { if key == "" { return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") } @@ -672,7 +652,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error { } var newDigest digest.Digest if bigDataNameIsManifest(key) { - if newDigest, err = manifest.Digest(data); err != nil { + if digestManifest == nil { + return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided") + } + if newDigest, err = digestManifest(data); err != nil { return errors.Wrapf(err, "error digesting manifest") } } else { diff --git a/vendor/github.com/containers/storage/images_ffjson.go 
b/vendor/github.com/containers/storage/images_ffjson.go index 6b40ebd59..539acfe93 100644 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. -// source: images.go +// source: ./images.go package storage diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index d612f0459..110e737b2 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -229,6 +229,7 @@ type LayerStore interface { type layerStore struct { lockfile Locker + mountsLockfile Locker rundir string driver drivers.Driver layerdir string @@ -291,7 +292,6 @@ func (r *layerStore) Load() error { idlist := []string{} ids := make(map[string]*Layer) names := make(map[string]*Layer) - mounts := make(map[string]*Layer) compressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string) if r.lockfile.IsReadWrite() { @@ -319,39 +319,29 @@ func (r *layerStore) Load() error { label.ReserveLabel(layer.MountLabel) } } + err = nil } - if shouldSave && !r.IsReadWrite() { + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { return ErrDuplicateLayerNames } - mpath := r.mountspath() - data, err = ioutil.ReadFile(mpath) - if err != nil && !os.IsNotExist(err) { - return err - } - layerMounts := []layerMountPoint{} - if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { - for _, mount := range layerMounts { - if mount.MountPoint != "" { - if layer, ok := ids[mount.ID]; ok { - mounts[mount.MountPoint] = layer - layer.MountPoint = mount.MountPoint - layer.MountCount = mount.MountCount - } - } - } - } r.layers = layers r.idindex = truncindex.NewTruncIndex(idlist) r.byid = ids r.byname = names - r.bymount = mounts r.bycompressedsum = compressedsums r.byuncompressedsum = uncompressedsums - err = nil + // Load and merge information 
about which layers are mounted, and where. + if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if err = r.loadMounts(); err != nil { + return err + } + } // Last step: if we're writable, try to remove anything that a previous // user of this storage area marked for deletion but didn't manage to // actually delete. - if r.IsReadWrite() { + if r.IsReadWrite() && r.Locked() { for _, layer := range r.layers { if layer.Flags == nil { layer.Flags = make(map[string]interface{}) @@ -373,12 +363,36 @@ func (r *layerStore) Load() error { return err } +func (r *layerStore) loadMounts() error { + mounts := make(map[string]*Layer) + mpath := r.mountspath() + data, err := ioutil.ReadFile(mpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layerMounts := []layerMountPoint{} + if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := r.lookup(mount.ID); ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount + } + } + } + err = nil + } + r.bymount = mounts + return err +} + func (r *layerStore) Save() error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) } if !r.Locked() { - return errors.New("layer store is not locked") + return errors.New("layer store is not locked for writing") } rpath := r.layerspath() if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { @@ -388,6 +402,25 @@ func (r *layerStore) Save() error { if err != nil { return err } + if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { + return err + } + if !r.IsReadWrite() { + return nil + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + defer r.mountsLockfile.Touch() + return r.saveMounts() +} + +func (r *layerStore) saveMounts() error { + if !r.IsReadWrite() { + return 
errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.mountsLockfile.Locked() { + return errors.New("layer store mount information is not locked for writing") + } mpath := r.mountspath() if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { return err @@ -406,11 +439,10 @@ func (r *layerStore) Save() error { if err != nil { return err } - if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { + if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { return err } - defer r.Touch() - return ioutils.AtomicWriteFile(mpath, jmdata, 0600) + return r.loadMounts() } func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) { @@ -426,16 +458,21 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap } lockfile.Lock() defer lockfile.Unlock() + mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock")) + if err != nil { + return nil, err + } rlstore := layerStore{ - lockfile: lockfile, - driver: driver, - rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), - uidMap: copyIDMap(uidMap), - gidMap: copyIDMap(gidMap), + lockfile: lockfile, + mountsLockfile: mountsLockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + uidMap: copyIDMap(uidMap), + gidMap: copyIDMap(gidMap), } if err := rlstore.Load(); err != nil { return nil, err @@ -451,13 +488,14 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL lockfile.Lock() defer lockfile.Unlock() rlstore := layerStore{ - lockfile: lockfile, - driver: driver, - rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), + 
lockfile: lockfile, + mountsLockfile: nil, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), } if err := rlstore.Load(); err != nil { return nil, err @@ -673,6 +711,16 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel } func (r *layerStore) Mounted(id string) (int, error) { + if !r.IsReadWrite() { + return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return 0, err + } + } layer, ok := r.lookup(id) if !ok { return 0, ErrLayerUnknown @@ -684,13 +732,21 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) if !r.IsReadWrite() { return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return "", err + } + } + defer r.mountsLockfile.Touch() layer, ok := r.lookup(id) if !ok { return "", ErrLayerUnknown } if layer.MountCount > 0 { layer.MountCount++ - return layer.MountPoint, r.Save() + return layer.MountPoint, r.saveMounts() } if options.MountLabel == "" { options.MountLabel = layer.MountLabel @@ -709,7 +765,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) layer.MountPoint = filepath.Clean(mountpoint) layer.MountCount++ r.bymount[layer.MountPoint] = layer - err = r.Save() + err = r.saveMounts() } return mountpoint, err } @@ -718,6 +774,14 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { if !r.IsReadWrite() { return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update 
mount locations for layers at %q", r.mountspath()) } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return false, err + } + } + defer r.mountsLockfile.Touch() layer, ok := r.lookup(id) if !ok { layerByMount, ok := r.bymount[filepath.Clean(id)] @@ -731,7 +795,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { } if layer.MountCount > 1 { layer.MountCount-- - return true, r.Save() + return true, r.saveMounts() } err := r.driver.Put(id) if err == nil || os.IsNotExist(err) { @@ -740,12 +804,22 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) { } layer.MountCount-- layer.MountPoint = "" - return false, r.Save() + return false, r.saveMounts() } return true, err } func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { + if !r.IsReadWrite() { + return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return nil, nil, err + } + } layer, ok := r.lookup(id) if !ok { return nil, nil, ErrLayerUnknown @@ -862,14 +936,23 @@ func (r *layerStore) Delete(id string) error { return ErrLayerUnknown } id = layer.ID - // This check is needed for idempotency of delete where the layer could have been - // already unmounted (since c/storage gives you that API directly) - for layer.MountCount > 0 { + // The layer may already have been explicitly unmounted, but if not, we + // should try to clean that up before we start deleting anything at the + // driver level. 
+ mountCount, err := r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + for mountCount > 0 { if _, err := r.Unmount(id, false); err != nil { return err } + mountCount, err = r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } } - err := r.driver.Remove(id) + err = r.driver.Remove(id) if err == nil { os.Remove(r.tspath(id)) delete(r.byid, id) @@ -1235,7 +1318,20 @@ func (r *layerStore) Touch() error { } func (r *layerStore) Modified() (bool, error) { - return r.lockfile.Modified() + var mmodified bool + lmodified, err := r.lockfile.Modified() + if err != nil { + return lmodified, err + } + if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + mmodified, err = r.mountsLockfile.Modified() + if err != nil { + return lmodified, err + } + } + return lmodified || mmodified, nil } func (r *layerStore) IsReadWrite() bool { diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go index 7f07b9ac5..3a1befcbe 100644 --- a/vendor/github.com/containers/storage/lockfile.go +++ b/vendor/github.com/containers/storage/lockfile.go @@ -35,7 +35,7 @@ type Locker interface { // IsReadWrite() checks if the lock file is read-write IsReadWrite() bool - // Locked() checks if lock is locked + // Locked() checks if lock is locked for writing by a thread in this process Locked() bool } @@ -66,7 +66,10 @@ func getLockfile(path string, ro bool) (Locker, error) { if lockfiles == nil { lockfiles = make(map[string]Locker) } - cleanPath := filepath.Clean(path) + cleanPath, err := filepath.Abs(path) + if err != nil { + return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path) + } if locker, ok := lockfiles[cleanPath]; ok { if ro && locker.IsReadWrite() { return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath) diff --git 
a/vendor/github.com/containers/storage/lockfile_darwin.go b/vendor/github.com/containers/storage/lockfile_darwin.go deleted file mode 100644 index 041d54c05..000000000 --- a/vendor/github.com/containers/storage/lockfile_darwin.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build darwin freebsd - -package storage - -import ( - "time" - - "golang.org/x/sys/unix" -) - -func (l *lockfile) TouchedSince(when time.Time) bool { - st := unix.Stat_t{} - err := unix.Fstat(int(l.fd), &st) - if err != nil { - return true - } - touched := time.Unix(st.Mtimespec.Unix()) - return when.Before(touched) -} diff --git a/vendor/github.com/containers/storage/lockfile_otherunix.go b/vendor/github.com/containers/storage/lockfile_otherunix.go new file mode 100644 index 000000000..041d54c05 --- /dev/null +++ b/vendor/github.com/containers/storage/lockfile_otherunix.go @@ -0,0 +1,19 @@ +// +build darwin freebsd + +package storage + +import ( + "time" + + "golang.org/x/sys/unix" +) + +func (l *lockfile) TouchedSince(when time.Time) bool { + st := unix.Stat_t{} + err := unix.Fstat(int(l.fd), &st) + if err != nil { + return true + } + touched := time.Unix(st.Mtimespec.Unix()) + return when.Before(touched) +} diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go index 0adbc49a5..a9dc64122 100644 --- a/vendor/github.com/containers/storage/lockfile_unix.go +++ b/vendor/github.com/containers/storage/lockfile_unix.go @@ -32,7 +32,7 @@ func getLockFile(path string, ro bool) (Locker, error) { } return &lockfile{ stateMutex: &sync.Mutex{}, - writeMutex: &sync.Mutex{}, + rwMutex: &sync.RWMutex{}, file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), @@ -42,10 +42,10 @@ func getLockFile(path string, ro bool) (Locker, error) { } type lockfile struct { - // stateMutex is used to synchronize concurrent accesses + // rwMutex serializes concurrent reader-writer acquisitions in the same process space + rwMutex *sync.RWMutex + // stateMutex is 
used to synchronize concurrent accesses to the state below stateMutex *sync.Mutex - // writeMutex is used to serialize and avoid recursive writer locks - writeMutex *sync.Mutex counter int64 file string fd uintptr @@ -65,23 +65,24 @@ func (l *lockfile) lock(l_type int16) { Len: 0, Pid: int32(os.Getpid()), } - if l_type == unix.F_WRLCK { - // If we try to lock as a writer, lock the writerMutex first to - // avoid multiple writer acquisitions of the same process. - // Note: it's important to lock it prior to the stateMutex to - // avoid a deadlock. - l.writeMutex.Lock() + switch l_type { + case unix.F_RDLCK: + l.rwMutex.RLock() + case unix.F_WRLCK: + l.rwMutex.Lock() + default: + panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type)) } l.stateMutex.Lock() - l.locktype = l_type if l.counter == 0 { // Optimization: only use the (expensive) fcntl syscall when - // the counter is 0. If it's greater than that, we're owning - // the lock already and can only be a reader. + // the counter is 0. In this case, we're either the first + // reader lock or a writer lock. for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil { time.Sleep(10 * time.Millisecond) } } + l.locktype = l_type l.locked = true l.counter++ l.stateMutex.Unlock() @@ -133,19 +134,28 @@ func (l *lockfile) Unlock() { time.Sleep(10 * time.Millisecond) } } - if l.locktype == unix.F_WRLCK { - l.writeMutex.Unlock() + if l.locktype == unix.F_RDLCK { + l.rwMutex.RUnlock() + } else { + l.rwMutex.Unlock() } l.stateMutex.Unlock() } -// Locked checks if lockfile is locked. +// Locked checks if lockfile is locked for writing by a thread in this process. func (l *lockfile) Locked() bool { - return l.locked + l.stateMutex.Lock() + defer l.stateMutex.Unlock() + return l.locked && (l.locktype == unix.F_WRLCK) } // Touch updates the lock file with the UID of the user. 
func (l *lockfile) Touch() error { + l.stateMutex.Lock() + if !l.locked || (l.locktype != unix.F_WRLCK) { + panic("attempted to update last-writer in lockfile without the write lock") + } + l.stateMutex.Unlock() l.lw = stringid.GenerateRandomID() id := []byte(l.lw) _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) @@ -170,6 +180,11 @@ func (l *lockfile) Touch() error { // was loaded. func (l *lockfile) Modified() (bool, error) { id := []byte(l.lw) + l.stateMutex.Lock() + if !l.locked { + panic("attempted to check last-writer in lockfile without locking it first") + } + l.stateMutex.Unlock() _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) if err != nil { return true, err @@ -179,7 +194,7 @@ func (l *lockfile) Modified() (bool, error) { return true, err } if n != len(id) { - return true, unix.ENOSPC + return true, nil } lw := l.lw l.lw = string(id) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index ba1704250..9cc717e5a 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -636,7 +636,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L if chownOpts == nil { chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err } } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go index 6e33ac38d..5602c7e21 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -7,6 +7,7 @@ import ( "strings" "syscall" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "golang.org/x/sys/unix" ) @@ -130,7 
+131,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { return false, err } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil { return false, err } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 9c591aff8..815589382 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -7,6 +7,9 @@ import ( "sort" "strconv" "strings" + "syscall" + + "github.com/pkg/errors" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -277,3 +280,18 @@ func parseSubidFile(path, username string) (ranges, error) { } return rangeList, nil } + +func checkChownErr(err error, name string, uid, gid int) error { + if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { + return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name) + } + return err +} + +func SafeChown(name string, uid, gid int) error { + return checkChownErr(os.Chown(name, uid, gid), name, uid, gid) +} + +func SafeLchown(name string, uid, gid int) error { + return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid) +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index b5870506a..bdbdf1b50 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -30,7 +30,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown paths = []string{path} } else if err == nil && chownExisting { // short-circuit--we were called with an existing directory and 
chown was requested - return os.Chown(path, ownerUID, ownerGID) + return SafeChown(path, ownerUID, ownerGID) } else if err == nil { // nothing to do; directory path fully exists already and chown was NOT requested return nil @@ -60,7 +60,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil { return err } } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 05319eacc..1ae728a61 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -3,6 +3,7 @@ package reexec import ( + "context" "os/exec" "syscall" @@ -20,11 +21,23 @@ func Self() string { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: unix.SIGTERM, - }, + cmd := exec.Command(Self()) + cmd.Args = args + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: unix.SIGTERM, } + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary, and also +// sets SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). 
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: unix.SIGTERM, + } + return cmd } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 778a720e3..1ecaa906f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -3,6 +3,7 @@ package reexec import ( + "context" "os/exec" ) @@ -16,8 +17,14 @@ func Self() string { // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 76edd8242..9d9374268 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -3,6 +3,7 @@ package reexec import ( + "context" "os/exec" ) @@ -10,3 +11,8 @@ import ( func Command(args ...string) *exec.Cmd { return nil } + +// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. 
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index ca871c422..673ab476a 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -3,6 +3,7 @@ package reexec import ( + "context" "os/exec" ) @@ -16,8 +17,16 @@ func Self() string { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd } diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index d53703d6b..7e39e3959 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -32,7 +32,7 @@ import ( var ( // DefaultStoreOptions is a reasonable default set of options. - DefaultStoreOptions StoreOptions + defaultStoreOptions StoreOptions stores []*store storesLock sync.Mutex ) @@ -102,19 +102,21 @@ type ROBigDataStore interface { BigDataNames(id string) ([]string, error) } -// A RWBigDataStore wraps up the read-write big-data related methods of the -// various types of file-based lookaside stores that we implement. -type RWBigDataStore interface { - // SetBigData stores a (potentially large) piece of data associated with this - // ID. 
- SetBigData(id, key string, data []byte) error +// A RWImageBigDataStore wraps up how we store big-data associated with images. +type RWImageBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + // Pass github.com/containers/image/manifest.Digest as digestManifest + // to allow ByDigest to find images by their correct digests. + SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error } -// A BigDataStore wraps up the most common big-data related methods of the -// various types of file-based lookaside stores that we implement. -type BigDataStore interface { +// A ContainerBigDataStore wraps up how we store big-data associated with containers. +type ContainerBigDataStore interface { ROBigDataStore - RWBigDataStore + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + SetBigData(id, key string, data []byte) error } // A FlaggableStore can have flags set and cleared on items which it manages. @@ -352,9 +354,11 @@ type Store interface { // of named data associated with an image. ImageBigDataDigest(id, key string) (digest.Digest, error) - // SetImageBigData stores a (possibly large) chunk of named data associated - // with an image. - SetImageBigData(id, key string, data []byte) error + // SetImageBigData stores a (possibly large) chunk of named data + // associated with an image. Pass + // github.com/containers/image/manifest.Digest as digestManifest to + // allow ImagesByDigest to find images by their correct digests. + SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error // ImageSize computes the size of the image's layers and ancillary data. 
ImageSize(id string) (int64, error) @@ -546,14 +550,22 @@ type store struct { // } func GetStore(options StoreOptions) (Store, error) { if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { - options = DefaultStoreOptions + options = defaultStoreOptions } if options.GraphRoot != "" { - options.GraphRoot = filepath.Clean(options.GraphRoot) + dir, err := filepath.Abs(options.GraphRoot) + if err != nil { + return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot) + } + options.GraphRoot = dir } if options.RunRoot != "" { - options.RunRoot = filepath.Clean(options.RunRoot) + dir, err := filepath.Abs(options.RunRoot) + if err != nil { + return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot) + } + options.RunRoot = dir } storesLock.Lock() @@ -1321,7 +1333,7 @@ func (s *store) Metadata(id string) (string, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1343,7 +1355,7 @@ func (s *store) Metadata(id string) (string, error) { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1359,7 +1371,7 @@ func (s *store) Metadata(id string) (string, error) { if err != nil { return "", err } - cstore.Lock() + cstore.RLock() defer cstore.Unlock() if modified, err := cstore.Modified(); modified || err != nil { if err = cstore.Load(); err != nil { @@ -1383,7 +1395,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) { } for _, s := range append([]ROImageStore{istore}, istores...) 
{ store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1409,7 +1421,7 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1436,7 +1448,7 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { stores = append([]ROImageStore{ristore}, stores...) for _, r := range stores { ristore := r - ristore.Lock() + ristore.RLock() defer ristore.Unlock() if modified, err := ristore.Modified(); modified || err != nil { if err = ristore.Load(); err != nil { @@ -1477,7 +1489,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { return nil, ErrImageUnknown } -func (s *store) SetImageBigData(id, key string, data []byte) error { +func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { ristore, err := s.ImageStore() if err != nil { return err @@ -1491,7 +1503,7 @@ func (s *store) SetImageBigData(id, key string, data []byte) error { } } - return ristore.SetBigData(id, key, data) + return ristore.SetBigData(id, key, data, digestManifest) } func (s *store) ImageSize(id string) (int64, error) { @@ -1507,7 +1519,7 @@ func (s *store) ImageSize(id string) (int64, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1529,7 +1541,7 @@ func (s *store) ImageSize(id string) (int64, error) { // Look for the image's record. for _, s := range append([]ROImageStore{istore}, istores...) 
{ store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1617,7 +1629,7 @@ func (s *store) ContainerSize(id string) (int64, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1641,7 +1653,7 @@ func (s *store) ContainerSize(id string) (int64, error) { if err != nil { return -1, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1705,7 +1717,7 @@ func (s *store) ListContainerBigData(id string) ([]string, error) { return nil, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1721,7 +1733,7 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) { if err != nil { return -1, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1736,7 +1748,7 @@ func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { if err != nil { return "", err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1751,7 +1763,7 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) { if err != nil { return nil, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1787,7 +1799,7 @@ func (s *store) Exists(id string) bool { } for _, s := range append([]ROLayerStore{lstore}, 
lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1809,7 +1821,7 @@ func (s *store) Exists(id string) bool { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1825,7 +1837,7 @@ func (s *store) Exists(id string) bool { if err != nil { return false } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1912,7 +1924,7 @@ func (s *store) Names(id string) ([]string, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1934,7 +1946,7 @@ func (s *store) Names(id string) ([]string, error) { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1950,7 +1962,7 @@ func (s *store) Names(id string) ([]string, error) { if err != nil { return nil, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -1974,7 +1986,7 @@ func (s *store) Lookup(name string) (string, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -1996,7 +2008,7 @@ func (s *store) Lookup(name string) (string, error) { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2012,7 +2024,7 @@ func (s *store) Lookup(name string) (string, error) { if err != nil { return "", err } - cstore.Lock() + cstore.RLock() defer cstore.Unlock() if modified, err := cstore.Modified(); modified || err != nil { if err = cstore.Load(); err != nil { @@ -2464,7 +2476,7 @@ func (s *store) Mounted(id string) (int, error) { if err != nil { return 0, err } - rlstore.Lock() + rlstore.RLock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { if err = rlstore.Load(); err != nil { @@ -2507,7 +2519,7 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2532,7 +2544,7 @@ func (s *store) DiffSize(from, to string) (int64, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2612,7 +2624,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye } for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2659,7 +2671,7 @@ func (s *store) LayerSize(id string) (int64, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2678,7 +2690,7 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) { if err != nil { return nil, nil, err } - rlstore.Lock() + rlstore.RLock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { if err = rlstore.Load(); err != nil { @@ -2700,14 +2712,14 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { if err != nil { return nil, nil, err } - rlstore.Lock() + rlstore.RLock() defer rlstore.Unlock() if modified, err := rlstore.Modified(); modified || err != nil { if err = rlstore.Load(); err != nil { return nil, nil, err } } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -2738,7 +2750,7 @@ func (s *store) Layers() ([]Layer, error) { for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2767,7 +2779,7 @@ func (s *store) Images() ([]Image, error) { } for _, s := range append([]ROImageStore{istore}, istores...) 
{ store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2789,7 +2801,7 @@ func (s *store) Containers() ([]Container, error) { return nil, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -2811,7 +2823,7 @@ func (s *store) Layer(id string) (*Layer, error) { } for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2837,7 +2849,7 @@ func (s *store) Image(id string) (*Image, error) { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2870,7 +2882,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { } for _, s := range append([]ROImageStore{istore}, istores...) { store := s - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2903,7 +2915,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { return nil, err } for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() + store.RLock() defer store.Unlock() if modified, err := store.Modified(); modified || err != nil { if err = store.Load(); err != nil { @@ -2924,7 +2936,7 @@ func (s *store) Container(id string) (*Container, error) { if err != nil { return nil, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -2940,7 +2952,7 @@ func (s *store) ContainerLayerID(id string) (string, error) { if err != nil { return "", err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -2963,7 +2975,7 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { if err != nil { return nil, err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -2988,7 +3000,7 @@ func (s *store) ContainerDirectory(id string) (string, error) { if err != nil { return "", err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -3015,7 +3027,7 @@ func (s *store) ContainerRunDirectory(id string) (string, error) { return "", err } - rcstore.Lock() + rcstore.RLock() defer rcstore.Unlock() if modified, err := rcstore.Modified(); modified || err != nil { if err = rcstore.Load(); err != nil { @@ -3205,8 +3217,20 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} { return ret } -// DefaultConfigFile path to the system wide storage.conf file -const DefaultConfigFile = "/etc/containers/storage.conf" +// defaultConfigFile path to the system wide storage.conf file +const defaultConfigFile = "/etc/containers/storage.conf" + +// DefaultConfigFile returns the path to the storage config file used +func 
DefaultConfigFile(rootless bool) (string, error) { + if rootless { + home, err := homeDir() + if err != nil { + return "", errors.Wrapf(err, "cannot determine users homedir") + } + return filepath.Join(home, ".config/containers/storage.conf"), nil + } + return defaultConfigFile, nil +} // TOML-friendly explicit tables used for conversions. type tomlConfig struct { @@ -3346,19 +3370,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } func init() { - DefaultStoreOptions.RunRoot = "/var/run/containers/storage" - DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" - DefaultStoreOptions.GraphDriverName = "" + defaultStoreOptions.RunRoot = "/var/run/containers/storage" + defaultStoreOptions.GraphRoot = "/var/lib/containers/storage" + defaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFile(DefaultConfigFile, &DefaultStoreOptions) + ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions) } func GetDefaultMountOptions() ([]string, error) { mountOpts := []string{ ".mountopt", - fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName), + fmt.Sprintf("%s.mountopt", defaultStoreOptions.GraphDriverName), } - for _, option := range DefaultStoreOptions.GraphDriverOptions { + for _, option := range defaultStoreOptions.GraphDriverOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go new file mode 100644 index 000000000..e74956c9e --- /dev/null +++ b/vendor/github.com/containers/storage/utils.go @@ -0,0 +1,234 @@ +package storage + +import ( + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + + "github.com/BurntSushi/toml" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a 
storage mapping +func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) { + options := IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + if subGIDMap == "" && subUIDMap != "" { + subGIDMap = subUIDMap + } + if subUIDMap == "" && subGIDMap != "" { + subUIDMap = subGIDMap + } + if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 { + GIDMapSlice = UIDMapSlice + } + if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 { + UIDMapSlice = GIDMapSlice + } + if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 { + UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())} + } + if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 { + GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())} + } + + if subUIDMap != "" && subGIDMap != "" { + mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap) + if err != nil { + return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap) + } + options.UIDMap = mappings.UIDs() + options.GIDMap = mappings.GIDs() + } + parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID") + if err != nil { + return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice) + } + parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID") + if err != nil { + return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice) + } + options.UIDMap = append(options.UIDMap, parsedUIDMap...) + options.GIDMap = append(options.GIDMap, parsedGIDMap...) 
+ if len(options.UIDMap) > 0 { + options.HostUIDMapping = false + } + if len(options.GIDMap) > 0 { + options.HostGIDMapping = false + } + return &options, nil +} + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUid int) (string, error) { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir == "" { + tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid) + st, err := system.Stat(tmpDir) + if err == nil && int(st.UID()) == os.Getuid() && st.Mode() == 0700 { + return tmpDir, nil + } + } + tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid) + if err := os.MkdirAll(tmpDir, 0700); err != nil { + logrus.Errorf("failed to create %s: %v", tmpDir, err) + } else { + return tmpDir, nil + } + home, err := homeDir() + if err != nil { + return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty") + } + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + return "", errors.Wrapf(err, "cannot resolve %s", home) + } + return filepath.Join(resolvedHome, "rundir"), nil +} + +// getRootlessDirInfo returns the parent path of where the storage for containers and +// volumes will be in rootless mode +func getRootlessDirInfo(rootlessUid int) (string, string, error) { + rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid) + if err != nil { + return "", "", err + } + + dataDir := os.Getenv("XDG_DATA_HOME") + if dataDir == "" { + home, err := homeDir() + if err != nil { + return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty") + } + // runc doesn't like symlinks in the rootfs path, and at least + // on CoreOS /home is a symlink to /var/home, so resolve any symlink. 
+ resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + return "", "", errors.Wrapf(err, "cannot resolve %s", home) + } + dataDir = filepath.Join(resolvedHome, ".local", "share") + } + return dataDir, rootlessRuntime, nil +} + +// getRootlessStorageOpts returns the storage opts for containers running as non root +func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) { + var opts StoreOptions + + dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid) + if err != nil { + return opts, err + } + opts.RunRoot = rootlessRuntime + opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.GraphDriverName = "overlay" + opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} + } else { + opts.GraphDriverName = "vfs" + } + return opts, nil +} + +type tomlOptionsConfig struct { + MountProgram string `toml:"mount_program"` +} + +func getTomlStorage(storeOptions *StoreOptions) *tomlConfig { + config := new(tomlConfig) + + config.Storage.Driver = storeOptions.GraphDriverName + config.Storage.RunRoot = storeOptions.RunRoot + config.Storage.GraphRoot = storeOptions.GraphRoot + for _, i := range storeOptions.GraphDriverOptions { + s := strings.Split(i, "=") + if s[0] == "overlay.mount_program" { + config.Storage.Options.MountProgram = s[1] + } + } + + return config +} + +// DefaultStoreOptions returns the default storage ops for containers +func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) { + var ( + defaultRootlessRunRoot string + defaultRootlessGraphRoot string + err error + ) + storageOpts := defaultStoreOptions + if rootless { + storageOpts, err = getRootlessStorageOpts(rootlessUid) + if err != nil { + return storageOpts, err + } + } + + storageConf, err := DefaultConfigFile(rootless) + if err != nil { + return storageOpts, err + } + if _, err = os.Stat(storageConf); err == nil { + defaultRootlessRunRoot = 
storageOpts.RunRoot + defaultRootlessGraphRoot = storageOpts.GraphRoot + storageOpts = StoreOptions{} + ReloadConfigurationFile(storageConf, &storageOpts) + } + + if !os.IsNotExist(err) { + return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf) + } + + if rootless { + if err == nil { + // If the file did not specify a graphroot or runroot, + // set sane defaults so we don't try and use root-owned + // directories + if storageOpts.RunRoot == "" { + storageOpts.RunRoot = defaultRootlessRunRoot + } + if storageOpts.GraphRoot == "" { + storageOpts.GraphRoot = defaultRootlessGraphRoot + } + } else { + if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil { + return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf)) + } + file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf) + } + + tomlConfiguration := getTomlStorage(&storageOpts) + defer file.Close() + enc := toml.NewEncoder(file) + if err := enc.Encode(tomlConfiguration); err != nil { + os.Remove(storageConf) + + return storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf) + } + } + } + return storageOpts, nil +} + +func homeDir() (string, error) { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.Current() + if err != nil { + return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty") + } + home = usr.HomeDir + } + return home, nil +} diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index c143b049d..62a3f98ca 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -1,18 +1,15 @@ github.com/BurntSushi/toml master github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165 github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8 
-github.com/containers/image master github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 -github.com/docker/libtrust master github.com/klauspost/compress v1.4.1 github.com/klauspost/cpuid v1.2.0 github.com/klauspost/pgzip v1.2.1 github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062 github.com/opencontainers/go-digest master -github.com/opencontainers/image-spec master github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07 github.com/opencontainers/selinux v1.1 github.com/ostreedev/ostree-go master diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go index dfc216389..a08be9ecd 100644 --- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go +++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go @@ -1,6 +1,7 @@ package ocicni import ( + "context" "errors" "fmt" "net" @@ -511,7 +512,7 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, netconf, cninet := network.NetworkConfig, network.CNIConfig logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) - res, err := cninet.AddNetworkList(netconf, rt) + res, err := cninet.AddNetworkList(context.Background(), netconf, rt) if err != nil { logrus.Errorf("Error adding network: %v", err) return nil, err @@ -529,7 +530,7 @@ func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNet netconf, cninet := network.NetworkConfig, network.CNIConfig logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type) - err = cninet.DelNetworkList(netconf, rt) + err = cninet.DelNetworkList(context.Background(), netconf, rt) if err != nil { logrus.Errorf("Error deleting network: %v", err) 
return err diff --git a/vendor/github.com/cri-o/ocicni/vendor.conf b/vendor/github.com/cri-o/ocicni/vendor.conf new file mode 100644 index 000000000..d769d5177 --- /dev/null +++ b/vendor/github.com/cri-o/ocicni/vendor.conf @@ -0,0 +1,13 @@ +github.com/containernetworking/cni fbb95fff8a5239a4295c991efa8a397d43118f7e +github.com/fsnotify/fsnotify 1485a34d5d5723fea214f5710708e19a831720e4 +github.com/sirupsen/logrus 787e519fa85519b874dead61020de598e9a23944 +github.com/onsi/ginkgo eea6ad008b96acdaa524f5b409513bf062b500ad +github.com/onsi/gomega 90e289841c1ed79b7a598a7cd9959750cb5e89e2 +golang.org/x/net 63eda1eb0650888965ead1296efd04d0b2b61128 +gopkg.in/yaml.v2 51d6538a90f86fe93ac480b35f37b2be17fef232 +golang.org/x/text e3703dcdd614d2d7488fff034c75c551ea25da95 +golang.org/x/sys f49334f85ddcf0f08d7fb6dd7363e9e6d6b777eb +github.com/hpcloud/tail a1dbeea552b7c8df4b542c66073e393de198a800 +gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8 +gopkg.in/fsnotify/fsnotify.v1 c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 +github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e -- cgit v1.2.3-54-g00ecf