-rw-r--r--  .papr.yml | 41
-rwxr-xr-x  API.md | 157
-rw-r--r--  Makefile | 4
-rw-r--r--  cmd/podman/generate_kube.go | 2
-rw-r--r--  cmd/podman/main.go | 1
-rw-r--r--  cmd/podman/restart.go | 81
-rw-r--r--  cmd/podman/varlink/io.podman.varlink | 157
-rw-r--r--  contrib/cirrus/README.md | 36
-rw-r--r--  libpod/container_internal.go | 12
-rw-r--r--  libpod/container_internal_linux.go | 2
-rw-r--r--  libpod/kube.go | 64
-rw-r--r--  libpod/options.go | 19
-rw-r--r--  pkg/varlinkapi/images.go | 2
-rw-r--r--  test/bin2img/bin2img.go | 5
-rw-r--r--  test/e2e/rootless_test.go | 4
-rw-r--r--  vendor.conf | 4
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 62
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 104
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 99
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 1
-rw-r--r--  vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go | 517
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 10
-rw-r--r--  vendor/github.com/containers/buildah/pull.go | 15
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf | 5
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go | 101
-rw-r--r--  vendor/github.com/containers/image/directory/directory_dest.go | 27
-rw-r--r--  vendor/github.com/containers/image/directory/directory_src.go | 4
-rw-r--r--  vendor/github.com/containers/image/docker/cache.go | 23
-rw-r--r--  vendor/github.com/containers/image/docker/docker_client.go | 59
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_dest.go | 159
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go | 5
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/dest.go | 37
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/src.go | 4
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema2.go | 7
-rw-r--r--  vendor/github.com/containers/image/image/oci.go | 3
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_dest.go | 31
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_src.go | 8
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_dest.go | 33
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_src.go | 6
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift.go | 34
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_dest.go | 36
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_src.go | 6
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go | 329
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/default.go | 63
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/memory.go | 123
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/none.go | 47
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go | 108
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go | 110
-rw-r--r--  vendor/github.com/containers/image/tarball/tarball_src.go | 5
-rw-r--r--  vendor/github.com/containers/image/types/types.go | 102
-rw-r--r--  vendor/github.com/containers/image/vendor.conf | 1
52 files changed, 2512 insertions, 367 deletions
diff --git a/.papr.yml b/.papr.yml
index fbdbcb5cd..9c0096152 100644
--- a/.papr.yml
+++ b/.papr.yml
@@ -133,44 +133,3 @@ required: false
timeout: 90m
context: "Fedora 28 Cloud"
-
----
-
-host:
- distro: fedora/29/cloud/pungi
- specs:
- ram: 8192
- cpus: 4
-packages:
- - btrfs-progs-devel
- - glib2-devel
- - glibc-devel
- - glibc-static
- - git
- - go-md2man
- - gpgme-devel
- - libassuan-devel
- - libgpg-error-devel
- - libseccomp-devel
- - libselinux-devel
- - ostree-devel
- - pkgconfig
- - make
- - nc
- - go-compilers-golang-compiler
- - podman
- - python3-varlink
- - python3-dateutil
- - python3-psutil
- - container-selinux
- - https://kojipkgs.fedoraproject.org//packages/runc/1.0.0/54.dev.git00dc700.fc28/x86_64/runc-1.0.0-54.dev.git00dc700.fc28.x86_64.rpm
-
-tests:
- - sed 's/^expand-check.*/expand-check=0/g' -i /etc/selinux/semanage.conf
- - dnf -y reinstall container-selinux
- - sh .papr.sh -b -i -t
-
-required: false
-
-timeout: 90m
-context: "Fedora 29 Cloud"
diff --git a/API.md b/API.md
index d45bb8123..a338f0212 100755
--- a/API.md
+++ b/API.md
@@ -244,6 +244,12 @@ method ContainerExists(name: [string](https://godoc.org/builtin#string)) [int](h
ContainerExists takes a full or partial container ID or name and returns an int as to
whether the container exists in local storage. A result of 0 means the container does
exist; whereas a result of 1 means it could not be found.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerExists '{"name": "flamboyant_payne"}'{
+ "exists": 0
+}
+~~~
### <a name="ContainerRestore"></a>func ContainerRestore
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -298,12 +304,36 @@ $ varlink call unix:/run/podman/io.podman/io.podman.CreatePod '{"create": {"name
method DeleteStoppedContainers() [[]string](#[]string)</div>
DeleteStoppedContainers will delete all containers that are not running. It will return a list of the deleted
container IDs. See also [RemoveContainer](RemoveContainer).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteStoppedContainers
+{
+ "containers": [
+ "451410b931d00def8aa9b4f8084e4d4a39e5e04ea61f358cf53a5cf95afcdcee",
+ "8b60f754a3e01389494a9581ade97d35c2765b6e2f19acd2d3040c82a32d1bc0",
+ "cf2e99d4d3cad6073df199ed32bbe64b124f3e1aba6d78821aa8460e70d30084",
+ "db901a329587312366e5ecff583d08f0875b4b79294322df67d90fc6eed08fc1"
+ ]
+}
+~~~
### <a name="DeleteUnusedImages"></a>func DeleteUnusedImages
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
method DeleteUnusedImages() [[]string](#[]string)</div>
DeleteUnusedImages deletes any images not associated with a container. The IDs of the deleted images are returned
in a string array.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages
+{
+ "images": [
+ "166ea6588079559c724c15223f52927f514f73dd5c5cf2ae2d143e3b2e6e9b52",
+ "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
+ "3ef70f7291f47dfe2b82931a993e16f5a44a0e7a68034c3e0e086d77f5829adc",
+ "59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690"
+ ]
+}
+~~~
### <a name="ExportContainer"></a>func ExportContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -312,6 +342,13 @@ ExportContainer creates an image from a container. It takes the name or ID of a
path representing the target tarfile. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)
error will be returned.
The return value is the written tarfile.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ExportContainer '{"name": "flamboyant_payne", "path": "/tmp/payne.tar" }'
+{
+ "tarfile": "/tmp/payne.tar"
+}
+~~~
### <a name="ExportImage"></a>func ExportImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -403,6 +440,32 @@ method GetPod(name: [string](https://godoc.org/builtin#string)) [ListPodData](#L
GetPod takes a name or ID of a pod and returns single [ListPodData](#ListPodData)
structure. A [PodNotFound](#PodNotFound) error will be returned if the pod cannot be found.
See also [ListPods](ListPods).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.GetPod '{"name": "foobar"}'
+{
+ "pod": {
+ "cgroup": "machine.slice",
+ "containersinfo": [
+ {
+ "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+ "name": "1840835294cf-infra",
+ "status": "running"
+ },
+ {
+ "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+ "name": "upbeat_murdock",
+ "status": "running"
+ }
+ ],
+ "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+ "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+ "name": "foobar",
+ "numberofcontainers": "2",
+ "status": "Running"
+ }
+}
+~~~
### <a name="GetPodStats"></a>func GetPodStats
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -455,6 +518,13 @@ method ImageExists(name: [string](https://godoc.org/builtin#string)) [int](https
ImageExists takes a full or partial image ID or name and returns an int as to whether
the image exists in local storage. An int result of 0 means the image does exist in
local storage; whereas 1 indicates the image does not exist in local storage.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{"name": "imageddoesntexist"}'
+{
+ "exists": 1
+}
+~~~
### <a name="ImportImage"></a>func ImportImage
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -499,6 +569,13 @@ Containers in a pod are killed independently. If there is an error killing one c
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
If the pod was killed with no errors, the pod ID is returned.
See also [StopPod](StopPod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.KillPod '{"name": "foobar", "signal": 15}'
+{
+ "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~
### <a name="ListContainerChanges"></a>func ListContainerChanges
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -511,6 +588,18 @@ its base image. It returns a struct of changed, deleted, and added path names.
method ListContainerMounts() [[]string](#[]string)</div>
ListContainerMounts gathers all the mounted container mount points and returns them as an array
of strings
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerMounts
+{
+ "mounts": [
+ "/var/lib/containers/storage/overlay/b215fb622c65ba3b06c6d2341be80b76a9de7ae415ce419e65228873d4f0dcc8/merged",
+ "/var/lib/containers/storage/overlay/5eaf806073f79c0ed9a695180ad598e34f963f7407da1d2ccf3560bdab49b26f/merged",
+ "/var/lib/containers/storage/overlay/1ecb6b1dbb251737c7a24a31869096839c3719d8b250bf075f75172ddcc701e1/merged",
+ "/var/lib/containers/storage/overlay/7137b28a3c422165fe920cba851f2f8da271c6b5908672c451ebda03ad3919e2/merged"
+ ]
+}
+~~~
### <a name="ListContainerPorts"></a>func ListContainerPorts
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -554,12 +643,61 @@ an image currently in storage. See also [InspectImage](InspectImage).
method ListPods() [ListPodData](#ListPodData)</div>
ListPods returns a list of pods in no particular order. They are
returned as an array of ListPodData structs. See also [GetPod](#GetPod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods
+{
+ "pods": [
+ {
+ "cgroup": "machine.slice",
+ "containersinfo": [
+ {
+ "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+ "name": "1840835294cf-infra",
+ "status": "running"
+ },
+ {
+ "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+ "name": "upbeat_murdock",
+ "status": "running"
+ }
+ ],
+ "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+ "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+ "name": "foobar",
+ "numberofcontainers": "2",
+ "status": "Running"
+ },
+ {
+ "cgroup": "machine.slice",
+ "containersinfo": [
+ {
+ "id": "1ca4b7bbba14a75ba00072d4b705c77f3df87db0109afaa44d50cb37c04a477e",
+ "name": "784306f655c6-infra",
+ "status": "running"
+ }
+ ],
+ "createdat": "2018-12-07 13:09:57.105112457 -0600 CST",
+ "id": "784306f655c6200aea321dd430ba685e9b2cc1f7d7528a72f3ff74ffb29485a2",
+ "name": "nostalgic_pike",
+ "numberofcontainers": "1",
+ "status": "Running"
+ }
+ ]
+}
+~~~
### <a name="MountContainer"></a>func MountContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
method MountContainer(name: [string](https://godoc.org/builtin#string)) [string](https://godoc.org/builtin#string)</div>
MountContainer mounts a container by name or full/partial ID. Upon a successful mount, the destination
mount is returned as a string.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.MountContainer '{"name": "jolly_shannon"}'{
+ "path": "/var/lib/containers/storage/overlay/419eeb04e783ea159149ced67d9fcfc15211084d65e894792a96bedfae0470ca/merged"
+}
+~~~
### <a name="PauseContainer"></a>func PauseContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -577,6 +715,13 @@ Containers in a pod are paused independently. If there is an error pausing one c
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
If the pod was paused with no errors, the pod ID is returned.
See also [UnpausePod](#UnpausePod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{"name": "foobar"}'
+{
+ "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~
### <a name="Ping"></a>func Ping
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -771,6 +916,11 @@ This method has not been implemented yet.
method UnmountContainer(name: [string](https://godoc.org/builtin#string), force: [bool](https://godoc.org/builtin#bool)) </div>
UnmountContainer unmounts a container by its name or full/partial container ID.
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.UnmountContainer '{"name": "jolly_shannon", "force": false}'
+{}
+~~~
### <a name="UnpauseContainer"></a>func UnpauseContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
@@ -788,6 +938,13 @@ Containers in a pod are unpaused independently. If there is an error unpausing o
will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
If the pod was unpaused with no errors, the pod ID is returned.
See also [PausePod](#PausePod).
+#### Example
+~~~
+$ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{"name": "foobar"}'
+{
+ "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+}
+~~~
### <a name="UpdateContainer"></a>func UpdateContainer
<div style="background-color: #E8E8E8; padding: 15px; margin: 10px; border-radius: 10px;">
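The API.md examples above drive the service with the `varlink` command-line client. For a Go caller, a minimal hedged sketch follows: rather than relying on generated varlink bindings, it simply shells out to the same `varlink call` invocation documented above and decodes the JSON reply. It assumes the `varlink` binary is installed and the `io.podman` socket is active; the container name is just the one reused from the example.
```
// Minimal sketch: drive the documented io.podman.ContainerExists call from Go
// by shelling out to the varlink CLI shown above and decoding the JSON reply.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	out, err := exec.Command(
		"varlink", "call", "-m",
		"unix:/run/podman/io.podman/io.podman.ContainerExists",
		`{"name": "flamboyant_payne"}`,
	).Output()
	if err != nil {
		log.Fatal(err)
	}

	// The reply is a JSON object such as {"exists": 0}.
	var reply struct {
		Exists int `json:"exists"`
	}
	if err := json.Unmarshal(out, &reply); err != nil {
		log.Fatal(err)
	}
	fmt.Println("exists (0 = found, 1 = not found):", reply.Exists)
}
```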
diff --git a/Makefile b/Makefile
index f99aaaeca..55f34bcc0 100644
--- a/Makefile
+++ b/Makefile
@@ -108,7 +108,7 @@ test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go)
$(GO) build -ldflags '$(LDFLAGS)' -o $@ $(PROJECT)/test/goecho
podman: .gopathok $(PODMAN_VARLINK_DEPENDENCIES)
- $(GO) build -i -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/podman
+ $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS)" -o bin/$@ $(PROJECT)/cmd/podman
local-cross: $(CROSS_BUILD_TARGETS)
@@ -116,7 +116,7 @@ bin/podman.cross.%: .gopathok
TARGET="$*"; \
GOOS="$${TARGET%%.*}" \
GOARCH="$${TARGET##*.}" \
- $(GO) build -i -ldflags '$(LDFLAGS_PODMAN)' -tags '$(BUILDTAGS_CROSS)' -o "$@" $(PROJECT)/cmd/podman
+ $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags '$(BUILDTAGS_CROSS)' -o "$@" $(PROJECT)/cmd/podman
python:
ifdef HAS_PYTHON3
diff --git a/cmd/podman/generate_kube.go b/cmd/podman/generate_kube.go
index de9f701b0..6483ffd72 100644
--- a/cmd/podman/generate_kube.go
+++ b/cmd/podman/generate_kube.go
@@ -88,7 +88,7 @@ func generateKubeYAMLCmd(c *cli.Context) error {
return err
}
- header := `# Generation of Kubenetes YAML is still under development!
+ header := `# Generation of Kubernetes YAML is still under development!
#
# Save the output of this file and use kubectl create -f to import
# it into Kubernetes.
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 280448dc8..796b0b03a 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -36,6 +36,7 @@ var cmdsNotRequiringRootless = map[string]bool{
"logout": true,
"kill": true,
"pause": true,
+ "restart": true,
"run": true,
"unpause": true,
"search": true,
diff --git a/cmd/podman/restart.go b/cmd/podman/restart.go
index 630493ef4..c6fe1025a 100644
--- a/cmd/podman/restart.go
+++ b/cmd/podman/restart.go
@@ -1,9 +1,13 @@
package main
import (
+ "fmt"
+ "os"
+
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -47,6 +51,10 @@ func restartCmd(c *cli.Context) error {
restartContainers []*libpod.Container
)
+ if os.Geteuid() != 0 {
+ rootless.SetSkipStorageSetup(true)
+ }
+
args := c.Args()
runOnly := c.Bool("running")
all := c.Bool("all")
@@ -95,6 +103,29 @@ func restartCmd(c *cli.Context) error {
}
}
+ maxWorkers := shared.Parallelize("restart")
+ if c.GlobalIsSet("max-workers") {
+ maxWorkers = c.GlobalInt("max-workers")
+ }
+
+ logrus.Debugf("Setting maximum workers to %d", maxWorkers)
+
+ if rootless.IsRootless() {
+ // With rootless containers we cannot really restart an existing container
+ // as we would need to join the mount namespace as well to be able to reuse
+ // the storage.
+ if err := stopRootlessContainers(restartContainers, timeout, useTimeout, maxWorkers); err != nil {
+ return err
+ }
+ became, ret, err := rootless.BecomeRootInUserNS()
+ if err != nil {
+ return err
+ }
+ if became {
+ os.Exit(ret)
+ }
+ }
+
// We now have a slice of all the containers to be restarted. Iterate them to
// create restart Funcs with a timeout as needed
for _, ctr := range restartContainers {
@@ -114,13 +145,49 @@ func restartCmd(c *cli.Context) error {
})
}
- maxWorkers := shared.Parallelize("restart")
- if c.GlobalIsSet("max-workers") {
- maxWorkers = c.GlobalInt("max-workers")
- }
-
- logrus.Debugf("Setting maximum workers to %d", maxWorkers)
-
restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, restartFuncs)
return printParallelOutput(restartErrors, errCount)
}
+
+func stopRootlessContainers(stopContainers []*libpod.Container, timeout uint, useTimeout bool, maxWorkers int) error {
+ var stopFuncs []shared.ParallelWorkerInput
+ for _, ctr := range stopContainers {
+ state, err := ctr.State()
+ if err != nil {
+ return err
+ }
+ if state != libpod.ContainerStateRunning {
+ continue
+ }
+
+ ctrTimeout := ctr.StopTimeout()
+ if useTimeout {
+ ctrTimeout = timeout
+ }
+
+ c := ctr
+ f := func() error {
+ return c.StopWithTimeout(ctrTimeout)
+ }
+
+ stopFuncs = append(stopFuncs, shared.ParallelWorkerInput{
+ ContainerID: c.ID(),
+ ParallelFunc: f,
+ })
+
+ restartErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs)
+ var lastError error
+ for _, result := range restartErrors {
+ if result != nil {
+ if errCount > 1 {
+ fmt.Println(result.Error())
+ }
+ lastError = result
+ }
+ }
+ if lastError != nil {
+ return lastError
+ }
+ }
+ return nil
+}
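Because the rootless handling above is split between restartCmd and the new stopRootlessContainers helper, here is a consolidated sketch of the order of operations it introduces: stop any running containers from the original namespace, then re-exec inside the user namespace so the child process can reuse the rootless storage. The package name and function below are hypothetical; the libpod, shared, and rootless identifiers are the same ones used in the diff, and this is an illustration rather than the patch itself.
```
package sketch

import (
	"os"

	"github.com/containers/libpod/cmd/podman/shared"
	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/rootless"
)

// restartRootless (hypothetical) stops running containers and then re-execs
// the current process inside the user namespace, where the restart proper can
// run with access to the rootless storage.
func restartRootless(ctrs []*libpod.Container, timeout uint, useTimeout bool, maxWorkers int) error {
	var stopFuncs []shared.ParallelWorkerInput
	for _, ctr := range ctrs {
		state, err := ctr.State()
		if err != nil {
			return err
		}
		if state != libpod.ContainerStateRunning {
			continue // only running containers need an explicit stop
		}
		ctrTimeout := ctr.StopTimeout()
		if useTimeout {
			ctrTimeout = timeout
		}
		c := ctr
		stopFuncs = append(stopFuncs, shared.ParallelWorkerInput{
			ContainerID:  c.ID(),
			ParallelFunc: func() error { return c.StopWithTimeout(ctrTimeout) },
		})
	}
	// Stop everything before joining the user namespace.
	stopErrors, _ := shared.ParallelExecuteWorkerPool(maxWorkers, stopFuncs)
	for _, e := range stopErrors {
		if e != nil {
			return e
		}
	}
	// Re-exec as "root" inside the user namespace; the parent exits with the
	// child's status, and the child performs the actual restarts.
	became, ret, err := rootless.BecomeRootInUserNS()
	if err != nil {
		return err
	}
	if became {
		os.Exit(ret)
	}
	return nil
}
```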
diff --git a/cmd/podman/varlink/io.podman.varlink b/cmd/podman/varlink/io.podman.varlink
index 3cdc99a83..e596a5c91 100644
--- a/cmd/podman/varlink/io.podman.varlink
+++ b/cmd/podman/varlink/io.podman.varlink
@@ -465,6 +465,13 @@ method ListContainerChanges(name: string) -> (container: ContainerChanges)
# path representing the target tarfile. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)
# error will be returned.
# The return value is the written tarfile.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ExportContainer '{"name": "flamboyant_payne", "path": "/tmp/payne.tar" }'
+# {
+# "tarfile": "/tmp/payne.tar"
+# }
+# ~~~
method ExportContainer(name: string, path: string) -> (tarfile: string)
# GetContainerStats takes the name or ID of a container and returns a single ContainerStats structure which
@@ -581,6 +588,18 @@ method RemoveContainer(name: string, force: bool) -> (container: string)
# DeleteStoppedContainers will delete all containers that are not running. It will return a list of the deleted
# container IDs. See also [RemoveContainer](RemoveContainer).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteStoppedContainers
+# {
+# "containers": [
+# "451410b931d00def8aa9b4f8084e4d4a39e5e04ea61f358cf53a5cf95afcdcee",
+# "8b60f754a3e01389494a9581ade97d35c2765b6e2f19acd2d3040c82a32d1bc0",
+# "cf2e99d4d3cad6073df199ed32bbe64b124f3e1aba6d78821aa8460e70d30084",
+# "db901a329587312366e5ecff583d08f0875b4b79294322df67d90fc6eed08fc1"
+# ]
+# }
+# ~~~
method DeleteStoppedContainers() -> (containers: []string)
# ListImages returns an array of ImageInList structures which provide basic information about
@@ -638,6 +657,18 @@ method SearchImage(name: string, limit: int) -> (images: []ImageSearch)
# DeleteUnusedImages deletes any images not associated with a container. The IDs of the deleted images are returned
# in a string array.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages
+# {
+# "images": [
+# "166ea6588079559c724c15223f52927f514f73dd5c5cf2ae2d143e3b2e6e9b52",
+# "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
+# "3ef70f7291f47dfe2b82931a993e16f5a44a0e7a68034c3e0e086d77f5829adc",
+# "59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690"
+# ]
+# }
+# ~~~
method DeleteUnusedImages() -> (images: []string)
# Commit creates an image from an existing container. It requires the name or
@@ -689,11 +720,80 @@ method CreatePod(create: PodCreate) -> (pod: string)
# ListPods returns a list of pods in no particular order. They are
# returned as an array of ListPodData structs. See also [GetPod](#GetPod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods
+# {
+# "pods": [
+# {
+# "cgroup": "machine.slice",
+# "containersinfo": [
+# {
+# "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+# "name": "1840835294cf-infra",
+# "status": "running"
+# },
+# {
+# "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+# "name": "upbeat_murdock",
+# "status": "running"
+# }
+# ],
+# "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+# "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+# "name": "foobar",
+# "numberofcontainers": "2",
+# "status": "Running"
+# },
+# {
+# "cgroup": "machine.slice",
+# "containersinfo": [
+# {
+# "id": "1ca4b7bbba14a75ba00072d4b705c77f3df87db0109afaa44d50cb37c04a477e",
+# "name": "784306f655c6-infra",
+# "status": "running"
+# }
+# ],
+# "createdat": "2018-12-07 13:09:57.105112457 -0600 CST",
+# "id": "784306f655c6200aea321dd430ba685e9b2cc1f7d7528a72f3ff74ffb29485a2",
+# "name": "nostalgic_pike",
+# "numberofcontainers": "1",
+# "status": "Running"
+# }
+# ]
+# }
+# ~~~
method ListPods() -> (pods: []ListPodData)
# GetPod takes a name or ID of a pod and returns single [ListPodData](#ListPodData)
# structure. A [PodNotFound](#PodNotFound) error will be returned if the pod cannot be found.
# See also [ListPods](ListPods).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.GetPod '{"name": "foobar"}'
+# {
+# "pod": {
+# "cgroup": "machine.slice",
+# "containersinfo": [
+# {
+# "id": "00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45",
+# "name": "1840835294cf-infra",
+# "status": "running"
+# },
+# {
+# "id": "49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6",
+# "name": "upbeat_murdock",
+# "status": "running"
+# }
+# ],
+# "createdat": "2018-12-07 13:10:15.014139258 -0600 CST",
+# "id": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f",
+# "name": "foobar",
+# "numberofcontainers": "2",
+# "status": "Running"
+# }
+# }
+# ~~~
method GetPod(name: string) -> (pod: ListPodData)
# InspectPod takes the name or ID of a pod and returns a string representation of data associated with the
@@ -751,6 +851,13 @@ method RestartPod(name: string) -> (pod: string)
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
# If the pod was killed with no errors, the pod ID is returned.
# See also [StopPod](StopPod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.KillPod '{"name": "foobar", "signal": 15}'
+# {
+# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+# }
+# ~~~
method KillPod(name: string, signal: int) -> (pod: string)
# PausePod takes the name or ID of a pod and pauses the running containers associated with it. If the pod cannot be found,
@@ -759,6 +866,13 @@ method KillPod(name: string, signal: int) -> (pod: string)
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
# If the pod was paused with no errors, the pod ID is returned.
# See also [UnpausePod](#UnpausePod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{"name": "foobar"}'
+# {
+# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+# }
+# ~~~
method PausePod(name: string) -> (pod: string)
# UnpausePod takes the name or ID of a pod and unpauses the paused containers associated with it. If the pod cannot be
@@ -767,6 +881,13 @@ method PausePod(name: string) -> (pod: string)
# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).
# If the pod was unpaused with no errors, the pod ID is returned.
# See also [PausePod](#PausePod).
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{"name": "foobar"}'
+# {
+# "pod": "1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f"
+# }
+# ~~~
method UnpausePod(name: string) -> (pod: string)
# RemovePod takes the name or ID of a pod as well a boolean representing whether a running
@@ -824,11 +945,24 @@ method GetPodStats(name: string) -> (pod: string, containers: []ContainerStats)
# ImageExists takes a full or partial image ID or name and returns an int as to whether
# the image exists in local storage. An int result of 0 means the image does exist in
# local storage; whereas 1 indicates the image does not exist in local storage.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{"name": "imageddoesntexist"}'
+# {
+# "exists": 1
+# }
+# ~~~
method ImageExists(name: string) -> (exists: int)
# ContainerExists takes a full or partial container ID or name and returns an int as to
# whether the container exists in local storage. A result of 0 means the container does
# exist; whereas a result of 1 means it could not be found.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerExists '{"name": "flamboyant_payne"}'{
+# "exists": 0
+# }
+# ~~~
method ContainerExists(name: string) -> (exists: int)
# ContainerCheckPoint performs a checkpoint on a container by its name or full/partial container
@@ -845,13 +979,36 @@ method ContainerRunlabel(runlabel: Runlabel) -> ()
# ListContainerMounts gathers all the mounted container mount points and returns them as an array
# of strings
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerMounts
+# {
+# "mounts": [
+# "/var/lib/containers/storage/overlay/b215fb622c65ba3b06c6d2341be80b76a9de7ae415ce419e65228873d4f0dcc8/merged",
+# "/var/lib/containers/storage/overlay/5eaf806073f79c0ed9a695180ad598e34f963f7407da1d2ccf3560bdab49b26f/merged",
+# "/var/lib/containers/storage/overlay/1ecb6b1dbb251737c7a24a31869096839c3719d8b250bf075f75172ddcc701e1/merged",
+# "/var/lib/containers/storage/overlay/7137b28a3c422165fe920cba851f2f8da271c6b5908672c451ebda03ad3919e2/merged"
+# ]
+# }
+# ~~~
method ListContainerMounts() -> (mounts: []string)
# MountContainer mounts a container by name or full/partial ID. Upon a successful mount, the destination
# mount is returned as a string.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.MountContainer '{"name": "jolly_shannon"}'{
+# "path": "/var/lib/containers/storage/overlay/419eeb04e783ea159149ced67d9fcfc15211084d65e894792a96bedfae0470ca/merged"
+# }
+# ~~~
method MountContainer(name: string) -> (path: string)
# UnmountContainer unmounts a container by its name or full/partial container ID.
+# #### Example
+# ~~~
+# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnmountContainer '{"name": "jolly_shannon", "force": false}'
+# {}
+# ~~~
method UnmountContainer(name: string, force: bool) -> ()
# This function is not implemented yet.
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index c5c976358..436dc5257 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -142,38 +142,50 @@ the ``cache_images`` Task) some input parameters are required:
to limit the base-images produced. For example,
``PACKER_BUILDS=fedora,image-builder-image``.
-The following process should be performed on a bare-metal CentOS 7 machine
-with network access to GCE. Software dependencies can be obtained from
-the ``packer/image-builder-image_base_setup.sh`` script.
-
-Alternatively, an existing image-builder-image may be used from within GCE.
-However it must be created with elevated cloud privileges. For example,
+If there is an existing 'image-builder-image' within GCE, it may be utilized
+to produce base-images (in addition to cache-images). However, it must be
+created with support for nested-virtualization, and with elevated cloud
+privileges (to access GCE, from within the GCE VM). For example:
```
$ alias pgcloud='sudo podman run -it --rm -e AS_ID=$UID
- -e AS_USER=$USER -v /home/$USER:/home/$USER:z cevich/gcloud_centos:latest'
+ -e AS_USER=$USER -v $HOME:$HOME:z quay.io/cevich/gcloud_centos:latest'
$ URL=https://www.googleapis.com/auth
$ SCOPES=$URL/userinfo.email,$URL/compute,$URL/devstorage.full_control
+# The --min-cpu-platform is critical for nested-virt.
$ pgcloud compute instances create $USER-making-images \
--image-family image-builder-image \
--boot-disk-size "200GB" \
--min-cpu-platform "Intel Haswell" \
--machine-type n1-standard-2 \
--scopes $SCOPES
+```
+
+Alternatively, if there is no image-builder-image available yet, a bare-metal
+CentOS 7 machine with network access to GCE is required. Software dependencies
+can be obtained from the ``packer/image-builder-image_base_setup.sh`` script.
-$ pgcloud compute ssh centos@$USER-making-images
+In both cases, the following can be used to set up and build base-images.
+
+```
+$ IP_ADDRESS=1.2.3.4 # EXTERNAL_IP from command output above
+$ rsync -av $PWD centos@$IP_ADDRESS:.
+$ scp $GOOGLE_APPLICATION_CREDENTIALS centos@$IP_ADDRESS:.
+$ ssh centos@$IP_ADDRESS
...
```
-When ready, change to the ``packer`` sub-directory, and run:
+When ready, change to the ``packer`` sub-directory, and build the images:
```
+$ cd libpod/contrib/cirrus/packer
$ make libpod_base_images GCP_PROJECT_ID=<VALUE> \
GOOGLE_APPLICATION_CREDENTIALS=<VALUE> \
RHEL_IMAGE_FILE=<VALUE> \
RHEL_CSUM_FILE=<VALUE> \
+ RHSM_COMMAND=<VALUE> \
PACKER_BUILDS=<OPTIONAL>
```
@@ -182,9 +194,9 @@ produce a ``packer-manifest.json`` output file. This contains the base-image
names suitable for updating in ``.cirrus.yml``, `env` keys ``*_BASE_IMAGE``.
On failure, it should be possible to determine the problem from the packer
-output. The only exception is for the Fedora and FAH builds, which utilize
-local qemu-kvm virtualisation. To observe the serial-port output from those
-builds, set the ``TTYDEV`` parameter to your current device. For example:
+output. Sometimes that means setting `PACKER_LOG=1` and troubleshooting
+the nested virt calls. It's also possible to observe the (nested) qemu-kvm
+console output. Simply set the ``TTYDEV`` parameter, for example:
```
$ make libpod_base_images ... TTYDEV=$(tty)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f69acb33b..af17d8495 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -18,12 +18,12 @@ import (
"github.com/containers/libpod/pkg/ctime"
"github.com/containers/libpod/pkg/hooks"
"github.com/containers/libpod/pkg/hooks/exec"
- "github.com/containers/libpod/pkg/lookup"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/mount"
+ "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -1027,7 +1027,7 @@ func (c *Container) writeStringToRundir(destFile, output string) (string, error)
return filepath.Join(c.state.DestinationRunDir, destFile), nil
}
-func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator) error {
+func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator, execUser *user.ExecUser) error {
var uid, gid int
mountPoint := c.state.Mountpoint
if !c.state.Mounted {
@@ -1053,12 +1053,8 @@ func (c *Container) addLocalVolumes(ctx context.Context, g *generate.Generator)
}
if c.config.User != "" {
- if !c.state.Mounted {
- return errors.Wrapf(ErrCtrStateInvalid, "container %s must be mounted in order to translate User field", c.ID())
- }
- execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, nil)
- if err != nil {
- return err
+ if execUser == nil {
+ return errors.Wrapf(ErrInternal, "nil pointer passed to addLocalVolumes for execUser")
}
uid = execUser.Uid
gid = execUser.Gid
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index f9b0592f9..4f2955110 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -236,7 +236,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
// Bind builtin image volumes
if c.config.Rootfs == "" && c.config.ImageVolumes {
- if err := c.addLocalVolumes(ctx, &g); err != nil {
+ if err := c.addLocalVolumes(ctx, &g, execUser); err != nil {
return nil, errors.Wrapf(err, "error mounting image volumes")
}
}
diff --git a/libpod/kube.go b/libpod/kube.go
index 05a6537c4..c164ca0c5 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
@@ -335,20 +336,69 @@ func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
return vms, nil
}
+func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
+ var (
+ drop []v1.Capability
+ add []v1.Capability
+ )
+ // Find caps in the defaultCaps but not in the container's
+ // those indicate a dropped cap
+ for _, capability := range defaultCaps {
+ if !util.StringInSlice(capability, containerCaps) {
+ cap := v1.Capability(capability)
+ drop = append(drop, cap)
+ }
+ }
+ // Find caps in the container but not in the defaults; those indicate
+ // an added cap
+ for _, capability := range containerCaps {
+ if !util.StringInSlice(capability, defaultCaps) {
+ cap := v1.Capability(capability)
+ add = append(add, cap)
+ }
+ }
+
+ return &v1.Capabilities{
+ Add: add,
+ Drop: drop,
+ }
+}
+
+func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) {
+ g, err := generate.New("linux")
+ if err != nil {
+ return nil, err
+ }
+ // Combine all the default capabilities into a slice
+ defaultCaps := append(g.Config.Process.Capabilities.Ambient, g.Config.Process.Capabilities.Bounding...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Effective...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Inheritable...)
+ defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Permitted...)
+
+ // Combine all the container's capabilities into a slice
+ containerCaps := append(caps.Ambient, caps.Bounding...)
+ containerCaps = append(containerCaps, caps.Effective...)
+ containerCaps = append(containerCaps, caps.Inheritable...)
+ containerCaps = append(containerCaps, caps.Permitted...)
+
+ calculatedCaps := determineCapAddDropFromCapabilities(defaultCaps, containerCaps)
+ return calculatedCaps, nil
+}
+
// generateKubeSecurityContext generates a securityContext based on the existing container
func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
priv := c.Privileged()
ro := c.IsReadOnly()
allowPrivEscalation := !c.Spec().Process.NoNewPrivileges
- // TODO enable use of capabilities when we can figure out how to extract cap-add|remove
- //caps := v1.Capabilities{
- // //Add: c.config.Spec.Process.Capabilities
- //}
+ newCaps, err := capAddDrop(c.config.Spec.Process.Capabilities)
+ if err != nil {
+ return nil, err
+ }
+
sc := v1.SecurityContext{
- // TODO enable use of capabilities when we can figure out how to extract cap-add|remove
- //Capabilities: &caps,
- Privileged: &priv,
+ Capabilities: newCaps,
+ Privileged: &priv,
// TODO How do we know if selinux were passed into podman
//SELinuxOptions:
// RunAsNonRoot is an optional parameter; our first implementations should be root only; however
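As a usage note for the capability helper added above, the hypothetical call below (capability sets invented for illustration, placed in the same libpod package so the unexported function is reachable) shows how the set difference maps onto the Kubernetes Add/Drop lists.
```
// Hypothetical illustration in the same package as the helper above:
// capabilities present in the defaults but not on the container are dropped,
// and capabilities on the container but not in the defaults are added.
package libpod

import "fmt"

func exampleCapAddDrop() {
	caps := determineCapAddDropFromCapabilities(
		[]string{"CAP_CHOWN", "CAP_KILL"},     // stand-in default capability set
		[]string{"CAP_KILL", "CAP_SYS_ADMIN"}, // stand-in container capability set
	)
	fmt.Println("drop:", caps.Drop) // [CAP_CHOWN]
	fmt.Println("add:", caps.Add)   // [CAP_SYS_ADMIN]
}
```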
diff --git a/libpod/options.go b/libpod/options.go
index 352e6a506..9aa657ddd 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -29,9 +29,12 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
return ErrRuntimeFinalized
}
+ setField := false
+
if config.RunRoot != "" {
rt.config.StorageConfig.RunRoot = config.RunRoot
rt.configuredFrom.storageRunRootSet = true
+ setField = true
}
if config.GraphRoot != "" {
@@ -42,16 +45,20 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
// of the c/storage store by default
rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod")
rt.configuredFrom.libpodStaticDirSet = true
+
+ setField = true
}
if config.GraphDriverName != "" {
rt.config.StorageConfig.GraphDriverName = config.GraphDriverName
rt.configuredFrom.storageGraphDriverSet = true
+ setField = true
}
if config.GraphDriverOptions != nil {
rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions))
copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions)
+ setField = true
}
if config.UIDMap != nil {
@@ -64,6 +71,18 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
copy(rt.config.StorageConfig.GIDMap, config.GIDMap)
}
+ // If any one of runroot, graphroot, graphdrivername,
+ // or graphdriveroptions are set, then GraphRoot and RunRoot
+ // must be set
+ if setField {
+ if rt.config.StorageConfig.GraphRoot == "" {
+ rt.config.StorageConfig.GraphRoot = storage.DefaultStoreOptions.GraphRoot
+ }
+ if rt.config.StorageConfig.RunRoot == "" {
+ rt.config.StorageConfig.RunRoot = storage.DefaultStoreOptions.RunRoot
+ }
+ }
+
return nil
}
}
diff --git a/pkg/varlinkapi/images.go b/pkg/varlinkapi/images.go
index cb3b1c73b..a5673f636 100644
--- a/pkg/varlinkapi/images.go
+++ b/pkg/varlinkapi/images.go
@@ -508,7 +508,7 @@ func (i *LibpodAPI) PullImage(call iopodman.VarlinkCall, name string) error {
// ImageExists returns bool as to whether the input image exists in local storage
func (i *LibpodAPI) ImageExists(call iopodman.VarlinkCall, name string) error {
_, err := i.Runtime.ImageRuntime().NewFromLocal(name)
- if errors.Cause(err) == libpod.ErrNoSuchImage {
+ if errors.Cause(err) == image.ErrNoSuchImage {
return call.ReplyImageExists(1)
}
if err != nil {
diff --git a/test/bin2img/bin2img.go b/test/bin2img/bin2img.go
index 644832c77..ed493dcc1 100644
--- a/test/bin2img/bin2img.go
+++ b/test/bin2img/bin2img.go
@@ -9,6 +9,7 @@ import (
"os"
"runtime"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/storage"
"github.com/containers/image/types"
sstorage "github.com/containers/storage"
@@ -156,7 +157,7 @@ func main() {
os.Exit(1)
}
defer img.Close()
- layer, err := img.PutBlob(ctx, layerBuffer, layerInfo, false)
+ layer, err := img.PutBlob(ctx, layerBuffer, layerInfo, blobinfocache.NewMemoryCache(), false)
if err != nil {
logrus.Errorf("error preparing to write image: %v", err)
os.Exit(1)
@@ -184,7 +185,7 @@ func main() {
Digest: digest.Canonical.FromBytes(cbytes),
Size: int64(len(cbytes)),
}
- configInfo, err = img.PutBlob(ctx, bytes.NewBuffer(cbytes), configInfo, false)
+ configInfo, err = img.PutBlob(ctx, bytes.NewBuffer(cbytes), configInfo, blobinfocache.NewMemoryCache(), false)
if err != nil {
logrus.Errorf("error saving configuration: %v", err)
os.Exit(1)
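The PutBlob signature change above comes from the updated containers/image vendoring, which threads a types.BlobInfoCache through blob transfers. A brief hedged sketch of choosing a cache implementation follows; NewMemoryCache is what bin2img uses above, while DefaultCache is listed in the vendored pkg/blobinfocache/default.go and is assumed here to return the persistent, shared cache for the current user.
```
// Sketch: picking a BlobInfoCache for image copies with the vendored
// containers/image. NewMemoryCache is throwaway and in-process (fine for
// one-shot tools and tests); DefaultCache is assumed to be the shared,
// on-disk cache selected from the SystemContext.
package sketch

import (
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

func chooseCache(sys *types.SystemContext, ephemeral bool) types.BlobInfoCache {
	if ephemeral {
		// Discarded when the process exits.
		return blobinfocache.NewMemoryCache()
	}
	// Persistent cache shared across runs.
	return blobinfocache.DefaultCache(sys)
}
```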
diff --git a/test/e2e/rootless_test.go b/test/e2e/rootless_test.go
index b15e6731b..037af9688 100644
--- a/test/e2e/rootless_test.go
+++ b/test/e2e/rootless_test.go
@@ -187,6 +187,10 @@ var _ = Describe("Podman rootless", func() {
cmd.WaitWithDefaultTimeout()
Expect(cmd.ExitCode()).To(Equal(0))
+ cmd = rootlessTest.PodmanAsUser([]string{"restart", "-l", "-t", "0"}, 1000, 1000, env)
+ cmd.WaitWithDefaultTimeout()
+ Expect(cmd.ExitCode()).To(Equal(0))
+
canUseExec := canExec()
if canUseExec {
diff --git a/vendor.conf b/vendor.conf
index 34e90469f..f2d7fa414 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -11,7 +11,7 @@ github.com/containerd/cgroups 58556f5ad8448d99a6f7bea69ea4bdb7747cfeb0
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
github.com/containernetworking/plugins 1562a1e60ed101aacc5e08ed9dbeba8e9f3d4ec1
-github.com/containers/image 63a1cbdc5e6537056695cf0d627c0a33b334df53
+github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
github.com/containers/psgo 5dde6da0bc8831b35243a847625bcf18183bd1ee
github.com/coreos/go-systemd v14
@@ -92,7 +92,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master
github.com/varlink/go master
-github.com/containers/buildah 9c65e5699cfa486531b3f123d9ce74873f0e18aa
+github.com/containers/buildah dd0f4f1b1eb49b841179049ac498e4b0f874b462
github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 91ce2b09d..cbdb5c9f9 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -317,6 +317,10 @@ type BuilderOptions struct {
// the registry together, can not be resolved to a reference to a
// source image. No separator is implicitly added.
Transport string
+ // PullBlobDirectory is the name of a directory in which we'll attempt
+ // to store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ PullBlobDirectory string
// Mount signals to NewBuilder() that the container should be mounted
// immediately.
Mount bool
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 71696b432..591ead4e6 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"time"
+ "github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -55,6 +56,12 @@ type CommitOptions struct {
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers. If blobs are available, the
+ // manifest of the new image will reference the blobs rather than
+ // on-disk layers.
+ BlobDirectory string
// OnBuild is a list of commands to be run by images based on this image
OnBuild []string
@@ -85,6 +92,11 @@ type PushOptions struct {
# ManifestType is the format to use when saving the image using the 'dir' transport
// possible options are oci, v2s1, and v2s2
ManifestType string
+ // BlobDirectory is the name of a directory in which we'll look for
+ // prebuilt copies of layer blobs that we might otherwise need to
+ // regenerate from on-disk layers, substituting them in the list of
+ // blobs to copy whenever possible.
+ BlobDirectory string
}
// Commit writes the contents of the container, along with its updated
@@ -128,13 +140,37 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
}
}
- src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.Compression, options.HistoryTimestamp)
+ src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
}
+ var maybeCachedSrc types.ImageReference = src
+ var maybeCachedDest types.ImageReference = dest
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression != archive.Uncompressed {
+ compress = types.Compress
+ }
+ cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ }
+ maybeCachedSrc = cache
+ cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
+ if err != nil {
+ return imgID, nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(dest), options.BlobDirectory)
+ }
+ maybeCachedDest = cache
+ }
// "Copy" our image to where it needs to be.
+ switch options.Compression {
+ case archive.Uncompressed:
+ systemContext.OCIAcceptUncompressedLayers = true
+ case archive.Gzip:
+ systemContext.DirForceCompress = true
+ }
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, "")); err != nil {
+ if manifestBytes, err = cp.Image(ctx, policyContext, maybeCachedDest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, maybeCachedDest, systemContext, "")); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
if len(options.AdditionalTags) > 0 {
@@ -209,10 +245,28 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
if err != nil {
return nil, "", err
}
+ var maybeCachedSrc types.ImageReference = src
+ if options.BlobDirectory != "" {
+ compress := types.PreserveOriginal
+ if options.Compression != archive.Uncompressed {
+ compress = types.Compress
+ }
+ cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
+ }
+ maybeCachedSrc = cache
+ }
// Copy everything.
+ switch options.Compression {
+ case archive.Uncompressed:
+ systemContext.OCIAcceptUncompressedLayers = true
+ case archive.Gzip:
+ systemContext.DirForceCompress = true
+ }
var manifestBytes []byte
- if manifestBytes, err = cp.Image(ctx, policyContext, dest, src, getCopyOptions(options.ReportWriter, src, nil, dest, systemContext, options.ManifestType)); err != nil {
- return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(src), transports.ImageName(dest))
+ if manifestBytes, err = cp.Image(ctx, policyContext, dest, maybeCachedSrc, getCopyOptions(options.ReportWriter, maybeCachedSrc, nil, dest, systemContext, options.ManifestType)); err != nil {
+ return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
fmt.Fprintf(options.ReportWriter, "")
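The blob-cache plumbing above wraps both ends of the copy when BlobDirectory is set. Below is a minimal hedged sketch of the same wrapping outside buildah's Commit/Push internals; it reuses identifiers visible in the diff (blobcache.NewBlobCache, types.PreserveOriginal, types.Compress, cp.Image), while the reference parsing and policy handling around them are assumptions for illustration only.
```
// Sketch: wrap source and destination references in a layer-blob cache
// before copying, as the Commit/Push changes above do when BlobDirectory
// is set. Names, transports, and policy handling are illustrative only.
package sketch

import (
	"context"

	"github.com/containers/buildah/pkg/blobcache"
	cp "github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func copyWithBlobCache(ctx context.Context, srcName, destName, blobDir string, compressing bool) error {
	src, err := alltransports.ParseImageName(srcName)
	if err != nil {
		return err
	}
	dest, err := alltransports.ParseImageName(destName)
	if err != nil {
		return err
	}

	// PreserveOriginal keeps cached blobs as-is; Compress asks the cache to
	// prefer compressed variants when the copy will compress layers.
	compress := types.PreserveOriginal
	if compressing {
		compress = types.Compress
	}
	cachedSrc, err := blobcache.NewBlobCache(src, blobDir, compress)
	if err != nil {
		return err
	}
	cachedDest, err := blobcache.NewBlobCache(dest, blobDir, compress)
	if err != nil {
		return err
	}

	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		return err
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer policyContext.Destroy()

	// Copy through the cache-aware references; blobs already present in
	// blobDir can be reused instead of being regenerated.
	_, err = cp.Image(ctx, policyContext, cachedDest, cachedSrc, &cp.Options{})
	return err
}
```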
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index c0bf90ddd..163d34269 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -57,22 +57,24 @@ type containerImageRef struct {
squash bool
tarPath func(path string) (io.ReadCloser, error)
parent string
+ blobDirectory string
}
type containerImageSource struct {
- path string
- ref *containerImageRef
- store storage.Store
- containerID string
- mountLabel string
- layerID string
- names []string
- compression archive.Compression
- config []byte
- configDigest digest.Digest
- manifest []byte
- manifestType string
- exporting bool
+ path string
+ ref *containerImageRef
+ store storage.Store
+ containerID string
+ mountLabel string
+ layerID string
+ names []string
+ compression archive.Compression
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+ manifestType string
+ exporting bool
+ blobDirectory string
}
func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
@@ -105,11 +107,11 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
-func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
+func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
omediaType = v1.MediaTypeImageLayer
dmediaType = docker.V2S2MediaTypeUncompressedLayer
- if i.compression != archive.Uncompressed {
- switch i.compression {
+ if layerCompression != archive.Uncompressed {
+ switch layerCompression {
case archive.Gzip:
omediaType = v1.MediaTypeImageLayerGzip
dmediaType = manifest.DockerV2Schema2LayerMediaType
@@ -280,19 +282,21 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer
+ // Look up this layer.
+ layer, err := i.store.Layer(layerID)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
+ }
// If we're not re-exporting the data, and we're reusing layers individually, reuse
// the blobsum and diff IDs.
if !i.exporting && !i.squash && layerID != i.layerID {
- layer, err2 := i.store.Layer(layerID)
- if err2 != nil {
- return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
- }
if layer.UncompressedDigest == "" {
return nil, errors.Errorf("unable to look up size of layer %q", layerID)
}
layerBlobSum := layer.UncompressedDigest
layerBlobSize := layer.UncompressedSize
- // Note this layer in the manifest, using the uncompressed blobsum.
+ diffID := layer.UncompressedDigest
+ // Note this layer in the manifest, using the appropriate blobsum.
olayerDescriptor := v1.Descriptor{
MediaType: omediaType,
Digest: layerBlobSum,
@@ -305,13 +309,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
Size: layerBlobSize,
}
dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
- // Note this layer in the list of diffIDs, again using the uncompressed blobsum.
- oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
- dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
+ // Note this layer in the list of diffIDs, again using the uncompressed digest.
+ oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
+ dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
continue
}
- // Figure out if we need to change the media type, in case we're using compression.
- omediaType, dmediaType, err = i.computeLayerMIMEType(what)
+ // Figure out if we need to change the media type, in case we've changed the compression.
+ omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
if err != nil {
return nil, err
}
@@ -368,8 +372,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
logrus.Debugf("%s size is %d bytes", what, size)
// Rename the layer so that we can more easily find it by digest later.
- if err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())); err != nil {
- return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
+ finalBlobName := filepath.Join(path, destHasher.Digest().String())
+ if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
+ return nil, errors.Wrapf(err, "error storing %s to file while renaming %q to %q", what, filepath.Join(path, "layer"), finalBlobName)
}
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests.
@@ -472,19 +477,20 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
panic("unreachable code: unsupported manifest type")
}
src = &containerImageSource{
- path: path,
- ref: i,
- store: i.store,
- containerID: i.containerID,
- mountLabel: i.mountLabel,
- layerID: i.layerID,
- names: i.names,
- compression: i.compression,
- config: config,
- configDigest: digest.Canonical.FromBytes(config),
- manifest: imageManifest,
- manifestType: manifestType,
- exporting: i.exporting,
+ path: path,
+ ref: i,
+ store: i.store,
+ containerID: i.containerID,
+ mountLabel: i.mountLabel,
+ layerID: i.layerID,
+ names: i.names,
+ compression: i.compression,
+ config: config,
+ configDigest: digest.Canonical.FromBytes(config),
+ manifest: imageManifest,
+ manifestType: manifestType,
+ exporting: i.exporting,
+ blobDirectory: i.blobDirectory,
}
return src, nil
}
@@ -551,7 +557,7 @@ func (i *containerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.B
return nil, nil
}
-func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
+func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
if blob.Digest == i.configDigest {
logrus.Debugf("start reading config")
reader := bytes.NewReader(i.config)
@@ -561,7 +567,16 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
}
return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
}
- layerFile, err := os.OpenFile(filepath.Join(i.path, blob.Digest.String()), os.O_RDONLY, 0600)
+ var layerFile *os.File
+ for _, path := range []string{i.blobDirectory, i.path} {
+ layerFile, err = os.OpenFile(filepath.Join(path, blob.Digest.String()), os.O_RDONLY, 0600)
+ if err == nil {
+ break
+ }
+ if !os.IsNotExist(err) {
+ logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), path, err)
+ }
+ }
if err != nil {
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
return nil, -1, errors.Wrapf(err, "error opening file %q to buffer layer blob", filepath.Join(i.path, blob.Digest.String()))
@@ -584,7 +599,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
}
-func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
@@ -630,6 +645,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
squash: squash,
tarPath: b.tarPath(),
parent: parent,
+ blobDirectory: blobDirectory,
}
return ref, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index e6ee6a071..2681bc198 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -10,6 +10,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "regexp"
"strconv"
"strings"
"time"
@@ -168,6 +169,8 @@ type BuildOptions struct {
// ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
// the build was unsuccessful.
ForceRmIntermediateCtrs bool
+ // BlobDirectory is a directory which we'll use for caching layer blobs.
+ BlobDirectory string
}
// Executor is a buildah-based implementation of the imagebuilder.Executor
@@ -224,6 +227,7 @@ type Executor struct {
containerIDs []string // Stores the IDs of the successful intermediate containers used during layer build
imageMap map[string]string // Used to map images that we create to handle the AS construct.
copyFrom string // Used to keep track of the --from flag from COPY and ADD
+ blobDirectory string
}
// builtinAllowedBuildArgs is list of built-in allowed build args
@@ -239,7 +243,7 @@ var builtinAllowedBuildArgs = map[string]bool{
}
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
-func (b *Executor) withName(name string, index int) *Executor {
+func (b *Executor) withName(name string, index int, from string) *Executor {
if b.named == nil {
b.named = make(map[string]*Executor)
}
@@ -248,6 +252,7 @@ func (b *Executor) withName(name string, index int) *Executor {
copied.name = name
child := &copied
b.named[name] = child
+ b.named[from] = child
if idx := strconv.Itoa(index); idx != name {
b.named[idx] = child
}
@@ -609,6 +614,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
noCache: options.NoCache,
removeIntermediateCtrs: options.RemoveIntermediateCtrs,
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
+ blobDirectory: options.BlobDirectory,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -664,6 +670,7 @@ func (b *Executor) Prepare(ctx context.Context, stage imagebuilder.Stage, from s
PullPolicy: b.pullPolicy,
Registry: b.registry,
Transport: b.transport,
+ PullBlobDirectory: b.blobDirectory,
SignaturePolicyPath: b.signaturePolicyPath,
ReportWriter: b.reportWriter,
SystemContext: b.systemContext,
@@ -1227,6 +1234,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder, created
SystemContext: b.systemContext,
IIDFile: b.iidfile,
Squash: b.squash,
+ BlobDirectory: b.blobDirectory,
Parent: b.builder.FromImageID,
}
imgID, ref, _, err := b.builder.Commit(ctx, imageRef, options)
@@ -1252,8 +1260,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (strin
b.imageMap = make(map[string]string)
stageCount := 0
for _, stage := range stages {
- stageExecutor = b.withName(stage.Name, stage.Position)
- if err := stageExecutor.Prepare(ctx, stage, ""); err != nil {
+ ib := stage.Builder
+ node := stage.Node
+ base, err := ib.From(node)
+ if err != nil {
+ logrus.Debugf("Build(node.Children=%#v)", node.Children)
+ return "", nil, err
+ }
+
+ stageExecutor = b.withName(stage.Name, stage.Position, base)
+ if err := stageExecutor.Prepare(ctx, stage, base); err != nil {
return "", nil, err
}
// Always remove the intermediate/build containers, even if the build was unsuccessful.
@@ -1392,6 +1408,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
dockerfiles = append(dockerfiles, data)
}
+
+ dockerfiles = processCopyFrom(dockerfiles)
+
mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
if err != nil {
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
@@ -1415,6 +1434,80 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return exec.Build(ctx, stages)
}
+// processCopyFrom goes through the Dockerfiles and handles any 'COPY --from' instances,
+// prepending a new FROM statement to the Dockerfile for any that do not already have a
+// corresponding FROM command within them.
+func processCopyFrom(dockerfiles []io.ReadCloser) []io.ReadCloser {
+
+ var newDockerfiles []io.ReadCloser
+ // fromMap contains the names of the images seen in a FROM
+ // line in the Dockerfiles. The boolean value just completes the map object.
+ fromMap := make(map[string]bool)
+ // asMap contains the names of the images seen after a "FROM image AS"
+ // line in the Dockerfiles. The boolean value just completes the map object.
+ asMap := make(map[string]bool)
+
+ copyRE := regexp.MustCompile(`\s*COPY\s+--from=`)
+ fromRE := regexp.MustCompile(`\s*FROM\s+`)
+ asRE := regexp.MustCompile(`(?i)\s+as\s+`)
+ for _, dfile := range dockerfiles {
+ if dfileBinary, err := ioutil.ReadAll(dfile); err == nil {
+ dfileString := fmt.Sprintf("%s", dfileBinary)
+ copyFromContent := copyRE.Split(dfileString, -1)
+ // no "COPY --from=", just continue
+ if len(copyFromContent) < 2 {
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
+ continue
+ }
+ // Load all image names in our Dockerfiles into a map
+ // for easy reference later.
+ fromContent := fromRE.Split(dfileString, -1)
+ for i := 0; i < len(fromContent); i++ {
+ imageName := strings.Split(fromContent[i], " ")
+ if len(imageName) > 0 {
+ finalImage := strings.Split(imageName[0], "\n")
+ if finalImage[0] != "" {
+ fromMap[strings.TrimSpace(finalImage[0])] = true
+ }
+ }
+ }
+ logrus.Debug("fromMap: ", fromMap)
+
+ // Load all image names associated with an 'as' or 'AS' in
+ // our Dockerfiles into a map for easy reference later.
+ asContent := asRE.Split(dfileString, -1)
+ // Skip the first entry in the array: it is the content before the
+ // first " as " separator, which we don't need here.
+ for i := 1; i < len(asContent); i++ {
+ asName := strings.Split(asContent[i], " ")
+ if len(asName) > 0 {
+ finalAsImage := strings.Split(asName[0], "\n")
+ if finalAsImage[0] != "" {
+ asMap[strings.TrimSpace(finalAsImage[0])] = true
+ }
+ }
+ }
+ logrus.Debug("asMap: ", asMap)
+
+ for i := 1; i < len(copyFromContent); i++ {
+ fromArray := strings.Split(copyFromContent[i], " ")
+ // If the image isn't a stage number or already declared,
+ // add a FROM statement for it to the top of our Dockerfile.
+ trimmedFrom := strings.TrimSpace(fromArray[0])
+ _, okFrom := fromMap[trimmedFrom]
+ _, okAs := asMap[trimmedFrom]
+ _, err := strconv.Atoi(trimmedFrom)
+ if !okFrom && !okAs && err != nil {
+ from := "FROM " + trimmedFrom
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(from)))
+ }
+ }
+ newDockerfiles = append(newDockerfiles, ioutil.NopCloser(strings.NewReader(dfileString)))
+ } // End if dfileBinary, err := ioutil.ReadAll(dfile); err == nil
+ } // End for _, dfile := range dockerfiles {
+ return newDockerfiles
+}
+
// deleteSuccessfulIntermediateCtrs goes through the container IDs in b.containerIDs
// and deletes the containers associated with that ID.
func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
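
As a rough illustration of what processCopyFrom above does (as if called from inside the imagebuildah package; the Dockerfile text is made up), a stage referenced by COPY --from= that is neither a numeric stage index nor already declared via FROM/AS gets a synthetic FROM reader prepended:

in := []io.ReadCloser{
	ioutil.NopCloser(strings.NewReader("FROM alpine\nCOPY --from=builder /out /out\n")),
}
out := processCopyFrom(in)
// out now holds two readers: a synthesized "FROM builder" entry followed by
// the original Dockerfile text, so later stage handling can resolve "builder".
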
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 13bebf420..7e7f97e49 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -34,6 +34,7 @@ func pullAndFindImage(ctx context.Context, store storage.Store, imageName string
Store: store,
SystemContext: options.SystemContext,
Transport: options.Transport,
+ BlobDirectory: options.PullBlobDirectory,
}
ref, err := pullImage(ctx, store, imageName, pullOptions, sc)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
new file mode 100644
index 000000000..63022f15d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go
@@ -0,0 +1,517 @@
+package blobcache
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/buildah/docker"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/ioutils"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ _ types.ImageReference = &blobCacheReference{}
+ _ types.ImageSource = &blobCacheSource{}
+ _ types.ImageDestination = &blobCacheDestination{}
+)
+
+const (
+ compressedNote = ".compressed"
+ decompressedNote = ".decompressed"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+type BlobCache interface {
+ types.ImageReference
+ // HasBlob checks if a blob that matches the passed-in digest (and
+ // size, if not -1) is present in the cache.
+ HasBlob(types.BlobInfo) (bool, int64, error)
+ // Directory returns the cache directory.
+ Directory() string
+ // ClearCache() clears the contents of the cache directory. Note
+ // that this also clears content which was not placed there by this
+ // cache implementation.
+ ClearCache() error
+}
+
+type blobCacheReference struct {
+ reference types.ImageReference
+ directory string
+ compress types.LayerCompression
+}
+
+type blobCacheSource struct {
+ reference *blobCacheReference
+ source types.ImageSource
+ sys types.SystemContext
+ cacheHits int64
+ cacheMisses int64
+ cacheErrors int64
+}
+
+type blobCacheDestination struct {
+ reference *blobCacheReference
+ destination types.ImageDestination
+}
+
+func makeFilename(blobSum digest.Digest, isConfig bool) string {
+ if isConfig {
+ return blobSum.String() + ".config"
+ }
+ return blobSum.String()
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory or a temporary directory. The cache directory's contents
+// can be cleared by calling the returned BlobCache's ClearCache() method.
+// The compress argument controls whether or not the cache will try to substitute a compressed
+// or different version of a blob when preparing the list of layers when reading an image.
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) {
+ if directory == "" {
+ return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref))
+ }
+ switch compress {
+ case types.Compress, types.Decompress, types.PreserveOriginal:
+ // valid value, accept it
+ default:
+ return nil, errors.Errorf("unhandled LayerCompression value %v", compress)
+ }
+ return &blobCacheReference{
+ reference: ref,
+ directory: directory,
+ compress: compress,
+ }, nil
+}
+
+func (r *blobCacheReference) Transport() types.ImageTransport {
+ return r.reference.Transport()
+}
+
+func (r *blobCacheReference) StringWithinTransport() string {
+ return r.reference.StringWithinTransport()
+}
+
+func (r *blobCacheReference) DockerReference() reference.Named {
+ return r.reference.DockerReference()
+}
+
+func (r *blobCacheReference) PolicyConfigurationIdentity() string {
+ return r.reference.PolicyConfigurationIdentity()
+}
+
+func (r *blobCacheReference) PolicyConfigurationNamespaces() []string {
+ return r.reference.PolicyConfigurationNamespaces()
+}
+
+func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return r.reference.DeleteImage(ctx, sys)
+}
+
+func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ if blobinfo.Digest == "" {
+ return false, -1, nil
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig))
+ fileInfo, err := os.Stat(filename)
+ if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) {
+ return true, fileInfo.Size(), nil
+ }
+ if !os.IsNotExist(err) {
+ return false, -1, errors.Wrapf(err, "error checking size of %q", filename)
+ }
+ }
+
+ return false, -1, nil
+}
+
+func (r *blobCacheReference) Directory() string {
+ return r.directory
+}
+
+func (r *blobCacheReference) ClearCache() error {
+ f, err := os.Open(r.directory)
+ if err != nil {
+ return errors.Wrapf(err, "error opening directory %q", r.directory)
+ }
+ defer f.Close()
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ return errors.Wrapf(err, "error reading directory %q", r.directory)
+ }
+ for _, name := range names {
+ pathname := filepath.Join(r.directory, name)
+ if err = os.RemoveAll(pathname); err != nil {
+ return errors.Wrapf(err, "error removing %q while clearing cache for %q", pathname, transports.ImageName(r))
+ }
+ }
+ return nil
+}
+
+func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ src, err := r.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference))
+ }
+ return image.FromSource(ctx, sys, src)
+}
+
+func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ src, err := r.reference.NewImageSource(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference))
+ }
+ logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress)
+ return &blobCacheSource{reference: r, source: src, sys: *sys}, nil
+}
+
+func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ dest, err := r.reference.NewImageDestination(ctx, sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(r.reference))
+ }
+ logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory)
+ return &blobCacheDestination{reference: r, destination: dest}, nil
+}
+
+func (s *blobCacheSource) Reference() types.ImageReference {
+ return s.reference
+}
+
+func (s *blobCacheSource) Close() error {
+ logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors)
+ return s.source.Close()
+}
+
+func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false))
+ manifestBytes, err := ioutil.ReadFile(filename)
+ if err == nil {
+ s.cacheHits++
+ return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil
+ }
+ if !os.IsNotExist(err) {
+ s.cacheErrors++
+ return nil, "", errors.Wrapf(err, "error checking for manifest file %q", filename)
+ }
+ }
+ s.cacheMisses++
+ return s.source.GetManifest(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ present, size, err := s.reference.HasBlob(blobinfo)
+ if err != nil {
+ return nil, -1, err
+ }
+ if present {
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig))
+ f, err := os.Open(filename)
+ if err == nil {
+ s.cacheHits++
+ return f, size, nil
+ }
+ if !os.IsNotExist(err) {
+ s.cacheErrors++
+ return nil, -1, errors.Wrapf(err, "error checking for cache file %q", filepath.Join(s.reference.directory, filename))
+ }
+ }
+ }
+ s.cacheMisses++
+ rc, size, err := s.source.GetBlob(ctx, blobinfo, cache)
+ if err != nil {
+ return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference))
+ }
+ return rc, size, nil
+}
+
+func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return s.source.GetSignatures(ctx, instanceDigest)
+}
+
+func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+ signatures, err := s.source.GetSignatures(ctx, nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference))
+ }
+ canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0)
+
+ infos, err := s.source.LayerInfosForCopy(ctx)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference))
+ }
+ if infos == nil {
+ image, err := s.reference.NewImage(ctx, &s.sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference))
+ }
+ defer image.Close()
+ infos = image.LayerInfos()
+ }
+
+ if canReplaceBlobs && s.reference.compress != types.PreserveOriginal {
+ replacedInfos := make([]types.BlobInfo, 0, len(infos))
+ for _, info := range infos {
+ var replaceDigest []byte
+ var err error
+ blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false))
+ alternate := ""
+ switch s.reference.compress {
+ case types.Compress:
+ alternate = blobFile + compressedNote
+ replaceDigest, err = ioutil.ReadFile(alternate)
+ case types.Decompress:
+ alternate = blobFile + decompressedNote
+ replaceDigest, err = ioutil.ReadFile(alternate)
+ }
+ if err == nil && digest.Digest(replaceDigest).Validate() == nil {
+ alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false))
+ fileInfo, err := os.Stat(alternate)
+ if err == nil {
+ logrus.Debugf("suggesting cached blob with digest %q and compression %v in place of blob with digest %q", string(replaceDigest), s.reference.compress, info.Digest.String())
+ info.Digest = digest.Digest(replaceDigest)
+ info.Size = fileInfo.Size()
+ switch info.MediaType {
+ case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = v1.MediaTypeImageLayerGzip
+ case types.Decompress:
+ info.MediaType = v1.MediaTypeImageLayer
+ }
+ case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType:
+ switch s.reference.compress {
+ case types.Compress:
+ info.MediaType = manifest.DockerV2Schema2LayerMediaType
+ case types.Decompress:
+ info.MediaType = docker.V2S2MediaTypeUncompressedLayer
+ }
+ }
+ }
+ }
+ replacedInfos = append(replacedInfos, info)
+ }
+ infos = replacedInfos
+ }
+
+ return infos, nil
+}
+
+func (d *blobCacheDestination) Reference() types.ImageReference {
+ return d.reference
+}
+
+func (d *blobCacheDestination) Close() error {
+ logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference))
+ return d.destination.Close()
+}
+
+func (d *blobCacheDestination) SupportedManifestMIMETypes() []string {
+ return d.destination.SupportedManifestMIMETypes()
+}
+
+func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error {
+ return d.destination.SupportsSignatures(ctx)
+}
+
+func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression {
+ return d.destination.DesiredLayerCompression()
+}
+
+func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool {
+ return d.destination.AcceptsForeignLayerURLs()
+}
+
+func (d *blobCacheDestination) MustMatchRuntimeOS() bool {
+ return d.destination.MustMatchRuntimeOS()
+}
+
+func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool {
+ return d.destination.IgnoresEmbeddedDockerReference()
+}
+
+// Decompress and save the contents of the decompressReader stream into the passed-in temporary
+// file. If we successfully save all of the data, rename the file to match the digest of the data,
+// and make notes about the relationship between the file that holds a copy of the compressed data
+// and this new file.
+func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) {
+ defer wg.Done()
+ // Decompress from and digest the reading end of that pipe.
+ decompressed, err3 := archive.DecompressStream(decompressReader)
+ digester := digest.Canonical.Digester()
+ if err3 == nil {
+ // Read the decompressed data through the filter over the pipe, blocking until the
+ // writing end is closed.
+ _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed)
+ } else {
+ // Drain the pipe to keep from stalling the PutBlob() thread.
+ io.Copy(ioutil.Discard, decompressReader)
+ }
+ decompressReader.Close()
+ decompressed.Close()
+ tempFile.Close()
+ // Determine the name that we should give to the uncompressed copy of the blob.
+ decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig))
+ if err3 == nil {
+ // Rename the temporary file.
+ if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil {
+ logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3)
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ } else {
+ *alternateDigest = digester.Digest()
+ // Note the relationship between the two files.
+ if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3)
+ }
+ if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil {
+ logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3)
+ }
+ }
+ } else {
+ // Remove the temporary file.
+ if err3 = os.Remove(tempFile.Name()); err3 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3)
+ }
+ }
+}
+
+func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ var tempfile *os.File
+ var err error
+ var n int
+ var alternateDigest digest.Digest
+ wg := new(sync.WaitGroup)
+ defer wg.Wait()
+ compression := archive.Uncompressed
+ if inputInfo.Digest != "" {
+ filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ if err == nil {
+ stream = io.TeeReader(stream, tempfile)
+ defer func() {
+ if err == nil {
+ if err = os.Rename(tempfile.Name(), filename); err != nil {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename)
+ }
+ } else {
+ if err2 := os.Remove(tempfile.Name()); err2 != nil {
+ logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2)
+ }
+ }
+ tempfile.Close()
+ }()
+ } else {
+ logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err)
+ }
+ if !isConfig {
+ initial := make([]byte, 8)
+ n, err = stream.Read(initial)
+ if n > 0 {
+ // Build a Reader that will still return the bytes that we just
+ // read, for PutBlob()'s sake.
+ stream = io.MultiReader(bytes.NewReader(initial[:n]), stream)
+ if n >= len(initial) {
+ compression = archive.DetectCompression(initial[:n])
+ }
+ if compression != archive.Uncompressed {
+ // The stream is compressed, so create a file which we'll
+ // use to store a decompressed copy.
+ decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig))
+ if err2 != nil {
+ logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2)
+ decompressedTemp.Close()
+ } else {
+ // Write a copy of the compressed data to a pipe,
+ // closing the writing end of the pipe after
+ // PutBlob() returns.
+ decompressReader, decompressWriter := io.Pipe()
+ defer decompressWriter.Close()
+ stream = io.TeeReader(stream, decompressWriter)
+ // Let saveStream() close the reading end and handle the temporary file.
+ wg.Add(1)
+ go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest)
+ }
+ }
+ }
+ }
+ }
+ newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+ if err != nil {
+ return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference))
+ }
+ if alternateDigest.Validate() == nil {
+ logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory)
+ } else {
+ logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory)
+ }
+ return newBlobInfo, nil
+}
+
+func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute)
+ if err != nil || present {
+ return present, reusedInfo, err
+ }
+
+ for _, isConfig := range []bool{false, true} {
+ filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig))
+ f, err := os.Open(filename)
+ if err == nil {
+ defer f.Close()
+ uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ return true, uploadedInfo, nil
+ }
+ }
+
+ return false, types.BlobInfo{}, nil
+}
+
+func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte) error {
+ manifestDigest, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err)
+ } else {
+ filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false))
+ if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil {
+ logrus.Warnf("error saving manifest as %q: %v", filename, err)
+ }
+ }
+ return d.destination.PutManifest(ctx, manifestBytes)
+}
+
+func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
+ return d.destination.PutSignatures(ctx, signatures)
+}
+
+func (d *blobCacheDestination) Commit(ctx context.Context) error {
+ return d.destination.Commit(ctx)
+}
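
Because blobCacheReference implements types.ImageReference, the cache slots in wherever a reference is expected. A minimal usage sketch, assuming github.com/containers/image/transports/alltransports for parsing and an existing ctx/sys in scope; the image name and cache directory are examples only:

ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
if err != nil {
	return err
}
cached, err := blobcache.NewBlobCache(ref, "/var/tmp/blob-cache", types.PreserveOriginal)
if err != nil {
	return err
}
// Blobs written through (or read via) cached are also stored in /var/tmp/blob-cache,
// named by digest; cached.ClearCache() empties that directory.
dest, err := cached.NewImageDestination(ctx, sys)
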
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 03b340294..e3a4fe62a 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -112,6 +112,10 @@ var (
Usage: "use `[username[:password]]` for accessing the registry",
},
cli.BoolFlag{
+ Name: "disable-compression, D",
+ Usage: "don't compress layers by default",
+ },
+ cli.BoolFlag{
Name: "disable-content-trust",
Usage: "This is a Docker specific option and is a NOOP",
},
@@ -192,6 +196,12 @@ var (
Name: "add-host",
Usage: "add a custom host-to-IP mapping (`host:ip`) (default [])",
},
+ cli.StringFlag{
+ Name: "blob-cache",
+ Value: "",
+ Usage: "assume image blobs in the specified directory will be available for pushing",
+ Hidden: true, // this is here mainly so that we can test the API during integration tests
+ },
cli.StringSliceFlag{
Name: "cap-add",
Usage: "add the specified capability when running (default [])",
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 1a51edb0e..e677c8925 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -5,6 +5,7 @@ import (
"io"
"strings"
+ "github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
cp "github.com/containers/image/copy"
"github.com/containers/image/docker/reference"
@@ -40,6 +41,10 @@ type PullOptions struct {
// image name alone can not be resolved to a reference to a source
// image. No separator is implicitly added.
Transport string
+ // BlobDirectory is the name of a directory in which we'll attempt to
+ // store copies of layer blobs that we pull down, if any. It should
+ // already exist.
+ BlobDirectory string
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference, spec string) (string, error) {
@@ -182,6 +187,14 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
if err != nil {
return nil, errors.Wrapf(err, "error parsing image name %q", destName)
}
+ var maybeCachedDestRef types.ImageReference = destRef
+ if options.BlobDirectory != "" {
+ cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(destRef), options.BlobDirectory)
+ }
+ maybeCachedDestRef = cachedRef
+ }
policy, err := signature.DefaultPolicy(sc)
if err != nil {
@@ -200,7 +213,7 @@ func pullImage(ctx context.Context, store storage.Store, imageName string, optio
}()
logrus.Debugf("copying %q to %q", spec, destName)
- if _, err := cp.Image(ctx, policyContext, destRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, destRef, nil, "")); err != nil {
+ if _, err := cp.Image(ctx, policyContext, maybeCachedDestRef, srcRef, getCopyOptions(options.ReportWriter, srcRef, sc, maybeCachedDestRef, nil, "")); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", spec, destName, err)
return nil, err
}
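
PullOptions.BlobDirectory is the caller-facing knob for the wrapping shown above: when it is non-empty, the destination reference is replaced by a blob cache around it. A sketch of filling in the option (the cache path is an assumption and must already exist):

opts := buildah.PullOptions{
	ReportWriter:  os.Stderr,
	Transport:     "docker://",
	BlobDirectory: "/var/tmp/blob-cache",
}
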
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index acba0011e..61325114c 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,9 +3,10 @@ github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.7.0-alpha1
-github.com/containers/image 63a1cbdc5e6537056695cf0d627c0a33b334df53
+github.com/containers/image d53afe179b381fafb427e6b9cf9b1996a98c1067
+github.com/boltdb/bolt master
github.com/containers/libpod fe4f09493f41f675d24c969d1b60d1a6a45ddb9e
-github.com/containers/storage 3161726d1db0d0d4e86a9667dd476f09b997f497
+github.com/containers/storage db40f96d853dfced60c563e61fb66ba231ce7c8d
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index 313d802b3..013080e8d 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/containers/image/image"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/signature"
"github.com/containers/image/transports"
@@ -24,14 +25,16 @@ import (
)
type digestingReader struct {
- source io.Reader
- digester digest.Digester
- expectedDigest digest.Digest
- validationFailed bool
+ source io.Reader
+ digester digest.Digester
+ expectedDigest digest.Digest
+ validationFailed bool
+ validationSucceeded bool
}
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// and set validationFailed to true if the source stream does not match expectedDigest.
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
+// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
if err := expectedDigest.Validate(); err != nil {
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
@@ -64,6 +67,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
d.validationFailed = true
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
}
+ d.validationSucceeded = true
}
return n, err
}
@@ -71,21 +75,22 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
- cachedDiffIDs map[digest.Digest]digest.Digest
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
+ blobInfoCache types.BlobInfoCache
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
- c *copier
- manifestUpdates *types.ManifestUpdateOptions
- src types.Image
- diffIDsAreNeeded bool
- canModifyManifest bool
+ c *copier
+ manifestUpdates *types.ManifestUpdateOptions
+ src types.Image
+ diffIDsAreNeeded bool
+ canModifyManifest bool
+ canSubstituteBlobs bool
}
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
@@ -141,12 +146,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}()
c := &copier{
- cachedDiffIDs: make(map[digest.Digest]digest.Digest),
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressInterval: options.ProgressInterval,
progress: options.Progress,
+ // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
+ // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
+ // we might want to add a separate CommonCtx — or would that be too confusing?
+ blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
}
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
@@ -235,6 +243,13 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
src: src,
// diffIDsAreNeeded is computed later
canModifyManifest: len(sigs) == 0,
+ // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
+ // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
+ // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
+ // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
+ // and we would reuse and sign it.
+ canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
}
if err := ic.updateEmbeddedDockerReference(); err != nil {
@@ -498,32 +513,24 @@ type diffIDResult struct {
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
- // Check if we already have a blob with this digest
- haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
- }
- // If we already have a cached diffID for this blob, we don't need to compute it
- diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
- // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
- if haveBlob && !diffIDIsNeeded {
- // Check the blob sizes match, if we were given a size this time
- if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
- return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
- }
- srcInfo.Size = extantBlobSize
- // Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
- blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
+ cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+ // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+ if !diffIDIsNeeded {
+ reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+ return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
+ }
+ if reused {
+ ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+ return blobInfo, cachedDiffID, nil
}
- ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
- return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
}
// Fallback: copy the layer, computing the diffID if we need to do so
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@@ -543,11 +550,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
return blobInfo, diffIDResult.digest, nil
}
} else {
- return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
+ return blobInfo, cachedDiffID, nil
}
}
@@ -624,7 +633,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
- // Note that we don't use a stronger "validationSucceeded" indicator, because
+ // Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -660,8 +669,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
+ var compressionOperation types.LayerCompression
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
+ compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
@@ -674,6 +685,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
+ compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
@@ -684,6 +696,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else {
logrus.Debugf("Using original blob without modification")
+ compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
}
@@ -699,7 +712,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// === Finally, send the layer stream to dest.
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
+ uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
@@ -722,6 +735,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
+ if digestingReader.validationSucceeded {
+ // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+ // (because inputInfo.Digest == "", this must have been computed afresh).
+ switch compressionOperation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+ }
+ }
return uploadedInfo, nil
}
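
The net effect in copy.go is that the per-copier diffID map is replaced by a shared BlobInfoCache that can outlive a single copy. A small sketch of the two calls that matter, assuming a cache from blobinfocache.DefaultCache (a nil SystemContext selects the default location) and digest values already in hand:

cache := blobinfocache.DefaultCache(nil)
// After a layer has been fully read and its DiffID computed:
cache.RecordDigestUncompressedPair(compressedDigest, diffID)
// On a later copy, the lookup may let us skip recomputing the DiffID:
if known := cache.UncompressedDigest(compressedDigest); known != "" {
	// known is the uncompressed ("DiffID") digest for this blob
}
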
diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go
index d888931fe..d75c195b2 100644
--- a/vendor/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/directory/directory_dest.go
@@ -127,10 +127,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -169,27 +170,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
}
// PutManifest writes manifest to the destination.
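
The caller-side shape of the new TryReusingBlob contract, replacing the old HasBlob/ReapplyBlob pair (ctx, dest, cache, and layerDigest are assumed to exist in the caller):

reused, info, err := dest.TryReusingBlob(ctx, types.BlobInfo{Digest: layerDigest, Size: -1}, cache, false)
if err != nil {
	return err
}
if reused {
	// info carries at least Digest and Size; no PutBlob call is needed for this layer.
}
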
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index 5e17c37c0..3625def80 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -49,7 +49,9 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
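
On the source side the signature change is mechanical; a caller with nothing to cache can pass the no-op blobinfocache.NoCache, as docker_image_dest.go does later in this diff. For example (variable names assumed):

rc, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: layerDigest}, blobinfocache.NoCache)
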
diff --git a/vendor/github.com/containers/image/docker/cache.go b/vendor/github.com/containers/image/docker/cache.go
new file mode 100644
index 000000000..64ad57a7c
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/cache.go
@@ -0,0 +1,23 @@
+package docker
+
+import (
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+ // Blobs can be reused across the whole registry.
+ return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+ // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+ return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+ return reference.ParseNormalizedNamed(lr.Opaque)
+}
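
A concrete reading of the two scoping rules above, using the underlying reference helpers (the image name is only an example):

named, _ := reference.ParseNormalizedNamed("quay.io/podman/stable:latest")
fmt.Println(reference.Domain(named)) // "quay.io": blobs are reusable registry-wide
fmt.Println(named.Name())            // "quay.io/podman/stable": a location is a repository, without tag or digest
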
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index ea1a8ec06..7f55dbe7f 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -70,10 +70,11 @@ type extensionSignatureList struct {
}
type bearerToken struct {
- Token string `json:"token"`
- AccessToken string `json:"access_token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt time.Time `json:"issued_at"`
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+ expirationTime time.Time
}
// dockerClient is configuration for dealing with a single Docker registry.
@@ -88,14 +89,14 @@ type dockerClient struct {
password string
signatureBase signatureStorageBase
scope authScope
+ extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge
supportsSignatures bool
- // The following members are private state for setupRequestAuth, both are valid if token != nil.
- token *bearerToken
- tokenExpiration time.Time
+ // Private state for setupRequestAuth
+ tokenCache map[string]bearerToken
}
type authScope struct {
@@ -131,6 +132,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
+ token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
@@ -260,6 +262,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
registry: registry,
client: &http.Client{Transport: tr},
insecureSkipTLSVerify: skipVerify,
+ tokenCache: map[string]bearerToken{},
}, nil
}
@@ -463,24 +466,23 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
- if c.token == nil || time.Now().After(c.tokenExpiration) {
- realm, ok := challenge.Parameters["realm"]
- if !ok {
- return errors.Errorf("missing realm in bearer auth challenge")
- }
- service, _ := challenge.Parameters["service"] // Will be "" if not present
- var scope string
- if c.scope.remoteName != "" && c.scope.actions != "" {
- scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
- }
- token, err := c.getBearerToken(req.Context(), realm, service, scope)
+ cacheKey := ""
+ scopes := []authScope{c.scope}
+ if c.extraScope != nil {
+ // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
+ cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
+ scopes = append(scopes, *c.extraScope)
+ }
+ token, ok := c.tokenCache[cacheKey]
+ if !ok || time.Now().After(token.expirationTime) {
+ t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
- c.token = token
- c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ token = *t
+ c.tokenCache[cacheKey] = token
}
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@@ -490,7 +492,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return nil
}
-func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return nil, errors.Errorf("missing realm in bearer auth challenge")
+ }
+
authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
@@ -500,11 +507,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if c.username != "" {
getParams.Add("account", c.username)
}
- if service != "" {
+ if service, ok := challenge.Parameters["service"]; ok && service != "" {
getParams.Add("service", service)
}
- if scope != "" {
- getParams.Add("scope", scope)
+ for _, scope := range scopes {
+ if scope.remoteName != "" && scope.actions != "" {
+ getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+ }
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
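
The rework above keys the bearer-token cache by the extra scope only, so the common case (no cross-repository mount in flight) uses the empty key, and each mount source gets its own cached token. A sketch of the key convention from inside the package (the scope values are assumptions):

// no extra scope: cacheKey == ""
extra := authScope{remoteName: "library/alpine", actions: "pull"}
cacheKey := fmt.Sprintf("%s:%s", extra.remoteName, extra.actions) // "library/alpine:pull"
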
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
index 9bbffef93..2f471f648 100644
--- a/vendor/github.com/containers/image/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -113,17 +114,21 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
- haveBlob, size, err := d.HasBlob(ctx, inputInfo)
+ // This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
+ // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+ // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
+ haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
}
@@ -160,7 +165,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
- // FIXME: DELETE uploadLocation on failure
+ // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
@@ -177,19 +182,15 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
+// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
- }
- checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
-
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
@@ -202,7 +203,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
- return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
+ return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -211,8 +212,134 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
}
}
-func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
+ u := url.URL{
+ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+ RawQuery: url.Values{
+ "mount": {srcDigest.String()},
+ "from": {reference.Path(srcRepo)},
+ }.Encode(),
+ }
+ mountPath := u.String()
+ logrus.Debugf("Trying to mount %s", mountPath)
+ res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusCreated:
+ logrus.Debugf("... mount OK")
+ return nil
+ case http.StatusAccepted:
+ // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+ // Abort, and let the ultimate caller do an upload when it's ready, instead.
+ // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+ }
+ logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+ res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
+ if err != nil {
+ logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+ } else {
+ defer res2.Body.Close()
+ if res2.StatusCode != http.StatusNoContent {
+ logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
+ }
+ }
+ // Anyway, if canceling the upload fails, ignore it and return the more important error:
+ return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ default:
+ logrus.Debugf("Error mounting, response %#v", *res)
+ return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+ }
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ if info.Digest == "" {
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+ }
+
+ // First, check whether the blob happens to already exist at the destination.
+ exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
+ if err != nil {
+ return false, types.BlobInfo{}, err
+ }
+ if exists {
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+ }
+
+ // Then try reusing blobs from other locations.
+
+ // Checking candidateRepo, and mounting from it, requires an expanded token scope.
+ // We still want to reuse the ping information and other aspects of the client, so rather than making a fresh copy, we use this somewhat ugly extraScope hack.
+ if d.c.extraScope != nil {
+ return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
+ }
+ defer func() {
+ d.c.extraScope = nil
+ }()
+ for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+ candidateRepo, err := parseBICLocationReference(candidate.Location)
+ if err != nil {
+ logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
+ continue
+ }
+ logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+
+ // Sanity checks:
+ if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
+ logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
+ continue
+ }
+ if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ logrus.Debug("... Already tried the primary destination")
+ continue
+ }
+
+ // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
+ d.c.extraScope = &authScope{
+ remoteName: reference.Path(candidateRepo),
+ actions: "pull",
+ }
+ // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
+ // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
+ // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
+ // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
+ // Even worse, docker/distribution does not actually reasonably implement canceling uploads
+ // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
+ // so, be a nice client and don't create unnecessary upload sessions on the server.
+ exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
+ if err != nil {
+ logrus.Debugf("... Failed: %v", err)
+ continue
+ }
+ if !exists {
+ // FIXME? Should we drop the blob from cache here (and elsewhere?)?
+ continue // logrus.Debug() already happened in blobExists
+ }
+ if candidateRepo.Name() != d.ref.ref.Name() {
+ if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
+ logrus.Debugf("... Mount failed: %v", err)
+ continue
+ }
+ }
+ cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+ return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+ }
+
+ return false, types.BlobInfo{}, nil
}
// PutManifest writes manifest to the destination.
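To put the new ImageDestination methods in context, here is an illustrative caller-side sketch (the real logic lives in containers/image/copy and is more involved): try to reuse the blob through the cache first, and only fall back to streaming it with PutBlob.

package example

import (
    "context"
    "io"

    "github.com/containers/image/types"
)

// copyLayer is a hypothetical helper: it asks the destination to reuse the blob
// (possibly substituting an equivalent variant) before uploading it.
func copyLayer(ctx context.Context, dest types.ImageDestination, layer io.Reader,
    info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
    // canSubstitute=true allows e.g. a differently-compressed blob with the same uncompressed digest.
    reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
    if err != nil {
        return types.BlobInfo{}, err
    }
    if reused {
        return reusedInfo, nil
    }
    return dest.PutBlob(ctx, layer, info, cache, false)
}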
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index aedab9731..fbed6297f 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -162,7 +162,9 @@ func getBlobSize(resp *http.Response) int64 {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
@@ -177,6 +179,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
// print url also
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}
+ cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
return res.Body, getBlobSize(res), nil
}
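Similarly on the source side, GetBlob now threads the cache through so a source can record where a blob was actually found. An illustrative sketch (the helper is made up) of fetching a blob with a shared cache:

package example

import (
    "context"
    "io/ioutil"

    "github.com/containers/image/types"
)

// fetchBlob reads a blob via ImageSource.GetBlob; passing a shared BlobInfoCache
// lets the source record the confirmed location, which a later push to the same
// registry can then reuse or cross-repo mount instead of re-uploading.
func fetchBlob(ctx context.Context, src types.ImageSource, info types.BlobInfo,
    cache types.BlobInfoCache) ([]byte, error) {
    stream, _, err := src.GetBlob(ctx, info, cache)
    if err != nil {
        return nil, err
    }
    defer stream.Close()
    return ioutil.ReadAll(stream)
}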
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index d6510ccf1..ad8a48a03 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -85,10 +85,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -120,12 +121,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
}
// Maybe the blob has already been sent
- ok, size, err := d.HasBlob(ctx, inputInfo)
+ ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
- return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ return reusedInfo, nil
}
if isConfig {
@@ -151,29 +152,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with
-// the matching digest which can be reapplied using ReapplyBlob. Unlike
-// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
-// the blob must also be returned. If the destination does not contain the
-// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
-// returns a non-nil error only on an unexpected failure.
-func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+ return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
- return true, blob.Size, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
- return false, -1, nil
-}
-
-// ReapplyBlob informs the image destination that a blob for which HasBlob
-// previously returned true would have been passed to PutBlob if it had
-// returned false. Like HasBlob and unlike PutBlob, the digest can not be
-// empty. If the blob is a filesystem layer, this signifies that the changes
-// it describes need to be applied again when composing a filesystem tree.
-func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return false, types.BlobInfo{}, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index 942893a81..d94ed9783 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -398,7 +398,9 @@ func (r uncompressedReadCloser) Close() error {
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index b639ab714..cee60f824 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -11,6 +11,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
if err != nil {
return nil, err
}
@@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
- info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
+ // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+ // and anyway this blob is so small that it’s easier to just copy it than to worry about finding another location to get it from.
+ info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 298db360d..6fe2a9a32 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
+ stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go
index 3c6b7dffa..3997ac2ee 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go
@@ -77,20 +77,27 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference()
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
-func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.unpackedDest.HasBlob(ctx, info)
-}
-
-func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.unpackedDest.ReapplyBlob(ctx, info)
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index d04773c1f..084d818f7 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -76,9 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
- return s.unpackedSrc.GetBlob(ctx, info)
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ return s.unpackedSrc.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go
index 351632750..b5a6e08f8 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go
@@ -107,13 +107,15 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -173,30 +175,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
- return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
- return false, -1, nil
+ return false, types.BlobInfo{}, nil
}
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, finfo.Size(), nil
-}
-
-func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index 33115c00d..086a7040d 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -92,8 +92,10 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index dbd04f10b..0cce1e6c7 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -212,11 +212,13 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, 0, err
}
- return s.docker.GetBlob(ctx, info)
+ return s.docker.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
@@ -379,23 +381,23 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
- return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
- return d.docker.HasBlob(ctx, info)
-}
-
-func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return d.docker.ReapplyBlob(ctx, info)
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+ return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index afff7dc1b..064898948 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -132,7 +132,15 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}
-func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
@@ -322,12 +330,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
-func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
-
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
- return false, 0, err
+ return false, types.BlobInfo{}, err
}
d.repo = repo
}
@@ -335,29 +349,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
- return found, -1, err
+ return found, types.BlobInfo{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
- return false, -1, err
+ return false, types.BlobInfo{}, err
}
- return true, size, nil
-}
-
-func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
- return info, nil
+ return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index 1f325b2a7..e73cae198 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -255,8 +255,10 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
return getter.Get(path)
}
-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
blob := info.Digest.Hex()
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
new file mode 100644
index 000000000..4ee809134
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
@@ -0,0 +1,329 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/boltdb/bolt"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+ // we can simply start over with a different filename; update blobInfoCacheFilename.
+
+ // FIXME: For CRI-O, does this need to hide information between different users?
+
+ // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
+ uncompressedDigestBucket = []byte("uncompressedDigest")
+ // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
+ // (as a set of key=digest, value="" pairs)
+ digestByUncompressedBucket = []byte("digestByUncompressed")
+ // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
+ // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
+ knownLocationsBucket = []byte("knownLocations")
+)
+
+// Concurrency:
+// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
+// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
+
+// pathLock contains a lock for a specific BoltDB database path.
+type pathLock struct {
+ refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
+ mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
+}
+
+var (
+ // pathLocks contains a lock for each currently open file.
+ // This must be global so that independently created instances of boltDBCache exclude each other.
+ // The map is protected by pathLocksMutex.
+ // FIXME? Should this be based on device:inode numbers instead of paths?
+ pathLocks = map[string]*pathLock{}
+ pathLocksMutex = sync.Mutex{}
+)
+
+// lockPath obtains the pathLock for path.
+// The caller must call unlockPath eventually.
+func lockPath(path string) {
+ pl := func() *pathLock { // A scope for defer
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if ok {
+ pl.refCount++
+ } else {
+ pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
+ pathLocks[path] = pl
+ }
+ return pl
+ }()
+ pl.mutex.Lock()
+}
+
+// unlockPath releases the pathLock for path.
+func unlockPath(path string) {
+ pathLocksMutex.Lock()
+ defer pathLocksMutex.Unlock()
+ pl, ok := pathLocks[path]
+ if !ok {
+ // Should this return an error instead? BlobInfoCache ultimately ignores errors…
+ panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
+ }
+ pl.mutex.Unlock()
+ pl.refCount--
+ if pl.refCount == 0 {
+ delete(pathLocks, path)
+ }
+}
+
+// boltDBCache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+//
+// Note that we don’t keep the database open across operations, because that would lock the file and block any other
+// users; instead, we need to open/close it for every single write or lookup.
+type boltDBCache struct {
+ path string
+}
+
+// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
+// Most users should call DefaultCache instead.
+func NewBoltDBCache(path string) types.BlobInfoCache {
+ return &boltDBCache{path: path}
+}
+
+// view runs the specified fn within a read-only transaction on the database.
+func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+ // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
+ // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
+ // a read lock, blocking any future writes.
+ // Hence this preliminary check, which is RACY: Another process could remove the file
+ // between the Lstat call and opening the database.
+ if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
+ return err
+ }
+
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.View(fn)
+}
+
+// update runs the specified fn within a read-write transaction on the database.
+func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+ lockPath(bdc.path)
+ defer unlockPath(bdc.path)
+ db, err := bolt.Open(bdc.path, 0600, nil)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err := db.Close(); retErr == nil && err != nil {
+ retErr = err
+ }
+ }()
+
+ return db.Update(fn)
+}
+
+// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
+func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+ if b := tx.Bucket(uncompressedDigestBucket); b != nil {
+ if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
+ d, err := digest.Parse(string(uncompressedBytes))
+ if err == nil {
+ return d
+ }
+ // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ }
+ // Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if b := tx.Bucket(digestByUncompressedBucket); b != nil {
+ if b = b.Bucket([]byte(anyDigest.String())); b != nil {
+ c := b.Cursor()
+ if k, _ := c.First(); k != nil { // The bucket is non-empty
+ return anyDigest
+ }
+ }
+ }
+ return ""
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ var res digest.Digest
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ res = bdc.uncompressedDigest(tx, anyDigest)
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+ return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
+ if err != nil {
+ return err
+ }
+ key := []byte(anyDigest.String())
+ if previousBytes := b.Get(key); previousBytes != nil {
+ previous, err := digest.Parse(string(previousBytes))
+ if err != nil {
+ return err
+ }
+ if previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ }
+ if err := b.Put(key, []byte(uncompressed.String())); err != nil {
+ return err
+ }
+
+ b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ _ = bdc.update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
+ if err != nil {
+ return err
+ }
+ b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
+ if err != nil {
+ return err
+ }
+ value, err := time.Now().MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
+ return err
+ }
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
+ b := scopeBucket.Bucket([]byte(digest.String()))
+ if b == nil {
+ return candidates
+ }
+ _ = b.ForEach(func(k, v []byte) error {
+ t := time.Time{}
+ if err := t.UnmarshalBinary(v); err != nil {
+ return err
+ }
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: types.BICLocationReference{Opaque: string(k)},
+ },
+ lastSeen: t,
+ })
+ return nil
+ }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ var uncompressedDigestValue digest.Digest // = ""
+ if err := bdc.view(func(tx *bolt.Tx) error {
+ scopeBucket := tx.Bucket(knownLocationsBucket)
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
+ if scopeBucket == nil {
+ return nil
+ }
+ scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
+ if scopeBucket == nil {
+ return nil
+ }
+
+ res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+ if canSubstitute {
+ if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
+ b := tx.Bucket(digestByUncompressedBucket)
+ if b != nil {
+ b = b.Bucket([]byte(uncompressedDigestValue.String()))
+ if b != nil {
+ if err := b.ForEach(func(k, _ []byte) error {
+ d, err := digest.Parse(string(k))
+ if err != nil {
+ return err
+ }
+ if d != primaryDigest && d != uncompressedDigestValue {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ if uncompressedDigestValue != primaryDigest {
+ res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+ }
+ }
+ }
+ return nil
+ }); err != nil { // Including os.IsNotExist(err)
+ return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+ }
+
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+}
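An illustrative usage sketch of the BoltDB-backed cache (the path and digests are placeholders): because every call opens, locks, and closes the database file, a single cache value can be shared across goroutines without extra synchronization by the caller.

package example

import (
    "github.com/containers/image/pkg/blobinfocache"
    "github.com/opencontainers/go-digest"
)

// recordAndLookup records a locally verified (compressed, uncompressed) digest pair
// and reads it back; with a persistent path the mapping survives across processes.
func recordAndLookup(path string, compressed, uncompressed digest.Digest) digest.Digest {
    cache := blobinfocache.NewBoltDBCache(path)
    cache.RecordDigestUncompressedPair(compressed, uncompressed)
    return cache.UncompressedDigest(compressed) // expected to equal uncompressed
}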
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
new file mode 100644
index 000000000..6da9f2805
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -0,0 +1,63 @@
+package blobinfocache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/containers/image/types"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // blobInfoCacheFilename is the file name used for blob info caches.
+ // If the format changes in an incompatible way, increase the version number.
+ blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+ // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
+ systemBlobInfoCacheDir = "/var/lib/containers/cache"
+)
+
+// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
+// euid is used so that (sudo …) does not write root-owned files into the unprivileged user’s home directory.
+func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
+ if sys != nil && sys.BlobInfoCacheDir != "" {
+ return sys.BlobInfoCacheDir, nil
+ }
+
+ // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
+ // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
+ if euid == 0 {
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
+ }
+ return systemBlobInfoCacheDir, nil
+ }
+
+ // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
+ dataDir := os.Getenv("XDG_DATA_HOME")
+ if dataDir == "" {
+ home := os.Getenv("HOME")
+ if home == "" {
+ return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ dataDir = filepath.Join(home, ".local", "share")
+ }
+ return filepath.Join(dataDir, "containers", "cache"), nil
+}
+
+// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
+func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
+ dir, err := blobInfoCacheDir(sys, os.Geteuid())
+ if err != nil {
+ logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
+ return NewMemoryCache()
+ }
+ path := filepath.Join(dir, blobInfoCacheFilename)
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", err)
+ return NewMemoryCache()
+ }
+
+ logrus.Debugf("Using blob info cache at %s", path)
+ return NewBoltDBCache(path)
+}
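A short usage sketch of DefaultCache, following the lookup order implemented above (sys.BlobInfoCacheDir override, then /var/lib/containers/cache for root, then $XDG_DATA_HOME or $HOME/.local/share for other users, with a silent fallback to the in-memory cache):

package example

import (
    "github.com/containers/image/pkg/blobinfocache"
    "github.com/containers/image/types"
)

// newCaches is illustrative only: it shows the explicit override and the default lookup.
func newCaches() (types.BlobInfoCache, types.BlobInfoCache) {
    // Explicit directory via SystemContext.
    overridden := blobinfocache.DefaultCache(&types.SystemContext{BlobInfoCacheDir: "/tmp/blob-info-cache"})
    // nil SystemContext: system-wide path for root, per-user path otherwise,
    // falling back to NewMemoryCache if neither can be determined or created.
    defaulted := blobinfocache.DefaultCache(nil)
    return overridden, defaulted
}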
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/memory.go b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
new file mode 100644
index 000000000..1ce7dee13
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/memory.go
@@ -0,0 +1,123 @@
+package blobinfocache
+
+import (
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/sirupsen/logrus"
+)
+
+// locationKey only exists to make lookup in knownLocations easier.
+type locationKey struct {
+ transport string
+ scope types.BICTransportScope
+ blobDigest digest.Digest
+}
+
+// memoryCache implements an in-memory-only BlobInfoCache
+type memoryCache struct {
+ uncompressedDigests map[digest.Digest]digest.Digest
+ digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
+ knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+}
+
+// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
+// This is primarily intended for tests, but also used as a fallback if DefaultCache
+// can’t determine, or set up, the location for a persistent cache.
+// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
+func NewMemoryCache() types.BlobInfoCache {
+ return &memoryCache{
+ uncompressedDigests: map[digest.Digest]digest.Digest{},
+ digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
+ knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
+ }
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ if d, ok := mem.uncompressedDigests[anyDigest]; ok {
+ return d
+ }
+ // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
+ // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+ // when we already record a (compressed, uncompressed) pair.
+ if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
+ return anyDigest
+ }
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+ if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
+ logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+ }
+ mem.uncompressedDigests[anyDigest] = uncompressed
+
+ anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
+ if !ok {
+ anyDigestSet = map[digest.Digest]struct{}{}
+ mem.digestsByUncompressed[uncompressed] = anyDigestSet
+ }
+ anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+ key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
+ locationScope, ok := mem.knownLocations[key]
+ if !ok {
+ locationScope = map[types.BICLocationReference]time.Time{}
+ mem.knownLocations[key] = locationScope
+ }
+ locationScope[location] = time.Now() // Possibly overwriting an older entry.
+}
+
+// appendReplacementCandidates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
+ locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
+ for l, t := range locations {
+ candidates = append(candidates, candidateWithTime{
+ candidate: types.BICReplacementCandidate{
+ Digest: digest,
+ Location: l,
+ },
+ lastSeen: t,
+ })
+ }
+ return candidates
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ res := []candidateWithTime{}
+ res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+ var uncompressedDigest digest.Digest // = ""
+ if canSubstitute {
+ if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
+ otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
+ for d := range otherDigests {
+ if d != primaryDigest && d != uncompressedDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, d)
+ }
+ }
+ if uncompressedDigest != primaryDigest {
+ res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+ }
+ }
+ }
+ return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+}
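
A short sketch of the in-memory cache’s behaviour (the transport, scope, digests, and location parameters are assumed inputs, not values from this patch): recording a locally verified compressed/uncompressed pair lets a later substituting lookup for the uncompressed digest return the compressed variant’s location.

    package example // illustrative only

    import (
        "github.com/containers/image/pkg/blobinfocache"
        "github.com/containers/image/types"
        "github.com/opencontainers/go-digest"
    )

    // recordAndLookup records locally verified facts about a blob pair, then asks for reuse candidates.
    func recordAndLookup(transport types.ImageTransport, scope types.BICTransportScope,
        compressed, uncompressed digest.Digest, loc types.BICLocationReference) []types.BICReplacementCandidate {
        cache := blobinfocache.NewMemoryCache()
        cache.RecordDigestUncompressedPair(compressed, uncompressed) // locally verified data only
        cache.RecordKnownLocation(transport, scope, compressed, loc)
        // With canSubstitute=true, asking about the uncompressed digest also returns the
        // compressed variant recorded above, because both map to the same DiffID.
        return cache.CandidateLocations(transport, scope, uncompressed, true)
    }
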
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/none.go b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
new file mode 100644
index 000000000..5658d89ff
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/none.go
@@ -0,0 +1,47 @@
+package blobinfocache
+
+import (
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// noCache implements a dummy BlobInfoCache which records no data.
+type noCache struct {
+}
+
+// NoCache implements BlobInfoCache by not recording any data.
+//
+// This exists primarily for implementations of configGetter for Manifest.Inspect,
+// because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
+var NoCache types.BlobInfoCache = noCache{}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+}
+
+// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+ return nil
+}
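
A minimal sketch of where NoCache fits (src and configDigest are placeholders): config blobs have exactly one representation, so there is nothing useful to record or substitute.

    package example // illustrative only

    import (
        "context"
        "io"

        "github.com/containers/image/pkg/blobinfocache"
        "github.com/containers/image/types"
        "github.com/opencontainers/go-digest"
    )

    // fetchConfig reads a config blob; caching blob locations is pointless here, so NoCache is passed.
    func fetchConfig(ctx context.Context, src types.ImageSource, configDigest digest.Digest) (io.ReadCloser, error) {
        rc, _, err := src.GetBlob(ctx, types.BlobInfo{Digest: configDigest, Size: -1}, blobinfocache.NoCache)
        return rc, err
    }
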
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
new file mode 100644
index 000000000..02709aa1c
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
@@ -0,0 +1,108 @@
+package blobinfocache
+
+import (
+ "sort"
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
+// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
+// This is a heuristic/guess, and could well use a different value.
+const replacementAttempts = 5
+
+// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type candidateWithTime struct {
+ candidate types.BICReplacementCandidate // The replacement candidate
+ lastSeen time.Time // Time the candidate was last known to exist (either read or written)
+}
+
+// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
+// along with the specially-treated digest values for the implementation of sort.Interface.Less
+type candidateSortState struct {
+ cs []candidateWithTime // The entries to sort
+ primaryDigest digest.Digest // The digest the user actually asked for
+ uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
+}
+
+func (css *candidateSortState) Len() int {
+ return len(css.cs)
+}
+
+func (css *candidateSortState) Less(i, j int) bool {
+ xi := css.cs[i]
+ xj := css.cs[j]
+
+ // primaryDigest entries come first, more recent first.
+ // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
+ // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
+
+ // First, deal with the primaryDigest/uncompressedDigest cases:
+ if xi.candidate.Digest != xj.candidate.Digest {
+ // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
+ if xi.candidate.Digest == css.primaryDigest {
+ return true
+ }
+ if xj.candidate.Digest == css.primaryDigest {
+ return false
+ }
+ if css.uncompressedDigest != "" {
+ if xi.candidate.Digest == css.uncompressedDigest {
+ return false
+ }
+ if xj.candidate.Digest == css.uncompressedDigest {
+ return true
+ }
+ }
+ } else { // xi.candidate.Digest == xj.candidate.Digest
+ // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
+ if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ }
+
+ // Neither of the digests are primaryDigest/uncompressedDigest:
+ if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
+ return xi.lastSeen.After(xj.lastSeen)
+ }
+ // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
+ return xi.candidate.Digest < xj.candidate.Digest
+}
+
+func (css *candidateSortState) Swap(i, j int) {
+ css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
+}
+
+// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
+// number of entries to limit, only to make testing simpler.
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+ // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
+ // compare equal.
+ sort.Sort(&candidateSortState{
+ cs: cs,
+ primaryDigest: primaryDigest,
+ uncompressedDigest: uncompressedDigest,
+ })
+
+ resLength := len(cs)
+ if resLength > maxCandidates {
+ resLength = maxCandidates
+ }
+ res := make([]types.BICReplacementCandidate, resLength)
+ for i := range res {
+ res[i] = cs[i].candidate
+ }
+ return res
+}
+
+// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
+// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
+//
+// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
+// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
+func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+ return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
+}
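
A test-style sketch of the resulting ordering (digests and location are placeholders; this would have to live inside package blobinfocache, since the helpers above are unexported):

    // examplePrioritization shows that the primary digest outranks a more recently seen
    // unrelated digest, and that at most replacementAttempts (5) entries are returned.
    func examplePrioritization(primary, uncompressed, other digest.Digest, loc types.BICLocationReference) []types.BICReplacementCandidate {
        now := time.Now()
        cs := []candidateWithTime{
            {candidate: types.BICReplacementCandidate{Digest: other, Location: loc}, lastSeen: now},
            {candidate: types.BICReplacementCandidate{Digest: primary, Location: loc}, lastSeen: now.Add(-time.Hour)},
        }
        return destructivelyPrioritizeReplacementCandidates(cs, primary, uncompressed) // the primary entry comes first
    }
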
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index d1b010a76..bd6813119 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@@ -99,8 +100,10 @@ func (s storageImageSource) Close() error {
return nil
}
-// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
-func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
if info.Digest == image.GzippedEmptyLayerDigest {
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
@@ -317,9 +320,17 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}
-// PutBlob stores a layer or data blob in our temporary directory, checking that any information
-// in the blobinfo matches the incoming data.
-func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+ // Stores a layer or data blob in our temporary directory, checking that any information
+ // in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
@@ -370,6 +381,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
if blobSize < 0 {
blobSize = counter.Count
}
+ // This is safe because we have just computed both values ourselves.
+ cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
@@ -377,59 +390,82 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}, nil
}
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
-// reapplied using ReapplyBlob.
-//
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if blobinfo.Digest == "" {
- return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
+ return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
if err := blobinfo.Digest.Validate(); err != nil {
- return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
}
+
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
- return true, size, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: size,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].UncompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
}
+
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
- return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
- return true, layers[0].CompressedSize, nil
+ return true, types.BlobInfo{
+ Digest: blobinfo.Digest,
+ Size: layers[0].CompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+
+ // Does the blob correspond to a known DiffID which we already have available?
+ // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
+ // uncompressed layer, and that can happen only if canSubstitute.
+ if canSubstitute {
+ if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
+ }
+ if len(layers) > 0 {
+ s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
+ return true, types.BlobInfo{
+ Digest: uncompressedDigest,
+ Size: layers[0].UncompressedSize,
+ MediaType: blobinfo.MediaType,
+ }, nil
+ }
+ }
}
- // Nope, we don't have it.
- return false, -1, nil
-}
-// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
-// same one when it walks the list in the manifest.
-func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
- present, size, err := s.HasBlob(ctx, blobinfo)
- if !present {
- return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
- }
- if err != nil {
- return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
- }
- blobinfo.Size = size
- return blobinfo, nil
+ // Nope, we don't have it.
+ return false, types.BlobInfo{}, nil
}
// computeID computes a recommended image ID based on information we have so far. If
@@ -514,8 +550,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
+ // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+			// that relies on using a blob digest that has never been seen by the store had better call
+ // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
+ // so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
- has, _, err := s.HasBlob(ctx, blob.BlobInfo)
+ has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
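
The caller-side protocol that this destination now implements is, roughly (names are illustrative, and error handling is minimal): try to reuse first, and only upload via PutBlob when nothing can be reused or substituted.

    package example // illustrative only

    import (
        "context"
        "io"

        "github.com/containers/image/types"
    )

    // putOrReuseLayer prefers TryReusingBlob (possibly substituting an equivalent blob)
    // and only falls back to PutBlob when reuse is not possible.
    func putOrReuseLayer(ctx context.Context, dest types.ImageDestination, stream io.Reader, info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
        reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true /* canSubstitute */)
        if err != nil {
            return types.BlobInfo{}, err
        }
        if reused {
            // reusedInfo.Digest may differ from info.Digest when an equivalent
            // (e.g. uncompressed) blob was found via the cache.
            return reusedInfo, nil
        }
        return dest.PutBlob(ctx, stream, info, cache, false /* isConfig */)
    }
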
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index 17af60b30..ee963b8d8 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -207,7 +207,10 @@ func (is *tarballImageSource) Close() error {
return nil
}
-func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
// We should only be asked about things in the manifest. Maybe the configuration blob.
if blobinfo.Digest == is.configID {
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index a552e4597..dda332776 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -100,6 +100,82 @@ type BlobInfo struct {
MediaType string
}
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors in reading from, or writing to, the cache should never be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
@@ -120,7 +196,8 @@ type ImageSource interface {
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
- GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
+ // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+ GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@@ -148,8 +225,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
-// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
-// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
+// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
@@ -183,17 +259,19 @@ type ImageDestination interface {
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
- PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
- // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
- // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
- // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
- // it returns a non-nil error only on an unexpected failure.
- HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
- // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
- ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+ // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@@ -375,6 +453,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
+ // If not "", overrides the system's default directory containing a blob info cache.
+ BlobInfoCacheDir string
// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
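
The new SystemContext.BlobInfoCacheDir field above lets tools place the persistent cache somewhere other than the per-user default; a minimal sketch, again assuming the DefaultCache(sys *types.SystemContext) constructor from pkg/blobinfocache:

    package example // illustrative only

    import (
        "github.com/containers/image/pkg/blobinfocache"
        "github.com/containers/image/types"
    )

    // cacheIn returns a blob info cache stored under dir instead of the per-user default location.
    func cacheIn(dir string) types.BlobInfoCache {
        sys := &types.SystemContext{BlobInfoCacheDir: dir}
        return blobinfocache.DefaultCache(sys)
    }
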
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index de6dcbecf..88537981a 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -43,3 +43,4 @@ github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
+github.com/boltdb/bolt master