-rw-r--r--  vendor.conf                                                            2
-rw-r--r--  vendor/github.com/containers/storage/containers_ffjson.go             2
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown.go                43
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_unix.go           55
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_windows.go        14
-rw-r--r--  vendor/github.com/containers/storage/drivers/chroot_windows.go        2
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go       5
-rw-r--r--  vendor/github.com/containers/storage/images.go                       34
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go               112
-rw-r--r--  vendor/github.com/containers/storage/layers_ffjson.go                 2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go          16
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_110.go      11
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_19.go       10
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go  118
-rw-r--r--  vendor/github.com/containers/storage/pkg/stringutils/README.md        1
-rw-r--r--  vendor/github.com/containers/storage/pkg/stringutils/stringutils.go  99
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/stat_windows.go      14
-rw-r--r--  vendor/github.com/containers/storage/store.go                        363
18 files changed, 811 insertions(+), 92 deletions(-)
diff --git a/vendor.conf b/vendor.conf
index 5f9391b08..c69357d56 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -11,7 +11,7 @@ github.com/containerd/continuity master
github.com/containernetworking/cni v0.6.0
github.com/containernetworking/plugins 1fb94a4222eafc6f948eacdca9c9f2158b427e53
github.com/containers/image ad33f7b73fbac0acf05b9e2cea021b61b4b0c3e0
-github.com/containers/storage 0b8ab959bba614a4f88bb3791dbc078c3d47f259
+github.com/containers/storage 4993aae31ced3971f5b72f28c4e3fe38c34fa634
github.com/coreos/go-systemd v14
github.com/cri-o/ocicni master
github.com/cyphar/filepath-securejoin v0.2.1
diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go
index 40b912bb3..aef6becfe 100644
--- a/vendor/github.com/containers/storage/containers_ffjson.go
+++ b/vendor/github.com/containers/storage/containers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./containers.go
+// source: containers.go
package storage
diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go
index c12e73b3b..bcba12de9 100644
--- a/vendor/github.com/containers/storage/drivers/chown.go
+++ b/vendor/github.com/containers/storage/drivers/chown.go
@@ -6,7 +6,6 @@ import (
"fmt"
"os"
"path/filepath"
- "syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
@@ -56,47 +55,7 @@ func chownByMapsMain() {
if err != nil {
return fmt.Errorf("error walking to %q: %v", path, err)
}
- sysinfo := info.Sys()
- if st, ok := sysinfo.(*syscall.Stat_t); ok {
- // Map an on-disk UID/GID pair from host to container
- // using the first map, then back to the host using the
- // second map. Skip that first step if they're 0, to
- // compensate for cases where a parent layer should
- // have had a mapped value, but didn't.
- uid, gid := int(st.Uid), int(st.Gid)
- if toContainer != nil {
- pair := idtools.IDPair{
- UID: uid,
- GID: gid,
- }
- mappedUid, mappedGid, err := toContainer.ToContainer(pair)
- if err != nil {
- if (uid != 0) || (gid != 0) {
- return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
- }
- mappedUid, mappedGid = uid, gid
- }
- uid, gid = mappedUid, mappedGid
- }
- if toHost != nil {
- pair := idtools.IDPair{
- UID: uid,
- GID: gid,
- }
- mappedPair, err := toHost.ToHost(pair)
- if err != nil {
- return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
- }
- uid, gid = mappedPair.UID, mappedPair.GID
- }
- if uid != int(st.Uid) || gid != int(st.Gid) {
- // Make the change.
- if err := syscall.Lchown(path, uid, gid); err != nil {
- return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
- }
- }
- }
- return nil
+ return platformLChown(path, info, toHost, toContainer)
}
if err := filepath.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
new file mode 100644
index 000000000..5454657ec
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -0,0 +1,55 @@
+// +build !windows
+
+package graphdriver
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+)
+
+func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+ sysinfo := info.Sys()
+ if st, ok := sysinfo.(*syscall.Stat_t); ok {
+ // Map an on-disk UID/GID pair from host to container
+ // using the first map, then back to the host using the
+ // second map. Skip that first step if they're 0, to
+ // compensate for cases where a parent layer should
+ // have had a mapped value, but didn't.
+ uid, gid := int(st.Uid), int(st.Gid)
+ if toContainer != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedUid, mappedGid, err := toContainer.ToContainer(pair)
+ if err != nil {
+ if (uid != 0) || (gid != 0) {
+ return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
+ }
+ mappedUid, mappedGid = uid, gid
+ }
+ uid, gid = mappedUid, mappedGid
+ }
+ if toHost != nil {
+ pair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ mappedPair, err := toHost.ToHost(pair)
+ if err != nil {
+ return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
+ }
+ uid, gid = mappedPair.UID, mappedPair.GID
+ }
+ if uid != int(st.Uid) || gid != int(st.Gid) {
+ // Make the change.
+ if err := syscall.Lchown(path, uid, gid); err != nil {
+ return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
+ }
+ }
+ }
+ return nil
+}
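The comment inside platformLChown describes a host-to-container-to-host round trip over two ID maps. A minimal standalone sketch of that round trip, assuming the vendored pkg/idtools exposes NewIDMappingsFromMaps, IDMap, and IDPair as used above (the mapping values here are invented for illustration):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// One mapping: container IDs 0..65535 are backed by host IDs 100000..165535.
	m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	toContainer := idtools.NewIDMappingsFromMaps(m, m)
	toHost := idtools.NewIDMappingsFromMaps(m, m)

	// On-disk owner as seen on the host.
	uid, gid := 100001, 100001

	// First map: host -> container.
	cuid, cgid, err := toContainer.ToContainer(idtools.IDPair{UID: uid, GID: gid})
	if err != nil {
		panic(err)
	}
	// Second map: container -> host (the same map here, so the pair round-trips).
	pair, err := toHost.ToHost(idtools.IDPair{UID: cuid, GID: cgid})
	if err != nil {
		panic(err)
	}
	fmt.Println(cuid, cgid, pair.UID, pair.GID) // 1 1 100001 100001
}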
diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go
new file mode 100644
index 000000000..31bd5bb52
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/chown_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package graphdriver
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/containers/storage/pkg/idtools"
+)
+
+func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
+ return &os.PathError{"lchown", path, syscall.EWINDOWS}
+}
diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go
index 1df031789..f4dc22a96 100644
--- a/vendor/github.com/containers/storage/drivers/chroot_windows.go
+++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go
@@ -1,7 +1,7 @@
package graphdriver
import (
- "os"
+ "fmt"
"syscall"
)
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index b750715bf..1d84b0b6a 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -940,11 +940,6 @@ func (d *Driver) AdditionalImageStores() []string {
return nil
}
-// AdditionalImageStores returns additional image stores supported by the driver
-func (d *Driver) AdditionalImageStores() []string {
- return nil
-}
-
// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
// matching those in toContainer to matching those in toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index 76a11fb0c..80fae6dce 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -40,6 +40,11 @@ type Image struct {
// same top layer.
TopLayer string `json:"layer,omitempty"`
+ // MappedTopLayers are the IDs of alternate versions of the top layer
+ // which have the same contents and parent, and which differ from
+ // TopLayer only in which ID mappings they use.
+ MappedTopLayers []string `json:"mapped-layers,omitempty"`
+
// Metadata is data we keep for the convenience of the caller. It is not
// expected to be large, since it is kept in memory.
Metadata string `json:"metadata,omitempty"`
@@ -126,16 +131,17 @@ type imageStore struct {
func copyImage(i *Image) *Image {
return &Image{
- ID: i.ID,
- Digest: i.Digest,
- Names: copyStringSlice(i.Names),
- TopLayer: i.TopLayer,
- Metadata: i.Metadata,
- BigDataNames: copyStringSlice(i.BigDataNames),
- BigDataSizes: copyStringInt64Map(i.BigDataSizes),
- BigDataDigests: copyStringDigestMap(i.BigDataDigests),
- Created: i.Created,
- Flags: copyStringInterfaceMap(i.Flags),
+ ID: i.ID,
+ Digest: i.Digest,
+ Names: copyStringSlice(i.Names),
+ TopLayer: i.TopLayer,
+ MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+ Metadata: i.Metadata,
+ BigDataNames: copyStringSlice(i.BigDataNames),
+ BigDataSizes: copyStringInt64Map(i.BigDataSizes),
+ BigDataDigests: copyStringDigestMap(i.BigDataDigests),
+ Created: i.Created,
+ Flags: copyStringInterfaceMap(i.Flags),
}
}
@@ -362,6 +368,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
return image, err
}
+func (r *imageStore) addMappedTopLayer(id, layer string) error {
+ if image, ok := r.lookup(id); ok {
+ image.MappedTopLayers = append(image.MappedTopLayers, layer)
+ return r.Save()
+ }
+ return ErrImageUnknown
+}
+
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
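For reference, the new MappedTopLayers field serializes under the "mapped-layers" key and is omitted when empty, which is what the hand-written ffjson encoder below also does. A standalone sketch using encoding/json and a hypothetical trimmed-down mirror of the Image struct (not the real type):

package main

import (
	"encoding/json"
	"fmt"
)

// imageRecord mirrors only the fields relevant here; it is not the real Image type.
type imageRecord struct {
	ID              string   `json:"id"`
	TopLayer        string   `json:"layer,omitempty"`
	MappedTopLayers []string `json:"mapped-layers,omitempty"`
}

func main() {
	img := imageRecord{
		ID:              "9a9b8f",
		TopLayer:        "d0e1f2",
		MappedTopLayers: []string{"aa11", "bb22"},
	}
	b, _ := json.Marshal(img)
	fmt.Println(string(b))
	// {"id":"9a9b8f","layer":"d0e1f2","mapped-layers":["aa11","bb22"]}

	// With no mapped copies the key is omitted entirely.
	img.MappedTopLayers = nil
	b, _ = json.Marshal(img)
	fmt.Println(string(b)) // {"id":"9a9b8f","layer":"d0e1f2"}
}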
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index f91ee6d4f..6b40ebd59 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
package storage
@@ -64,6 +64,22 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
fflib.WriteJsonString(buf, string(j.TopLayer))
buf.WriteByte(',')
}
+ if len(j.MappedTopLayers) != 0 {
+ buf.WriteString(`"mapped-layers":`)
+ if j.MappedTopLayers != nil {
+ buf.WriteString(`[`)
+ for i, v := range j.MappedTopLayers {
+ if i != 0 {
+ buf.WriteString(`,`)
+ }
+ fflib.WriteJsonString(buf, string(v))
+ }
+ buf.WriteString(`]`)
+ } else {
+ buf.WriteString(`null`)
+ }
+ buf.WriteByte(',')
+ }
if len(j.Metadata) != 0 {
buf.WriteString(`"metadata":`)
fflib.WriteJsonString(buf, string(j.Metadata))
@@ -157,6 +173,8 @@ const (
ffjtImageTopLayer
+ ffjtImageMappedTopLayers
+
ffjtImageMetadata
ffjtImageBigDataNames
@@ -178,6 +196,8 @@ var ffjKeyImageNames = []byte("names")
var ffjKeyImageTopLayer = []byte("layer")
+var ffjKeyImageMappedTopLayers = []byte("mapped-layers")
+
var ffjKeyImageMetadata = []byte("metadata")
var ffjKeyImageBigDataNames = []byte("big-data-names")
@@ -311,7 +331,12 @@ mainparse:
case 'm':
- if bytes.Equal(ffjKeyImageMetadata, kn) {
+ if bytes.Equal(ffjKeyImageMappedTopLayers, kn) {
+ currentKey = ffjtImageMappedTopLayers
+ state = fflib.FFParse_want_colon
+ goto mainparse
+
+ } else if bytes.Equal(ffjKeyImageMetadata, kn) {
currentKey = ffjtImageMetadata
state = fflib.FFParse_want_colon
goto mainparse
@@ -363,6 +388,12 @@ mainparse:
goto mainparse
}
+ if fflib.EqualFoldRight(ffjKeyImageMappedTopLayers, kn) {
+ currentKey = ffjtImageMappedTopLayers
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) {
currentKey = ffjtImageTopLayer
state = fflib.FFParse_want_colon
@@ -416,6 +447,9 @@ mainparse:
case ffjtImageTopLayer:
goto handle_TopLayer
+ case ffjtImageMappedTopLayers:
+ goto handle_MappedTopLayers
+
case ffjtImageMetadata:
goto handle_Metadata
@@ -600,6 +634,80 @@ handle_TopLayer:
state = fflib.FFParse_after_value
goto mainparse
+handle_MappedTopLayers:
+
+ /* handler: j.MappedTopLayers type=[]string kind=slice quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+ j.MappedTopLayers = nil
+ } else {
+
+ j.MappedTopLayers = []string{}
+
+ wantVal := true
+
+ for {
+
+ var tmpJMappedTopLayers string
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ /* handler: tmpJMappedTopLayers type=string kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ tmpJMappedTopLayers = string(string(outBuf))
+
+ }
+ }
+
+ j.MappedTopLayers = append(j.MappedTopLayers, tmpJMappedTopLayers)
+
+ wantVal = false
+ }
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
handle_Metadata:
/* handler: j.Metadata type=string kind=string quoted=false*/
diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go
index 09b5d0f33..125b5d8c9 100644
--- a/vendor/github.com/containers/storage/layers_ffjson.go
+++ b/vendor/github.com/containers/storage/layers_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
package storage
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index dcc5227fe..4c4382625 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -56,6 +56,11 @@ type (
// replaced with the matching name from this map.
RebaseNames map[string]string
InUserNS bool
+ // CopyPass indicates that the contents of any archive we're creating
+ // will instantly be extracted and written to disk, so we can deviate
+ // from the traditional behavior/format to get features like subsecond
+ // precision in timestamps.
+ CopyPass bool
}
)
@@ -396,6 +401,11 @@ type tarAppender struct {
// by the AUFS standard are used as the tar whiteout
// standard.
WhiteoutConverter tarWhiteoutConverter
+ // CopyPass indicates that the contents of any archive we're creating
+ // will instantly be extracted and written to disk, so we can deviate
+ // from the traditional behavior/format to get features like subsecond
+ // precision in timestamps.
+ CopyPass bool
}
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
@@ -446,6 +456,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
+ if ta.CopyPass {
+ copyPassHeader(hdr)
+ }
// if it's not a directory and has more than 1 link,
// it's hard linked, so set the type flag accordingly
@@ -710,6 +723,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
options.ChownOpts,
)
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+ ta.CopyPass = options.CopyPass
defer func() {
// Make sure to check the error on Close.
@@ -1039,6 +1053,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: tarMappings.UIDs(),
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
+ CopyPass: true,
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1145,6 +1160,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
hdr.Name = filepath.Base(dst)
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+ copyPassHeader(hdr)
if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil {
return err
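A rough usage sketch (not taken from this repository) of the new CopyPass option: a caller sets it on TarOptions when the stream will be extracted immediately, exactly as TarUntar does above, so the packer is free to emit PAX headers with sub-second mtimes. The source path is invented.

package main

import (
	"io"
	"io/ioutil"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// CopyPass tells the packer the stream is consumed right away rather than
	// stored, so it may deviate from the traditional format (PAX, sub-second mtimes).
	options := &archive.TarOptions{
		Compression: archive.Uncompressed,
		CopyPass:    true,
	}
	rc, err := archive.TarWithOptions("/tmp/some-src-dir", options)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	// Normally this stream would be fed straight to an Untar; just drain it here.
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		panic(err)
	}
}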
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
new file mode 100644
index 000000000..22b8b48cc
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go
@@ -0,0 +1,11 @@
+// +build go1.10
+
+package archive
+
+import (
+ "archive/tar"
+)
+
+func copyPassHeader(hdr *tar.Header) {
+ hdr.Format = tar.FormatPAX
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
new file mode 100644
index 000000000..d10d595fa
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go
@@ -0,0 +1,10 @@
+// +build !go1.10
+
+package archive
+
+import (
+ "archive/tar"
+)
+
+func copyPassHeader(hdr *tar.Header) {
+}
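The go1.10 build of copyPassHeader relies on tar.Header.Format and tar.FormatPAX, which only exist from Go 1.10 onward; the !go1.10 file above is the no-op fallback. A small standard-library-only check that a PAX-format header round-trips a sub-second ModTime:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"time"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{
		Name:    "file.txt",
		Mode:    0644,
		Size:    5,
		ModTime: time.Unix(1526000000, 123456789),
		Format:  tar.FormatPAX, // what copyPassHeader sets on go1.10+
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	tr := tar.NewReader(&buf)
	out, err := tr.Next()
	if err != nil {
		panic(err)
	}
	fmt.Println(out.ModTime.Nanosecond()) // 123456789 -- preserved by the PAX mtime record
}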
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
index 211f4e92b..9b8103e4d 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./pkg/archive/archive.go
+// source: pkg/archive/archive.go
package archive
@@ -491,6 +491,11 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
} else {
buf.WriteString(`,"InUserNS":false`)
}
+ if j.CopyPass {
+ buf.WriteString(`,"CopyPass":true`)
+ } else {
+ buf.WriteString(`,"CopyPass":false`)
+ }
buf.WriteByte('}')
return nil
}
@@ -524,6 +529,8 @@ const (
ffjtTarOptionsRebaseNames
ffjtTarOptionsInUserNS
+
+ ffjtTarOptionsCopyPass
)
var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles")
@@ -552,6 +559,8 @@ var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames")
var ffjKeyTarOptionsInUserNS = []byte("InUserNS")
+var ffjKeyTarOptionsCopyPass = []byte("CopyPass")
+
// UnmarshalJSON umarshall json - template of ffjson
func (j *TarOptions) UnmarshalJSON(input []byte) error {
fs := fflib.NewFFLexer(input)
@@ -624,6 +633,11 @@ mainparse:
currentKey = ffjtTarOptionsChownOpts
state = fflib.FFParse_want_colon
goto mainparse
+
+ } else if bytes.Equal(ffjKeyTarOptionsCopyPass, kn) {
+ currentKey = ffjtTarOptionsCopyPass
+ state = fflib.FFParse_want_colon
+ goto mainparse
}
case 'E':
@@ -704,6 +718,12 @@ mainparse:
}
+ if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) {
+ currentKey = ffjtTarOptionsCopyPass
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) {
currentKey = ffjtTarOptionsInUserNS
state = fflib.FFParse_want_colon
@@ -838,6 +858,9 @@ mainparse:
case ffjtTarOptionsInUserNS:
goto handle_InUserNS
+ case ffjtTarOptionsCopyPass:
+ goto handle_CopyPass
+
case ffjtTarOptionsnosuchkey:
err = fs.SkipField(tok)
if err != nil {
@@ -1481,6 +1504,41 @@ handle_InUserNS:
state = fflib.FFParse_after_value
goto mainparse
+handle_CopyPass:
+
+ /* handler: j.CopyPass type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ j.CopyPass = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ j.CopyPass = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
wantedvalue:
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
@@ -1773,6 +1831,11 @@ func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
if err != nil {
return err
}
+ if j.CopyPass {
+ buf.WriteString(`,"CopyPass":true`)
+ } else {
+ buf.WriteString(`,"CopyPass":false`)
+ }
buf.WriteByte('}')
return nil
}
@@ -1792,6 +1855,8 @@ const (
ffjttarAppenderChownOpts
ffjttarAppenderWhiteoutConverter
+
+ ffjttarAppenderCopyPass
)
var ffjKeytarAppenderTarWriter = []byte("TarWriter")
@@ -1806,6 +1871,8 @@ var ffjKeytarAppenderChownOpts = []byte("ChownOpts")
var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter")
+var ffjKeytarAppenderCopyPass = []byte("CopyPass")
+
// UnmarshalJSON umarshall json - template of ffjson
func (j *tarAppender) UnmarshalJSON(input []byte) error {
fs := fflib.NewFFLexer(input)
@@ -1881,6 +1948,11 @@ mainparse:
currentKey = ffjttarAppenderChownOpts
state = fflib.FFParse_want_colon
goto mainparse
+
+ } else if bytes.Equal(ffjKeytarAppenderCopyPass, kn) {
+ currentKey = ffjttarAppenderCopyPass
+ state = fflib.FFParse_want_colon
+ goto mainparse
}
case 'I':
@@ -1917,6 +1989,12 @@ mainparse:
}
+ if fflib.EqualFoldRight(ffjKeytarAppenderCopyPass, kn) {
+ currentKey = ffjttarAppenderCopyPass
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) {
currentKey = ffjttarAppenderWhiteoutConverter
state = fflib.FFParse_want_colon
@@ -1988,6 +2066,9 @@ mainparse:
case ffjttarAppenderWhiteoutConverter:
goto handle_WhiteoutConverter
+ case ffjttarAppenderCopyPass:
+ goto handle_CopyPass
+
case ffjttarAppendernosuchkey:
err = fs.SkipField(tok)
if err != nil {
@@ -2211,6 +2292,41 @@ handle_WhiteoutConverter:
state = fflib.FFParse_after_value
goto mainparse
+handle_CopyPass:
+
+ /* handler: j.CopyPass type=bool kind=bool quoted=false*/
+
+ {
+ if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+ }
+ }
+
+ {
+ if tok == fflib.FFTok_null {
+
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+ j.CopyPass = true
+
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+ j.CopyPass = false
+
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
wantedvalue:
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md
new file mode 100644
index 000000000..b3e454573
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
new file mode 100644
index 000000000..8c4c39875
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
@@ -0,0 +1,99 @@
+// Package stringutils provides helper functions for dealing with strings.
+package stringutils
+
+import (
+ "bytes"
+ "math/rand"
+ "strings"
+)
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+ // make a really long string
+ letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letters[rand.Intn(len(letters))]
+ }
+ return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+ chars := "abcdefghijklmnopqrstuvwxyz" +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+ "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+ res := make([]byte, n)
+ for i := 0; i < n; i++ {
+ res[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(res)
+}
+
+// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
+// For maxlen of 3 and lower, no ellipsis is appended.
+func Ellipsis(s string, maxlen int) string {
+ r := []rune(s)
+ if len(r) <= maxlen {
+ return s
+ }
+ if maxlen <= 3 {
+ return string(r[:maxlen])
+ }
+ return string(r[:maxlen-3]) + "..."
+}
+
+// Truncate truncates a string to maxlen.
+func Truncate(s string, maxlen int) string {
+ r := []rune(s)
+ if len(r) <= maxlen {
+ return s
+ }
+ return string(r[:maxlen])
+}
+
+// InSlice tests whether a string is contained in a slice of strings or not.
+// Comparison is case insensitive
+func InSlice(slice []string, s string) bool {
+ for _, ss := range slice {
+ if strings.ToLower(s) == strings.ToLower(ss) {
+ return true
+ }
+ }
+ return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+ // Bail out early for "simple" strings
+ if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+ buf.WriteString(word)
+ return
+ }
+
+ buf.WriteString("'")
+
+ for i := 0; i < len(word); i++ {
+ b := word[i]
+ if b == '\'' {
+ // Replace literal ' with a close ', a \', and an open '
+ buf.WriteString("'\\''")
+ } else {
+ buf.WriteByte(b)
+ }
+ }
+
+ buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled right when passed as arguments to a program via a shell
+func ShellQuoteArguments(args []string) string {
+ var buf bytes.Buffer
+ for i, arg := range args {
+ if i != 0 {
+ buf.WriteByte(' ')
+ }
+ quote(arg, &buf)
+ }
+ return buf.String()
+}
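Rough usage of the new helpers, using the vendored import path above; note that Ellipsis counts the trailing "..." against maxlen, and InSlice compares case-insensitively:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/stringutils"
)

func main() {
	fmt.Println(stringutils.Ellipsis("containers-storage", 10))        // "contain..."
	fmt.Println(stringutils.Truncate("containers-storage", 10))        // "containers"
	fmt.Println(stringutils.InSlice([]string{"Overlay", "VFS"}, "vfs")) // true
	fmt.Println(stringutils.ShellQuoteArguments([]string{"ls", "a b", "it's"}))
	// ls 'a b' 'it'\''s'
}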
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
index 6c6397268..d30636052 100644
--- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go
+++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go
@@ -28,6 +28,20 @@ func (s StatT) Mtim() time.Time {
return time.Time(s.mtim)
}
+// UID returns file's user id of owner.
+//
+// on windows this is always 0 because there is no concept of UID
+func (s StatT) UID() uint32 {
+ return 0
+}
+
+// GID returns file's group id of owner.
+//
+// on windows this is always 0 because there is no concept of GID
+func (s StatT) GID() uint32 {
+ return 0
+}
+
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
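A small sketch of how a caller sees the new accessors, assuming the package's Stat(path string) (*StatT, error) signature that the surrounding file documents; the path is invented:

// +build windows

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	st, err := system.Stat(`C:\Windows\notepad.exe`)
	if err != nil {
		panic(err)
	}
	// On Windows both accessors always return 0; the Unix build reports the real owner.
	fmt.Println(st.UID(), st.GID())
}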
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 088d9c0c5..29972b690 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "reflect"
"strconv"
"strings"
"sync"
@@ -18,9 +19,11 @@ import (
"github.com/BurntSushi/toml"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
+ "github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -346,6 +349,9 @@ type Store interface {
// with an image.
SetImageBigData(id, key string, data []byte) error
+ // ImageSize computes the size of the image's layers and ancillary data.
+ ImageSize(id string) (int64, error)
+
// ListContainerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a container.
ListContainerBigData(id string) ([]string, error)
@@ -366,6 +372,10 @@ type Store interface {
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
+ // ContainerSize computes the size of the container's layer and ancillary
+ // data. Warning: this is a potentially expensive operation.
+ ContainerSize(id string) (int64, error)
+
// Layer returns a specific layer.
Layer(id string) (*Layer, error)
@@ -949,6 +959,106 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
}
+func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, readWrite bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) {
+ layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool {
+ // If we want host mapping, and the layer uses mappings, it's not the best match.
+ if options.HostUIDMapping && len(layer.UIDMap) != 0 {
+ return false
+ }
+ if options.HostGIDMapping && len(layer.GIDMap) != 0 {
+ return false
+ }
+ // If we don't care about the mapping, it's fine.
+ if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
+ return true
+ }
+ // Compare the maps.
+ return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
+ }
+ var layer, parentLayer *Layer
+ var layerHomeStore ROLayerStore
+ // Locate the image's top layer and its parent, if it has one.
+ for _, store := range append([]ROLayerStore{rlstore}, lstores...) {
+ if store != rlstore {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ }
+ // Walk the top layer list.
+ for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
+ if cLayer, err := store.Get(candidate); err == nil {
+ // We want the layer's parent, too, if it has one.
+ var cParentLayer *Layer
+ if cLayer.Parent != "" {
+ // Its parent should be around here, somewhere.
+ if cParentLayer, err = store.Get(cLayer.Parent); err != nil {
+ // Nope, couldn't find it. We're not going to be able
+ // to diff this one properly.
+ continue
+ }
+ }
+ // If the layer matches the desired mappings, it's a perfect match,
+ // so we're actually done here.
+ if layerMatchesMappingOptions(cLayer, options) {
+ return cLayer, nil
+ }
+ // Record the first one that we found, even if it's not ideal, so that
+ // we have a starting point.
+ if layer == nil {
+ layer = cLayer
+ parentLayer = cParentLayer
+ layerHomeStore = store
+ }
+ }
+ }
+ }
+ if layer == nil {
+ return nil, ErrLayerUnknown
+ }
+ // The top layer's mappings don't match the ones we want, but it's in a read-only
+ // image store, so we can't create and add a mapped copy of the layer to the image.
+ if !readWrite {
+ return layer, nil
+ }
+ // The top layer's mappings don't match the ones we want, and it's in an image store
+ // that lets us edit image metadata...
+ if istore, ok := ristore.(*imageStore); ok {
+ // ... so extract the layer's contents, create a new copy of it with the
+ // desired mappings, and register it as an alternate top layer in the image.
+ noCompression := archive.Uncompressed
+ diffOptions := DiffOptions{
+ Compression: &noCompression,
+ }
+ rc, err := layerHomeStore.Diff("", layer.ID, &diffOptions)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading layer %q to create an ID-mapped version of it", layer.ID)
+ }
+ defer rc.Close()
+ layerOptions := LayerOptions{
+ IDMappingOptions: IDMappingOptions{
+ HostUIDMapping: options.HostUIDMapping,
+ HostGIDMapping: options.HostGIDMapping,
+ UIDMap: copyIDMap(options.UIDMap),
+ GIDMap: copyIDMap(options.GIDMap),
+ },
+ }
+ mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q", layer.ID)
+ }
+ if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
+ if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
+ err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
+ }
+ return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
+ }
+ layer = mappedLayer
+ }
+ return layer, nil
+}
+
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
if options == nil {
options = &ContainerOptions{}
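The matching rule inside imageTopLayerForMapping above boils down to a slice comparison with reflect.DeepEqual once the host-mapping shortcuts are out of the way. A minimal standalone illustration over idtools.IDMap slices (values invented):

package main

import (
	"fmt"
	"reflect"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	layerUIDMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	wantUIDMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	otherUIDMap := []idtools.IDMap{{ContainerID: 0, HostID: 200000, Size: 65536}}

	// Same shape and values: the existing layer can be reused as-is.
	fmt.Println(reflect.DeepEqual(layerUIDMap, wantUIDMap)) // true
	// Different host range: a remapped copy of the layer has to be created.
	fmt.Println(reflect.DeepEqual(layerUIDMap, otherUIDMap)) // false
}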
@@ -977,6 +1087,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
uidMap := options.UIDMap
gidMap := options.GIDMap
if image != "" {
+ var imageHomeStore ROImageStore
istore, err := s.ImageStore()
if err != nil {
return nil, err
@@ -994,6 +1105,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
cimage, err = store.Get(image)
if err == nil {
+ imageHomeStore = store
break
}
}
@@ -1006,22 +1118,9 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
- var ilayer *Layer
- for _, store := range append([]ROLayerStore{rlstore}, lstores...) {
- if store != rlstore {
- store.Lock()
- defer store.Unlock()
- if modified, err := store.Modified(); modified || err != nil {
- store.Load()
- }
- }
- ilayer, err = store.Get(cimage.TopLayer)
- if err == nil {
- break
- }
- }
- if ilayer == nil {
- return nil, ErrLayerUnknown
+ ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, imageHomeStore == istore, rlstore, lstores, options.IDMappingOptions)
+ if err != nil {
+ return nil, err
}
imageTopLayer = ilayer
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
@@ -1279,6 +1378,200 @@ func (s *store) SetImageBigData(id, key string, data []byte) error {
return ristore.SetBigData(id, key, data)
}
+func (s *store) ImageSize(id string) (int64, error) {
+ var image *Image
+
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return -1, errors.Wrapf(err, "error loading primary layer store data")
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return -1, errors.Wrapf(err, "error loading additional layer stores")
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ }
+
+ var imageStore ROBigDataStore
+ istore, err := s.ImageStore()
+ if err != nil {
+ return -1, errors.Wrapf(err, "error loading primary image store data")
+ }
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return -1, errors.Wrapf(err, "error loading additional image stores")
+ }
+
+ // Look for the image's record.
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ if image, err = store.Get(id); err == nil {
+ imageStore = store
+ break
+ }
+ }
+ if image == nil {
+ return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+ }
+
+ // Start with a list of the image's top layers.
+ queue := make(map[string]struct{})
+ for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
+ queue[layerID] = struct{}{}
+ }
+ visited := make(map[string]struct{})
+ // Walk all of the layers.
+ var size int64
+ for len(visited) < len(queue) {
+ for layerID := range queue {
+ // Visit each layer only once.
+ if _, ok := visited[layerID]; ok {
+ continue
+ }
+ visited[layerID] = struct{}{}
+ // Look for the layer and the store that knows about it.
+ var layerStore ROLayerStore
+ var layer *Layer
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ if layer, err = store.Get(layerID); err == nil {
+ layerStore = store
+ break
+ }
+ }
+ if layer == nil {
+ return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
+ }
+ // The UncompressedSize is only valid if there's a digest to go with it.
+ n := layer.UncompressedSize
+ if layer.UncompressedDigest == "" {
+ // Compute the size.
+ n, err = layerStore.DiffSize("", layer.ID)
+ if err != nil {
+ return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
+ }
+ }
+ // Count this layer.
+ size += n
+ // Make a note to visit the layer's parent if we haven't already.
+ if layer.Parent != "" {
+ queue[layer.Parent] = struct{}{}
+ }
+ }
+ }
+
+ // Count big data items.
+ names, err := imageStore.BigDataNames(id)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
+ }
+ for _, name := range names {
+ n, err := imageStore.BigDataSize(id, name)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
+ }
+ size += n
+ }
+
+ return size, nil
+}
+
+func (s *store) ContainerSize(id string) (int64, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return -1, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return -1, err
+ }
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ }
+
+ // Get the location of the container directory and container run directory.
+ // Do it before we lock the container store because they do, too.
+ cdir, err := s.ContainerDirectory(id)
+ if err != nil {
+ return -1, err
+ }
+ rdir, err := s.ContainerRunDirectory(id)
+ if err != nil {
+ return -1, err
+ }
+
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return -1, err
+ }
+ rcstore.Lock()
+ defer rcstore.Unlock()
+ if modified, err := rcstore.Modified(); modified || err != nil {
+ rcstore.Load()
+ }
+
+ // Read the container record.
+ container, err := rcstore.Get(id)
+ if err != nil {
+ return -1, err
+ }
+
+ // Read the container's layer's size.
+ var layer *Layer
+ var size int64
+ for _, store := range append([]ROLayerStore{lstore}, lstores...) {
+ if layer, err = store.Get(container.LayerID); err == nil {
+ size, err = store.DiffSize("", layer.ID)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
+ }
+ break
+ }
+ }
+ if layer == nil {
+ return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
+ }
+
+ // Count big data items.
+ names, err := rcstore.BigDataNames(id)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
+ }
+ for _, name := range names {
+ n, err := rcstore.BigDataSize(id, name)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
+ }
+ size += n
+ }
+
+ // Count the size of our container directory and container run directory.
+ n, err := directory.Size(cdir)
+ if err != nil {
+ return -1, err
+ }
+ size += n
+ n, err = directory.Size(rdir)
+ if err != nil {
+ return -1, err
+ }
+ size += n
+
+ return size, nil
+}
+
func (s *store) ListContainerBigData(id string) ([]string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
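The layer walk in ImageSize above is a plain worklist over the parent chain, visiting each layer once even when TopLayer and MappedTopLayers share ancestors. The same pattern in isolation, with a hypothetical minimal layer record standing in for the real Layer type and its per-layer size lookup:

package main

import "fmt"

// layerRec is a stand-in for the fields ImageSize actually consults.
type layerRec struct {
	Parent string
	Size   int64
}

func imageSize(layers map[string]layerRec, topLayers []string) int64 {
	queue := make(map[string]struct{})
	for _, id := range topLayers {
		queue[id] = struct{}{}
	}
	visited := make(map[string]struct{})
	var size int64
	for len(visited) < len(queue) {
		for id := range queue {
			if _, ok := visited[id]; ok {
				continue
			}
			visited[id] = struct{}{}
			l := layers[id]
			size += l.Size
			if l.Parent != "" {
				queue[l.Parent] = struct{}{} // enqueue the parent for a later pass
			}
		}
	}
	return size
}

func main() {
	layers := map[string]layerRec{
		"base":   {Parent: "", Size: 70},
		"mid":    {Parent: "base", Size: 20},
		"top":    {Parent: "mid", Size: 10},
		"mapped": {Parent: "mid", Size: 10}, // an ID-mapped copy of "top"
	}
	fmt.Println(imageSize(layers, []string{"top", "mapped"})) // 110: shared ancestors counted once
}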
@@ -1614,7 +1907,7 @@ func (s *store) DeleteLayer(id string) error {
return err
}
for _, image := range images {
- if image.TopLayer == id {
+ if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) {
return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
}
}
@@ -1697,10 +1990,13 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
childrenByParent[parent] = &([]string{layer.ID})
}
}
- anyImageByTopLayer := make(map[string]string)
+ otherImagesByTopLayer := make(map[string]string)
for _, img := range images {
if img.ID != id {
- anyImageByTopLayer[img.TopLayer] = img.ID
+ otherImagesByTopLayer[img.TopLayer] = img.ID
+ for _, layerID := range img.MappedTopLayers {
+ otherImagesByTopLayer[layerID] = img.ID
+ }
}
}
if commit {
@@ -1714,27 +2010,38 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if rcstore.Exists(layer) {
break
}
- if _, ok := anyImageByTopLayer[layer]; ok {
+ if _, ok := otherImagesByTopLayer[layer]; ok {
break
}
parent := ""
if l, err := rlstore.Get(layer); err == nil {
parent = l.Parent
}
- otherRefs := 0
- if childList, ok := childrenByParent[layer]; ok && childList != nil {
- children := *childList
- for _, child := range children {
- if child != lastRemoved {
- otherRefs++
+ hasOtherRefs := func() bool {
+ layersToCheck := []string{layer}
+ if layer == image.TopLayer {
+ layersToCheck = append(layersToCheck, image.MappedTopLayers...)
+ }
+ for _, layer := range layersToCheck {
+ if childList, ok := childrenByParent[layer]; ok && childList != nil {
+ children := *childList
+ for _, child := range children {
+ if child != lastRemoved {
+ return true
+ }
+ }
}
}
+ return false
}
- if otherRefs != 0 {
+ if hasOtherRefs() {
break
}
lastRemoved = layer
layersToRemove = append(layersToRemove, lastRemoved)
+ if layer == image.TopLayer {
+ layersToRemove = append(layersToRemove, image.MappedTopLayers...)
+ }
layer = parent
}
} else {
@@ -2293,7 +2600,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
return nil, err
}
for _, image := range imageList {
- if image.TopLayer == layer.ID {
+ if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
images = append(images, &image)
}
}