Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/storage/VERSION                                    |   2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go                 |  39
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go              |  30
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go  |   3
-rw-r--r--  vendor/github.com/containers/storage/go.mod                                     |   2
-rw-r--r--  vendor/github.com/containers/storage/go.sum                                     |   6
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go                     |   5
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_unix.go                |   6
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compression.go                 | 380
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go       | 220
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/internal/compression.go        | 172
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go               |  41
-rw-r--r--  vendor/github.com/containers/storage/storage.conf                               |   3
-rw-r--r--  vendor/github.com/containers/storage/types/utils.go                             |  10
14 files changed, 506 insertions, 413 deletions
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 5d245052c..7aa332e41 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.32.6
+1.33.0
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 2fa54a207..ecfbae916 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -364,12 +364,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Try to enable project quota support over xfs.
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
- } else if opts.quota.Size > 0 {
- return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
+ return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err)
}
- } else if opts.quota.Size > 0 {
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
- return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+ return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -400,6 +400,13 @@ func parseOptions(options []string) (*overlayOptions, error) {
return nil, err
}
o.quota.Size = uint64(size)
+ case "inodes":
+ logrus.Debugf("overlay: inodes=%s", val)
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ o.quota.Inodes = uint64(inodes)
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
@@ -613,6 +620,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if unshare.IsRootless() {
flags = fmt.Sprintf("%s,userxattr", flags)
}
+ if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
+ logrus.Debugf("unable to create kernel-style whiteout: %v", err)
+ return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
+ }
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
@@ -784,6 +795,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
}
+ if _, ok := opts.StorageOpt["inodes"]; !ok {
+ if opts.StorageOpt == nil {
+ opts.StorageOpt = map[string]string{}
+ }
+ opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
+ }
+
return d.create(id, parent, opts)
}
@@ -794,6 +812,9 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
+ if _, ok := opts.StorageOpt["inodes"]; ok {
+ return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
+ }
}
return d.create(id, parent, opts)
@@ -850,7 +871,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if driver.options.quota.Size > 0 {
quota.Size = driver.options.quota.Size
}
-
+ if driver.options.quota.Inodes > 0 {
+ quota.Inodes = driver.options.quota.Inodes
+ }
}
// Set container disk quota limit
// If it is set to 0, we will track the disk usage, but not enforce a limit
@@ -922,6 +945,12 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
return err
}
driver.options.quota.Size = uint64(size)
+ case "inodes":
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ driver.options.quota.Inodes = uint64(inodes)
default:
return fmt.Errorf("Unknown option %s", key)
}
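The new option threads through the same path as size: parsed in parseOptions / parseStorageOpt, defaulted from the driver options in CreateReadWrite, and rejected for read-only layers. A minimal sketch of a caller requesting both limits on a fresh read-write layer (package name, driver setup, and layer IDs are placeholders):

package quotaexample

import (
	graphdriver "github.com/containers/storage/drivers"
)

// createLimitedLayer caps a fresh read-write layer at both a byte size and
// an inode count. The inode quota is new in 1.33.0 and, like size, requires
// project-quota support on the backing filesystem.
func createLimitedLayer(d graphdriver.Driver, id, parent string) error {
	opts := graphdriver.CreateOpts{
		StorageOpt: map[string]string{
			"size":   "10737418240", // 10 GiB
			"inodes": "500000",
		},
	}
	return d.CreateReadWrite(id, parent, &opts)
}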
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index d805623b5..a435f6b82 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -38,8 +38,8 @@ struct fsxattr {
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
-#ifndef XFS_PROJ_QUOTA
-#define XFS_PROJ_QUOTA 2
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
@@ -61,9 +61,10 @@ import (
"golang.org/x/sys/unix"
)
-// Quota limit params - currently we only control blocks hard limit
+// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
@@ -119,7 +120,8 @@ func NewControl(basePath string) (*Control, error) {
// a quota on the first available project id
//
quota := Quota{
- Size: 0,
+ Size: 0,
+ Inodes: 0,
}
if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
return nil, err
@@ -166,7 +168,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
//
// set the quota limit for the container's project id
//
- logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
+ logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return setProjectQuota(q.backingFsBlockDev, projectID, quota)
}
@@ -175,11 +177,18 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
- d.d_flags = C.XFS_PROJ_QUOTA
+ d.d_flags = C.FS_PROJ_QUOTA
- d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
- d.d_blk_hardlimit = C.__u64(quota.Size / 512)
- d.d_blk_softlimit = d.d_blk_hardlimit
+ if quota.Size > 0 {
+ d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
+ d.d_blk_hardlimit = C.__u64(quota.Size / 512)
+ d.d_blk_softlimit = d.d_blk_hardlimit
+ }
+ if quota.Inodes > 0 {
+ d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
+ d.d_ino_hardlimit = C.__u64(quota.Inodes)
+ d.d_ino_softlimit = d.d_ino_hardlimit
+ }
var cs = C.CString(backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
@@ -202,6 +211,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
+ quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}
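Quota now carries an inode ceiling next to the block size, and setProjectQuota only programs the limits that are non-zero. A hedged usage sketch (paths are placeholders; NewControl fails on filesystems without project-quota support):

package main

import (
	"log"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	// NewControl probes the base path for project-quota support and
	// reserves a starting project ID.
	ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
	if err != nil {
		log.Fatalf("project quota unavailable: %v", err)
	}
	// Zero-valued fields leave the corresponding limit unset.
	q := quota.Quota{Size: 10 << 30, Inodes: 500000}
	if err := ctl.SetQuota("/var/lib/containers/storage/overlay/layer-dir", q); err != nil {
		log.Fatal(err)
	}
}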
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index be6c75a58..7469138db 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -8,7 +8,8 @@ import (
// Quota limit params - currently we only control blocks hard limit
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index d46000ace..e4f484d6b 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -16,7 +16,7 @@ require (
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.4.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.0
+ github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.8.2
github.com/pkg/errors v0.9.1
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 081da00e4..2607dbc9b 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -99,7 +99,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.1/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -468,8 +468,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0 h1:QOhAQAYUlKeofuyeKdR6ITvOnXLPbEAjPMjz9wCUXcU=
-github.com/opencontainers/runc v1.0.0/go.mod h1:MU2S3KEB2ZExnhnAQYbwjdYV6HwKtDlNbA2Z2OeNDeA=
+github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
+github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 50e3e3555..48e846f7c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -645,10 +645,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
file.Close()
- case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ case tar.TypeBlock, tar.TypeChar:
if inUserns { // cannot create devices in a userns
+ logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
return nil
}
+ fallthrough
+ case tar.TypeFifo:
// Handle this is an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index e257737e7..7c3e442da 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/userns"
"golang.org/x/sys/unix"
)
@@ -88,11 +87,6 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if userns.RunningInUserNS() {
- // cannot create a device if running in user namespace
- return nil
- }
-
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
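With the userns guard hoisted into createTarFile, this helper may assume mknod is permitted. A sketch of what the OS-specific side boils down to, written against x/sys/unix directly rather than the package's own system helpers:

package archiveexample

import (
	"archive/tar"

	"golang.org/x/sys/unix"
)

// mknodFromHeader creates the block device, char device, or FIFO described
// by a tar header; the caller is responsible for skipping devices when
// running inside a user namespace.
func mknodFromHeader(hdr *tar.Header, path string) error {
	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= unix.S_IFBLK
	case tar.TypeChar:
		mode |= unix.S_IFCHR
	case tar.TypeFifo:
		mode |= unix.S_IFIFO
	}
	dev := unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))
	return unix.Mknod(path, mode, int(dev))
}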
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
index 605be4b8f..f2811fb9a 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -2,72 +2,29 @@ package chunked
import (
"bytes"
- "encoding/base64"
"encoding/binary"
- "encoding/json"
"fmt"
"io"
- "io/ioutil"
- "time"
- "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/chunked/compressor"
+ "github.com/containers/storage/pkg/chunked/internal"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/archive/tar"
)
-type zstdTOC struct {
- Version int `json:"version"`
- Entries []zstdFileMetadata `json:"entries"`
-}
-
-type zstdFileMetadata struct {
- Type string `json:"type"`
- Name string `json:"name"`
- Linkname string `json:"linkName,omitempty"`
- Mode int64 `json:"mode,omitempty"`
- Size int64 `json:"size"`
- UID int `json:"uid"`
- GID int `json:"gid"`
- ModTime time.Time `json:"modtime"`
- AccessTime time.Time `json:"accesstime"`
- ChangeTime time.Time `json:"changetime"`
- Devmajor int64 `json:"devMajor"`
- Devminor int64 `json:"devMinor"`
- Xattrs map[string]string `json:"xattrs,omitempty"`
- Digest string `json:"digest,omitempty"`
- Offset int64 `json:"offset,omitempty"`
- EndOffset int64 `json:"endOffset,omitempty"`
-
- // Currently chunking is not supported.
- ChunkSize int64 `json:"chunkSize,omitempty"`
- ChunkOffset int64 `json:"chunkOffset,omitempty"`
- ChunkDigest string `json:"chunkDigest,omitempty"`
-}
-
const (
- TypeReg = "reg"
- TypeChunk = "chunk"
- TypeLink = "hardlink"
- TypeChar = "char"
- TypeBlock = "block"
- TypeDir = "dir"
- TypeFifo = "fifo"
- TypeSymlink = "symlink"
+ TypeReg = internal.TypeReg
+ TypeChunk = internal.TypeChunk
+ TypeLink = internal.TypeLink
+ TypeChar = internal.TypeChar
+ TypeBlock = internal.TypeBlock
+ TypeDir = internal.TypeDir
+ TypeFifo = internal.TypeFifo
+ TypeSymlink = internal.TypeSymlink
)
-var tarTypes = map[byte]string{
- tar.TypeReg: TypeReg,
- tar.TypeRegA: TypeReg,
- tar.TypeLink: TypeLink,
- tar.TypeChar: TypeChar,
- tar.TypeBlock: TypeBlock,
- tar.TypeDir: TypeDir,
- tar.TypeFifo: TypeFifo,
- tar.TypeSymlink: TypeSymlink,
-}
-
var typesToTar = map[string]byte{
TypeReg: tar.TypeReg,
TypeLink: tar.TypeLink,
@@ -78,14 +35,6 @@ var typesToTar = map[string]byte{
TypeSymlink: tar.TypeSymlink,
}
-func getType(t byte) (string, error) {
- r, found := tarTypes[t]
- if !found {
- return "", fmt.Errorf("unknown tarball type: %v", t)
- }
- return r, nil
-}
-
func typeToTarType(t string) (byte, error) {
r, found := typesToTar[t]
if !found {
@@ -94,52 +43,30 @@ func typeToTarType(t string) (byte, error) {
return r, nil
}
-const (
- manifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
- manifestInfoKey = "io.containers.zstd-chunked.manifest-position"
-
- // manifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
- manifestTypeCRFS = 1
-
- // footerSizeSupported is the footer size supported by this implementation.
- // Newer versions of the image format might increase this value, so reject
- // any version that is not supported.
- footerSizeSupported = 40
-)
-
-var (
- // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
- // will ignore it.
- // https://tools.ietf.org/html/rfc8478#section-3.1.2
- skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
-
- zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
-)
-
func isZstdChunkedFrameMagic(data []byte) bool {
if len(data) < 8 {
return false
}
- return bytes.Equal(zstdChunkedFrameMagic, data[:8])
+ return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, error) {
- footerSize := int64(footerSizeSupported)
+ footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, errors.New("blob too small")
}
- manifestChecksumAnnotation := annotations[manifestChecksumKey]
+ manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
- return nil, fmt.Errorf("manifest checksum annotation %q not found", manifestChecksumKey)
+ return nil, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
- if offsetMetadata := annotations[manifestInfoKey]; offsetMetadata != "" {
+ if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, err
}
@@ -173,7 +100,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
}
}
- if manifestType != manifestTypeCRFS {
+ if manifestType != internal.ManifestTypeCRFS {
return nil, errors.New("invalid manifest type")
}
@@ -235,279 +162,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return manifest, nil
}
-func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
- if _, err := dest.Write(skippableFrameMagic); err != nil {
- return err
- }
-
- var size []byte = make([]byte, 4)
- binary.LittleEndian.PutUint32(size, uint32(len(data)))
- if _, err := dest.Write(size); err != nil {
- return err
- }
- if _, err := dest.Write(data); err != nil {
- return err
- }
- return nil
-}
-
-func writeZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []zstdFileMetadata, level int) error {
- // 8 is the size of the zstd skippable frame header + the frame size
- manifestOffset := offset + 8
-
- toc := zstdTOC{
- Version: 1,
- Entries: metadata,
- }
-
- // Generate the manifest
- manifest, err := json.Marshal(toc)
- if err != nil {
- return err
- }
-
- var compressedBuffer bytes.Buffer
- zstdWriter, err := zstdWriterWithLevel(&compressedBuffer, level)
- if err != nil {
- return err
- }
- if _, err := zstdWriter.Write(manifest); err != nil {
- zstdWriter.Close()
- return err
- }
- if err := zstdWriter.Close(); err != nil {
- return err
- }
- compressedManifest := compressedBuffer.Bytes()
-
- manifestDigester := digest.Canonical.Digester()
- manifestChecksum := manifestDigester.Hash()
- if _, err := manifestChecksum.Write(compressedManifest); err != nil {
- return err
- }
-
- outMetadata[manifestChecksumKey] = manifestDigester.Digest().String()
- outMetadata[manifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), manifestTypeCRFS)
- if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
- return err
- }
-
- // Store the offset to the manifest and its size in LE order
- var manifestDataLE []byte = make([]byte, footerSizeSupported)
- binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
- binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(manifestTypeCRFS))
- copy(manifestDataLE[32:], zstdChunkedFrameMagic)
-
- return appendZstdSkippableFrame(dest, manifestDataLE)
-}
-
-func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
- // total written so far. Used to retrieve partial offsets in the file
- dest := ioutils.NewWriteCounter(destFile)
-
- tr := tar.NewReader(reader)
- tr.RawAccounting = true
-
- buf := make([]byte, 4096)
-
- zstdWriter, err := zstdWriterWithLevel(dest, level)
- if err != nil {
- return err
- }
- defer func() {
- if zstdWriter != nil {
- zstdWriter.Close()
- zstdWriter.Flush()
- }
- }()
-
- restartCompression := func() (int64, error) {
- var offset int64
- if zstdWriter != nil {
- if err := zstdWriter.Close(); err != nil {
- return 0, err
- }
- if err := zstdWriter.Flush(); err != nil {
- return 0, err
- }
- offset = dest.Count
- zstdWriter.Reset(dest)
- }
- return offset, nil
- }
-
- var metadata []zstdFileMetadata
- for {
- hdr, err := tr.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
-
- rawBytes := tr.RawBytes()
- if _, err := zstdWriter.Write(rawBytes); err != nil {
- return err
- }
- payloadDigester := digest.Canonical.Digester()
- payloadChecksum := payloadDigester.Hash()
-
- payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
-
- // Now handle the payload, if any
- var startOffset, endOffset int64
- checksum := ""
- for {
- read, errRead := tr.Read(buf)
- if errRead != nil && errRead != io.EOF {
- return err
- }
-
- // restart the compression only if there is
- // a payload.
- if read > 0 {
- if startOffset == 0 {
- startOffset, err = restartCompression()
- if err != nil {
- return err
- }
- }
- _, err := payloadDest.Write(buf[:read])
- if err != nil {
- return err
- }
- }
- if errRead == io.EOF {
- if startOffset > 0 {
- endOffset, err = restartCompression()
- if err != nil {
- return err
- }
- checksum = payloadDigester.Digest().String()
- }
- break
- }
- }
-
- typ, err := getType(hdr.Typeflag)
- if err != nil {
- return err
- }
- xattrs := make(map[string]string)
- for k, v := range hdr.Xattrs {
- xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
- }
- m := zstdFileMetadata{
- Type: typ,
- Name: hdr.Name,
- Linkname: hdr.Linkname,
- Mode: hdr.Mode,
- Size: hdr.Size,
- UID: hdr.Uid,
- GID: hdr.Gid,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- Devmajor: hdr.Devmajor,
- Devminor: hdr.Devminor,
- Xattrs: xattrs,
- Digest: checksum,
- Offset: startOffset,
- EndOffset: endOffset,
-
- // ChunkSize is 0 for the last chunk
- ChunkSize: 0,
- ChunkOffset: 0,
- ChunkDigest: checksum,
- }
- metadata = append(metadata, m)
- }
-
- rawBytes := tr.RawBytes()
- if _, err := zstdWriter.Write(rawBytes); err != nil {
- return err
- }
- if err := zstdWriter.Flush(); err != nil {
- return err
- }
- if err := zstdWriter.Close(); err != nil {
- return err
- }
- zstdWriter = nil
-
- return writeZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
-}
-
-type zstdChunkedWriter struct {
- tarSplitOut *io.PipeWriter
- tarSplitErr chan error
-}
-
-func (w zstdChunkedWriter) Close() error {
- err := <-w.tarSplitErr
- if err != nil {
- w.tarSplitOut.Close()
- return err
- }
- return w.tarSplitOut.Close()
-}
-
-func (w zstdChunkedWriter) Write(p []byte) (int, error) {
- select {
- case err := <-w.tarSplitErr:
- w.tarSplitOut.Close()
- return 0, err
- default:
- return w.tarSplitOut.Write(p)
- }
-}
-
-// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
-// compressed separately so it can be addressed separately. Idea based on CRFS:
-// https://github.com/google/crfs
-// The difference with CRFS is that the zstd compression is used instead of gzip.
-// The reason for it is that zstd supports embedding metadata ignored by the decoder
-// as part of the compressed stream.
-// A manifest json file with all the metadata is appended at the end of the tarball
-// stream, using zstd skippable frames.
-// The final file will look like:
-// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
-// Where:
-// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
-// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
-// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
-// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
-func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
- ch := make(chan error, 1)
- r, w := io.Pipe()
-
- go func() {
- ch <- writeZstdChunkedStream(out, metadata, r, level)
- io.Copy(ioutil.Discard, r)
- r.Close()
- close(ch)
- }()
-
- return zstdChunkedWriter{
- tarSplitOut: w,
- tarSplitErr: ch,
- }, nil
-}
-
-func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
- el := zstd.EncoderLevelFromZstd(level)
- return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
-}
-
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
- if level == nil {
- l := 3
- level = &l
- }
-
- return zstdChunkedWriterWithLevel(r, metadata, *level)
+ return compressor.ZstdCompressor(r, metadata, level)
}
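Callers should migrate to the compressor package; the signature is unchanged and a nil level still selects zstd level 3. A hedged usage sketch (the input tar path is a placeholder):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/chunked/compressor"
)

func main() {
	var blob bytes.Buffer
	annotations := map[string]string{}

	w, err := compressor.ZstdCompressor(&blob, annotations, nil)
	if err != nil {
		panic(err)
	}
	in, err := os.Open("layer.tar") // any uncompressed tar stream
	if err != nil {
		panic(err)
	}
	defer in.Close()
	if _, err := io.Copy(w, in); err != nil {
		panic(err)
	}
	// Close flushes the last frame, appends the TOC skippable frames, and
	// fills in the io.containers.zstd-chunked.* annotations.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(annotations["io.containers.zstd-chunked.manifest-checksum"])
}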
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 000000000..a205b73fd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,220 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "encoding/base64"
+ "io"
+ "io/ioutil"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+ // total written so far. Used to retrieve partial offsets in the file
+ dest := ioutils.NewWriteCounter(destFile)
+
+ tr := tar.NewReader(reader)
+ tr.RawAccounting = true
+
+ buf := make([]byte, 4096)
+
+ zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if zstdWriter != nil {
+ zstdWriter.Close()
+ zstdWriter.Flush()
+ }
+ }()
+
+ restartCompression := func() (int64, error) {
+ var offset int64
+ if zstdWriter != nil {
+ if err := zstdWriter.Close(); err != nil {
+ return 0, err
+ }
+ if err := zstdWriter.Flush(); err != nil {
+ return 0, err
+ }
+ offset = dest.Count
+ zstdWriter.Reset(dest)
+ }
+ return offset, nil
+ }
+
+ var metadata []internal.ZstdFileMetadata
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+ payloadDigester := digest.Canonical.Digester()
+ payloadChecksum := payloadDigester.Hash()
+
+ payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+
+ // Now handle the payload, if any
+ var startOffset, endOffset int64
+ checksum := ""
+ for {
+ read, errRead := tr.Read(buf)
+ if errRead != nil && errRead != io.EOF {
+ return err
+ }
+
+ // restart the compression only if there is
+ // a payload.
+ if read > 0 {
+ if startOffset == 0 {
+ startOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ }
+ _, err := payloadDest.Write(buf[:read])
+ if err != nil {
+ return err
+ }
+ }
+ if errRead == io.EOF {
+ if startOffset > 0 {
+ endOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ checksum = payloadDigester.Digest().String()
+ }
+ break
+ }
+ }
+
+ typ, err := internal.GetType(hdr.Typeflag)
+ if err != nil {
+ return err
+ }
+ xattrs := make(map[string]string)
+ for k, v := range hdr.Xattrs {
+ xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
+ }
+ m := internal.ZstdFileMetadata{
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: hdr.ModTime,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ Digest: checksum,
+ Offset: startOffset,
+ EndOffset: endOffset,
+
+ // ChunkSize is 0 for the last chunk
+ ChunkSize: 0,
+ ChunkOffset: 0,
+ ChunkDigest: checksum,
+ }
+ metadata = append(metadata, m)
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+ if err := zstdWriter.Flush(); err != nil {
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ zstdWriter = nil
+
+ return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
+}
+
+type zstdChunkedWriter struct {
+ tarSplitOut *io.PipeWriter
+ tarSplitErr chan error
+}
+
+func (w zstdChunkedWriter) Close() error {
+ err := <-w.tarSplitErr
+ if err != nil {
+ w.tarSplitOut.Close()
+ return err
+ }
+ return w.tarSplitOut.Close()
+}
+
+func (w zstdChunkedWriter) Write(p []byte) (int, error) {
+ select {
+ case err := <-w.tarSplitErr:
+ w.tarSplitOut.Close()
+ return 0, err
+ default:
+ return w.tarSplitOut.Write(p)
+ }
+}
+
+// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
+// compressed separately so it can be addressed separately. Idea based on CRFS:
+// https://github.com/google/crfs
+// The difference with CRFS is that the zstd compression is used instead of gzip.
+// The reason for it is that zstd supports embedding metadata ignored by the decoder
+// as part of the compressed stream.
+// A manifest json file with all the metadata is appended at the end of the tarball
+// stream, using zstd skippable frames.
+// The final file will look like:
+// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
+// Where:
+// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
+// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
+// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
+// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
+func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
+ ch := make(chan error, 1)
+ r, w := io.Pipe()
+
+ go func() {
+ ch <- writeZstdChunkedStream(out, metadata, r, level)
+ io.Copy(ioutil.Discard, r)
+ r.Close()
+ close(ch)
+ }()
+
+ return zstdChunkedWriter{
+ tarSplitOut: w,
+ tarSplitErr: ch,
+ }, nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level == nil {
+ l := 3
+ level = &l
+ }
+
+ return zstdChunkedWriterWithLevel(r, metadata, *level)
+}
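Since each payload is flushed into its own zstd frame, a reader can decompress one file without touching its neighbors; the TOC's Offset/EndOffset delimit that frame. A sketch of the read side, assuming the blob is already in memory:

package chunkedexample

import (
	"bytes"
	"io/ioutil"

	"github.com/klauspost/compress/zstd"
)

// extractPayload decompresses one file's contents out of a zstd:chunked
// blob using only the byte range recorded in its TOC entry.
func extractPayload(blob []byte, offset, endOffset int64) ([]byte, error) {
	dec, err := zstd.NewReader(bytes.NewReader(blob[offset:endOffset]))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return ioutil.ReadAll(dec)
}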
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
new file mode 100644
index 000000000..af0025c20
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -0,0 +1,172 @@
+package internal
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/opencontainers/go-digest"
+)
+
+type ZstdTOC struct {
+ Version int `json:"version"`
+ Entries []ZstdFileMetadata `json:"entries"`
+}
+
+type ZstdFileMetadata struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Linkname string `json:"linkName,omitempty"`
+ Mode int64 `json:"mode,omitempty"`
+ Size int64 `json:"size"`
+ UID int `json:"uid"`
+ GID int `json:"gid"`
+ ModTime time.Time `json:"modtime"`
+ AccessTime time.Time `json:"accesstime"`
+ ChangeTime time.Time `json:"changetime"`
+ Devmajor int64 `json:"devMajor"`
+ Devminor int64 `json:"devMinor"`
+ Xattrs map[string]string `json:"xattrs,omitempty"`
+ Digest string `json:"digest,omitempty"`
+ Offset int64 `json:"offset,omitempty"`
+ EndOffset int64 `json:"endOffset,omitempty"`
+
+ // Currently chunking is not supported.
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+}
+
+const (
+ TypeReg = "reg"
+ TypeChunk = "chunk"
+ TypeLink = "hardlink"
+ TypeChar = "char"
+ TypeBlock = "block"
+ TypeDir = "dir"
+ TypeFifo = "fifo"
+ TypeSymlink = "symlink"
+)
+
+var TarTypes = map[byte]string{
+ tar.TypeReg: TypeReg,
+ tar.TypeRegA: TypeReg,
+ tar.TypeLink: TypeLink,
+ tar.TypeChar: TypeChar,
+ tar.TypeBlock: TypeBlock,
+ tar.TypeDir: TypeDir,
+ tar.TypeFifo: TypeFifo,
+ tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+ r, found := TarTypes[t]
+ if !found {
+ return "", fmt.Errorf("unknown tarball type: %v", t)
+ }
+ return r, nil
+}
+
+const (
+ ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
+ ManifestInfoKey = "io.containers.zstd-chunked.manifest-position"
+
+ // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+ ManifestTypeCRFS = 1
+
+ // FooterSizeSupported is the footer size supported by this implementation.
+ // Newer versions of the image format might increase this value, so reject
+ // any version that is not supported.
+ FooterSizeSupported = 40
+)
+
+var (
+ // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
+ // will ignore it.
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2
+ skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
+
+ ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+)
+
+func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
+ if _, err := dest.Write(skippableFrameMagic); err != nil {
+ return err
+ }
+
+ var size []byte = make([]byte, 4)
+ binary.LittleEndian.PutUint32(size, uint32(len(data)))
+ if _, err := dest.Write(size); err != nil {
+ return err
+ }
+ if _, err := dest.Write(data); err != nil {
+ return err
+ }
+ return nil
+}
+
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []ZstdFileMetadata, level int) error {
+ // 8 is the size of the zstd skippable frame header + the frame size
+ manifestOffset := offset + 8
+
+ toc := ZstdTOC{
+ Version: 1,
+ Entries: metadata,
+ }
+
+ // Generate the manifest
+ manifest, err := json.Marshal(toc)
+ if err != nil {
+ return err
+ }
+
+ var compressedBuffer bytes.Buffer
+ zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
+ if err != nil {
+ return err
+ }
+ if _, err := zstdWriter.Write(manifest); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ compressedManifest := compressedBuffer.Bytes()
+
+ manifestDigester := digest.Canonical.Digester()
+ manifestChecksum := manifestDigester.Hash()
+ if _, err := manifestChecksum.Write(compressedManifest); err != nil {
+ return err
+ }
+
+ outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String()
+ outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS)
+ if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
+ return err
+ }
+
+ // Store the offset to the manifest and its size in LE order
+ var manifestDataLE []byte = make([]byte, FooterSizeSupported)
+ binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
+ binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
+ copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
+
+ return appendZstdSkippableFrame(dest, manifestDataLE)
+}
+
+func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+ el := zstd.EncoderLevelFromZstd(level)
+ return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
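The last 40 bytes of a zstd:chunked blob are the payload of the final skippable frame, laid out exactly as WriteZstdChunkedManifest stores them. A sketch of decoding that footer (the constants are copied here because pkg/chunked/internal is not importable from outside pkg/chunked):

package footerexample

import (
	"bytes"
	"encoding/binary"
	"errors"
)

// zstdChunkedFrameMagic mirrors internal.ZstdChunkedFrameMagic.
var zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}

// parseFooter decodes the fixed 40-byte trailer: four little-endian uint64
// fields (manifest offset, compressed length, uncompressed length, manifest
// type) followed by the 8-byte frame magic.
func parseFooter(footer []byte) (offset, length, lengthUncompressed, manifestType uint64, err error) {
	if len(footer) != 40 { // internal.FooterSizeSupported
		return 0, 0, 0, 0, errors.New("unsupported footer size")
	}
	if !bytes.Equal(footer[32:40], zstdChunkedFrameMagic) {
		return 0, 0, 0, 0, errors.New("invalid zstd:chunked magic number")
	}
	offset = binary.LittleEndian.Uint64(footer[0:8])
	length = binary.LittleEndian.Uint64(footer[8:16])
	lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
	manifestType = binary.LittleEndian.Uint64(footer[24:32])
	return offset, length, lengthUncompressed, manifestType, nil
}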
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 8c5062475..0f14d8af9 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -19,6 +19,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
driversCopy "github.com/containers/storage/drivers/copy"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/types"
"github.com/klauspost/compress/zstd"
@@ -39,7 +40,7 @@ const (
type chunkedZstdDiffer struct {
stream ImageSourceSeekable
manifest []byte
- layersMetadata map[string][]zstdFileMetadata
+ layersMetadata map[string][]internal.ZstdFileMetadata
layersTarget map[string]string
}
@@ -75,11 +76,11 @@ func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mod
return dstFile, st.Size(), err
}
-func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[string]map[string]*zstdFileMetadata {
- maps := make(map[string]map[string]*zstdFileMetadata)
+func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetadata) map[string]map[string]*internal.ZstdFileMetadata {
+ maps := make(map[string]map[string]*internal.ZstdFileMetadata)
for layerID, v := range layersMetadata {
- r := make(map[string]*zstdFileMetadata)
+ r := make(map[string]*internal.ZstdFileMetadata)
for i := range v {
r[v[i].Digest] = &v[i]
}
@@ -88,13 +89,13 @@ func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[s
return maps
}
-func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[string]string, error) {
+func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata, map[string]string, error) {
allLayers, err := store.Layers()
if err != nil {
return nil, nil, err
}
- layersMetadata := make(map[string][]zstdFileMetadata)
+ layersMetadata := make(map[string][]internal.ZstdFileMetadata)
layersTarget := make(map[string]string)
for _, r := range allLayers {
manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
@@ -106,7 +107,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
if err != nil {
return nil, nil, err
}
- var toc zstdTOC
+ var toc internal.ZstdTOC
if err := json.Unmarshal(manifest, &toc); err != nil {
continue
}
@@ -123,7 +124,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
- if _, ok := annotations[manifestChecksumKey]; ok {
+ if _, ok := annotations[internal.ManifestChecksumKey]; ok {
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
}
return nil, errors.New("blob type not supported for partial retrieval")
@@ -147,7 +148,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
}, nil
}
-func findFileInOtherLayers(file zstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*zstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
// this is ugly, needs to be indexed
for layerID, checksums := range layersMetadata {
m, found := checksums[file.Digest]
@@ -194,7 +195,7 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
// paths.
-func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
if !strings.HasPrefix(sourceFile, "/usr/") {
// limit host deduplication to files under /usr.
@@ -251,7 +252,7 @@ func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDir
return dstFile, written, nil
}
-func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) error {
+func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
return nil
}
@@ -278,7 +279,7 @@ func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) er
}
type missingFile struct {
- File *zstdFileMetadata
+ File *internal.ZstdFileMetadata
Gap int64
}
@@ -291,7 +292,7 @@ type missingChunk struct {
Files []missingFile
}
-func setFileAttrs(file *os.File, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
if file == nil || file.Fd() < 0 {
return errors.Errorf("invalid file")
}
@@ -346,7 +347,7 @@ func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileM
return os.NewFile(uintptr(fd), name), nil
}
-func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) (err error) {
+func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, 0)
if err != nil {
return err
@@ -497,7 +498,7 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
return nil
}
-func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
parent := filepath.Dir(metadata.Name)
base := filepath.Base(metadata.Name)
@@ -526,7 +527,7 @@ func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMet
return setFileAttrs(file, mode, metadata, options)
}
-func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
@@ -558,7 +559,7 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMeta
return setFileAttrs(newFile, mode, metadata, options)
}
-func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
@@ -636,7 +637,7 @@ type hardLinkToCreate struct {
dest string
dirfd int
mode os.FileMode
- metadata *zstdFileMetadata
+ metadata *internal.ZstdFileMetadata
}
func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
@@ -659,7 +660,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
}
// Generate the manifest
- var toc zstdTOC
+ var toc internal.ZstdTOC
if err := json.Unmarshal(d.manifest, &toc); err != nil {
return output, err
}
@@ -667,7 +668,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
var missingChunks []missingChunk
- var mergedEntries []zstdFileMetadata
+ var mergedEntries []internal.ZstdFileMetadata
if err := maybeDoIDRemap(toc.Entries, options); err != nil {
return output, err
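GetDiffer is the public gate: it only hands back a differ when the blob advertises the manifest-checksum annotation, so pull code can fall back to a full copy otherwise. A hedged sketch of such a call site (the wrapper and its error text are illustrative):

package pullexample

import (
	"context"
	"errors"

	"github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/chunked"
)

// differFor returns a chunked differ only for blobs that advertise a
// zstd:chunked manifest; anything else should take the regular pull path.
func differFor(ctx context.Context, store storage.Store, blobSize int64,
	annotations map[string]string, iss chunked.ImageSourceSeekable) (graphdriver.Differ, error) {
	if _, ok := annotations["io.containers.zstd-chunked.manifest-checksum"]; !ok {
		return nil, errors.New("blob not usable for partial retrieval; fall back to a full pull")
	}
	return chunked.GetDiffer(ctx, store, blobSize, annotations, iss)
}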
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index e70f4f018..722750c0c 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -69,6 +69,9 @@ additionalimagestores = [
# and vfs drivers.
#ignore_chown_errors = "false"
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
# Path to a helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"
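A hedged storage.conf fragment with the new knob uncommented (the value is a placeholder):

# An inode cap for each container image; like size, it requires
# project-quota support (XFS) on the backing filesystem.
inodes = "500000"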
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 03ddd5ad9..4d62b151a 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -2,6 +2,7 @@ package types
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -74,9 +75,12 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
return runtimeDir, nil
}
- runUserDir := env.getRunUserDir()
- if isRootlessRuntimeDirOwner(runUserDir, env) {
- return runUserDir, nil
+ initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ if err != nil || string(initCommand) == "systemd" {
+ runUserDir := env.getRunUserDir()
+ if isRootlessRuntimeDirOwner(runUserDir, env) {
+ return runUserDir, nil
+ }
}
tmpPerUserDir := env.getTmpPerUserDir()
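The new branch only trusts the /run/user directory when PID 1 is systemd, since that directory is systemd-managed; on a read error it keeps the previous behavior. A sketch of the detection, assuming the proc command file the environment points at is /proc/1/comm (whitespace is trimmed here to guard against the file's trailing newline):

package runtimedirexample

import (
	"io/ioutil"
	"strings"
)

// initIsSystemd reports whether PID 1 is systemd.
func initIsSystemd() bool {
	b, err := ioutil.ReadFile("/proc/1/comm")
	if err != nil {
		// Mirror the vendored fallback: when we cannot tell, keep the
		// previous behavior and still consider /run/user/$UID.
		return true
	}
	return strings.TrimSpace(string(b)) == "systemd"
}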