-rw-r--r--go.mod2
-rw-r--r--go.sum6
-rw-r--r--libpod/runtime_cstorage.go2
-rw-r--r--libpod/storage.go8
-rw-r--r--pkg/machine/qemu/machine.go7
-rw-r--r--test/system/410-selinux.bats4
-rw-r--r--test/system/420-cgroups.bats4
-rw-r--r--test/system/helpers.bash12
-rw-r--r--vendor/github.com/containers/storage/.cirrus.yml15
-rw-r--r--vendor/github.com/containers/storage/VERSION2
-rw-r--r--vendor/github.com/containers/storage/drivers/aufs/aufs.go6
-rw-r--r--vendor/github.com/containers/storage/drivers/fsdiff.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go91
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/randomid.go2
-rw-r--r--vendor/github.com/containers/storage/drivers/template.go4
-rw-r--r--vendor/github.com/containers/storage/go.mod5
-rw-r--r--vendor/github.com/containers/storage/go.sum12
-rw-r--r--vendor/github.com/containers/storage/layers.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/storage_linux.go86
-rw-r--r--vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go10
-rw-r--r--vendor/github.com/containers/storage/pkg/loopback/loopback.go4
-rw-r--r--vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go2
-rw-r--r--vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go30
-rw-r--r--vendor/github.com/containers/storage/store.go4
-rw-r--r--vendor/github.com/containers/storage/types/utils.go2
-rw-r--r--vendor/github.com/containers/storage/userns.go2
-rw-r--r--vendor/github.com/klauspost/compress/LICENSE12
-rw-r--r--vendor/github.com/klauspost/compress/README.md12
-rw-r--r--vendor/github.com/klauspost/compress/flate/fast_encoder.go25
-rw-r--r--vendor/github.com/klauspost/compress/huff0/decompress.go275
-rw-r--r--vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go1
-rw-r--r--vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go2
-rw-r--r--vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s66
-rw-r--r--vendor/modules.txt4
38 files changed, 511 insertions, 220 deletions
diff --git a/go.mod b/go.mod
index d1346956e..3c2e55f07 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,7 @@ require (
github.com/containers/image/v5 v5.16.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.7.1
- github.com/containers/storage v1.36.0
+ github.com/containers/storage v1.36.1-0.20210929132900-162a0bf730ce
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
diff --git a/go.sum b/go.sum
index 73ed1e459..bee514506 100644
--- a/go.sum
+++ b/go.sum
@@ -267,8 +267,9 @@ github.com/containers/psgo v1.7.1 h1:2N6KADeFvBm1aI2iXxu6+/Xh7CCkdh8p8F3F/cpIU5I
github.com/containers/psgo v1.7.1/go.mod h1:mWGpFzW73qWFA+blhF6l7GuKzbrACkYgr/ajiNQR+RM=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20=
-github.com/containers/storage v1.36.0 h1:OelxllCW19tnNngYuZw2ty/zLabVMG5rSs3KSwO1Lzc=
github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8=
+github.com/containers/storage v1.36.1-0.20210929132900-162a0bf730ce h1:6YOfANEWtL7+Q4RmnAfloGLIJNtt17MEHjvlHXz0vVY=
+github.com/containers/storage v1.36.1-0.20210929132900-162a0bf730ce/go.mod h1:b7OGxODIyB3XpvCSWR91lllT9fv9DXeC8yfnaUocWJU=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -590,8 +591,9 @@ github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index 58bd67e6d..5694967aa 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -121,7 +121,7 @@ func (r *Runtime) removeStorageContainer(idOrName string, force bool) error {
}
if err := r.store.DeleteContainer(ctr.ID); err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
diff --git a/libpod/storage.go b/libpod/storage.go
index ad78fe191..5c265df40 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -184,8 +184,12 @@ func (r *storageService) DeleteContainer(idOrName string) error {
}
err = r.store.DeleteContainer(container.ID)
if err != nil {
- logrus.Debugf("Failed to delete container %q: %v", container.ID, err)
- return err
+ if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
+ logrus.Infof("Storage for container %s already removed", container.ID)
+ } else {
+ logrus.Debugf("Failed to delete container %q: %v", container.ID, err)
+ return err
+ }
}
return nil
}
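
Both hunks above apply the same idea in the two call sites: when deleting a container's storage, treat "storage already gone" as success rather than an error. A minimal standalone sketch of that pattern, assuming only the containers/storage error values and pkg/errors wrapping shown in the diff (the function name and wrapping message are illustrative, not podman's code):

package storagesketch

import (
	"github.com/containers/storage"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// deleteContainerStorage removes a container's storage, treating
// "already removed" conditions as success instead of failure.
func deleteContainerStorage(store storage.Store, id string) error {
	if err := store.DeleteContainer(id); err != nil {
		if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
			// The storage layer no longer knows this container; nothing to do.
			logrus.Infof("Storage for container %s already removed", id)
			return nil
		}
		return errors.Wrapf(err, "deleting storage for container %s", id)
	}
	return nil
}
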
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index 09078fbfb..c4e6aa611 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -392,7 +392,12 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
logrus.Warn(err)
}
// Remove socket
- return os.Remove(qemuSocketFile)
+ if err := os.Remove(qemuSocketFile); err != nil {
+ return err
+ }
+
+ fmt.Printf("Successfully stopped machine: %s", name)
+ return nil
}
// NewQMPMonitor creates the monitor subsection of our vm
diff --git a/test/system/410-selinux.bats b/test/system/410-selinux.bats
index 5ee0e0715..0f7c35c65 100644
--- a/test/system/410-selinux.bats
+++ b/test/system/410-selinux.bats
@@ -113,6 +113,10 @@ function check_label() {
@test "podman selinux: shared context in (some) namespaces" {
skip_if_no_selinux
+ # rootless users have no usable cgroups with cgroupsv1, so containers
+ # must use a pid namespace and not join an existing one.
+ skip_if_rootless_cgroupsv1
+
run_podman run -d --name myctr $IMAGE top
run_podman exec myctr cat -v /proc/self/attr/current
context_c1="$output"
diff --git a/test/system/420-cgroups.bats b/test/system/420-cgroups.bats
index 89c81a742..025a20012 100644
--- a/test/system/420-cgroups.bats
+++ b/test/system/420-cgroups.bats
@@ -8,9 +8,7 @@ load helpers
@test "podman run, preserves initial --cgroup-manager" {
skip_if_remote "podman-remote does not support --cgroup-manager"
- if is_rootless && is_cgroupsv1; then
- skip "not supported as rootless under cgroups v1"
- fi
+ skip_if_rootless_cgroupsv1
# Find out our default cgroup manager, and from that, get the non-default
run_podman info --format '{{.Host.CgroupManager}}'
diff --git a/test/system/helpers.bash b/test/system/helpers.bash
index 28ea924bb..666735b0c 100644
--- a/test/system/helpers.bash
+++ b/test/system/helpers.bash
@@ -428,6 +428,18 @@ function skip_if_cgroupsv1() {
fi
}
+######################
+# skip_if_rootless_cgroupsv1 # ...with an optional message
+######################
+function skip_if_rootless_cgroupsv1() {
+ if is_rootless; then
+ if ! is_cgroupsv2; then
+ local msg=$(_add_label_if_missing "$1" "rootless cgroupvs1")
+ skip "${msg:-not supported as rootless under cgroupsv1}"
+ fi
+ fi
+}
+
##################################
# skip_if_journald_unavailable # rhbz#1895105: rootless journald permissions
##################################
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index 20bede452..d080d790c 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -20,16 +20,14 @@ env:
FEDORA_NAME: "fedora-34"
PRIOR_FEDORA_NAME: "fedora-33"
UBUNTU_NAME: "ubuntu-2104"
- PRIOR_UBUNTU_NAME: "ubuntu-2010"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
- _BUILT_IMAGE_SUFFIX: "c6248193773010944"
+ _BUILT_IMAGE_SUFFIX: "c6431352024203264"
FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${_BUILT_IMAGE_SUFFIX}"
####
#### Command variables to help avoid duplication
@@ -115,15 +113,6 @@ ubuntu_testing_task: &ubuntu_testing
TEST_DRIVER: "overlay"
-prior_ubuntu_testing_task:
- <<: *ubuntu_testing
- alias: prior_ubuntu_testing
- name: *std_test_name
- env:
- OS_NAME: "${PRIOR_UBUNTU_NAME}"
- VM_IMAGE: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
-
-
lint_task:
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
@@ -153,7 +142,6 @@ meta_task:
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
- ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@@ -181,7 +169,6 @@ success_task:
- fedora_testing
- prior_fedora_testing
- ubuntu_testing
- - prior_ubuntu_testing
- meta
- vendor
container:
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 39fc130ef..afc132ffa 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.36.0
+1.36.0+dev
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index 76f12ec3b..a566fbffa 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -730,14 +730,14 @@ func useDirperm() bool {
enableDirpermLock.Do(func() {
base, err := ioutil.TempDir("", "storage-aufs-base")
if err != nil {
- logrus.Errorf("error checking dirperm1: %v", err)
+ logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
union, err := ioutil.TempDir("", "storage-aufs-union")
if err != nil {
- logrus.Errorf("error checking dirperm1: %v", err)
+ logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(union)
@@ -748,7 +748,7 @@ func useDirperm() bool {
}
enableDirperm = true
if err := Unmount(union); err != nil {
- logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
+ logrus.Errorf("Checking dirperm1: failed to unmount %v", err)
}
})
return enableDirperm
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index c52788509..a534630df 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -180,7 +180,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil {
- logrus.Errorf("Error while applying layer: %s", err)
+ logrus.Errorf("While applying layer: %s", err)
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index f546f9b10..62130c73e 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -174,21 +174,21 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
var usingVolatile bool
if err == nil {
if volatileCacheResult {
- logrus.Debugf("cached value indicated that volatile is being used")
+ logrus.Debugf("Cached value indicated that volatile is being used")
} else {
- logrus.Debugf("cached value indicated that volatile is not being used")
+ logrus.Debugf("Cached value indicated that volatile is not being used")
}
usingVolatile = volatileCacheResult
} else {
usingVolatile, err = doesVolatile(home)
if err == nil {
if usingVolatile {
- logrus.Debugf("overlay test mount indicated that volatile is being used")
+ logrus.Debugf("overlay: test mount indicated that volatile is being used")
} else {
- logrus.Debugf("overlay test mount indicated that volatile is not being used")
+ logrus.Debugf("overlay: test mount indicated that volatile is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
- return false, errors.Wrap(err, "error recording volatile-being-used status")
+ return false, errors.Wrap(err, "recording volatile-being-used status")
}
}
}
@@ -206,9 +206,9 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
- logrus.Debugf("cached value indicated that overlay is supported")
+ logrus.Debugf("Cached value indicated that overlay is supported")
} else {
- logrus.Debugf("cached value indicated that overlay is not supported")
+ logrus.Debugf("Cached value indicated that overlay is not supported")
}
supportsDType = overlayCacheResult
if !supportsDType {
@@ -225,12 +225,12 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
}
err = errors.Wrap(err, "kernel does not support overlay fs")
if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil {
- return false, errors.Wrapf(err2, "error recording overlay not being supported (%v)", err)
+ return false, errors.Wrapf(err2, "recording overlay not being supported (%v)", err)
}
return false, err
}
if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil {
- return false, errors.Wrap(err, "error recording overlay support status")
+ return false, errors.Wrap(err, "recording overlay support status")
}
}
return supportsDType, nil
@@ -310,24 +310,24 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if metacopyCacheResult {
- logrus.Debugf("cached value indicated that metacopy is being used")
+ logrus.Debugf("Cached value indicated that metacopy is being used")
} else {
- logrus.Debugf("cached value indicated that metacopy is not being used")
+ logrus.Debugf("Cached value indicated that metacopy is not being used")
}
usingMetacopy = metacopyCacheResult
} else {
usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
if err == nil {
if usingMetacopy {
- logrus.Debugf("overlay test mount indicated that metacopy is being used")
+ logrus.Debugf("overlay: test mount indicated that metacopy is being used")
} else {
- logrus.Debugf("overlay test mount indicated that metacopy is not being used")
+ logrus.Debugf("overlay: test mount indicated that metacopy is not being used")
}
if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil {
- return nil, errors.Wrap(err, "error recording metacopy-being-used status")
+ return nil, errors.Wrap(err, "recording metacopy-being-used status")
}
} else {
- logrus.Infof("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
+ logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err)
return nil, err
}
}
@@ -548,7 +548,7 @@ func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
}
switch contents {
case "true":
- logrus.Debugf("overlay storage already configured with a mount-program")
+ logrus.Debugf("overlay: storage already configured with a mount-program")
return false, nil
default:
needsMountProgram, err := scanForMountProgramIndicators(home)
@@ -640,17 +640,17 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
flags = fmt.Sprintf("%s,userxattr", flags)
}
if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
- logrus.Debugf("unable to create kernel-style whiteout: %v", err)
+ logrus.Debugf("Unable to create kernel-style whiteout: %v", err)
return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
}
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
- logrus.Debugf("overlay test mount with multiple lowers succeeded")
+ logrus.Debugf("overlay: test mount with multiple lowers succeeded")
return supportsDType, nil
}
- logrus.Debugf("overlay test mount with multiple lowers failed %v", err)
+ logrus.Debugf("overlay: test mount with multiple lowers failed %v", err)
}
flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
if selinux.GetEnabled() {
@@ -659,10 +659,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
if err == nil {
- logrus.StandardLogger().Logf(logLevel, "overlay test mount with multiple lowers failed, but succeeded with a single lower")
+ logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
}
- logrus.Debugf("overlay test mount with a single lower failed %v", err)
+ logrus.Debugf("overlay: test mount with a single lower failed %v", err)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
@@ -682,9 +682,9 @@ func (d *Driver) useNaiveDiff() bool {
nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature)
if err == nil {
if nativeDiffCacheResult {
- logrus.Debugf("cached value indicated that native-diff is usable")
+ logrus.Debugf("Cached value indicated that native-diff is usable")
} else {
- logrus.Debugf("cached value indicated that native-diff is not being used")
+ logrus.Debugf("Cached value indicated that native-diff is not being used")
logrus.Info(nativeDiffCacheText)
}
useNaiveDiffOnly = !nativeDiffCacheResult
@@ -821,7 +821,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
}
- return d.create(id, parent, opts)
+ return d.create(id, parent, opts, false)
}
// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
@@ -831,15 +831,16 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
+
if _, ok := opts.StorageOpt["inodes"]; ok {
return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
}
}
- return d.create(id, parent, opts)
+ return d.create(id, parent, opts, true)
}
-func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
dir := d.dir(id)
uidMaps := d.uidMaps
@@ -880,7 +881,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
}
}()
- if d.quotaCtl != nil {
+ if d.quotaCtl != nil && !disableQuota {
quota := quota.Quota{}
if opts != nil && len(opts.StorageOpt) > 0 {
driver := &Driver{}
@@ -994,7 +995,7 @@ func (d *Driver) getLower(parent string) (string, error) {
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(parentDir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "error recreating the links")
+ return "", errors.Wrap(err, "recreating the links")
}
parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
if err != nil {
@@ -1038,7 +1039,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
if os.IsNotExist(err) {
logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower)
if err := d.recreateSymlinks(); err != nil {
- return nil, fmt.Errorf("error recreating the missing symlinks: %v", err)
+ return nil, fmt.Errorf("recreating the missing symlinks: %v", err)
}
// let's call Readlink on lower again now that we have recreated the missing symlinks
lp, err = os.Readlink(lower)
@@ -1121,7 +1122,7 @@ func (d *Driver) recreateSymlinks() error {
// List all the directories under the home directory
dirs, err := ioutil.ReadDir(d.home)
if err != nil {
- return fmt.Errorf("error reading driver home directory %q: %v", d.home, err)
+ return fmt.Errorf("reading driver home directory %q: %v", d.home, err)
}
linksDir := filepath.Join(d.home, "l")
// This makes the link directory if it doesn't exist
@@ -1148,7 +1149,7 @@ func (d *Driver) recreateSymlinks() error {
// Read the "link" file under each layer to get the name of the symlink
data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading name of symlink for %q", dir))
+ errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir))
continue
}
linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
@@ -1162,7 +1163,7 @@ func (d *Driver) recreateSymlinks() error {
}
madeProgress = true
} else if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error trying to stat %q", linkPath))
+ errs = multierror.Append(errs, err)
continue
}
}
@@ -1170,7 +1171,7 @@ func (d *Driver) recreateSymlinks() error {
// that each symlink we have corresponds to one.
links, err := ioutil.ReadDir(linksDir)
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading links directory %q", linksDir))
+ errs = multierror.Append(errs, err)
continue
}
// Go through all of the symlinks in the "l" directory
@@ -1178,7 +1179,7 @@ func (d *Driver) recreateSymlinks() error {
// Read the symlink's target, which should be "../$layer/diff"
target, err := os.Readlink(filepath.Join(linksDir, link.Name()))
if err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error reading target of link %q", link))
+ errs = multierror.Append(errs, err)
continue
}
targetComponents := strings.Split(target, string(os.PathSeparator))
@@ -1196,7 +1197,7 @@ func (d *Driver) recreateSymlinks() error {
data, err := ioutil.ReadFile(linkFile)
if err != nil || string(data) != link.Name() {
if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
- errs = multierror.Append(errs, errors.Wrapf(err, "error correcting link for layer %q", targetID))
+ errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID))
continue
}
madeProgress = true
@@ -1241,7 +1242,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if unshare.IsRootless() {
logLevel = logrus.DebugLevel
}
- logrus.StandardLogger().Logf(logLevel, "ignoring metacopy option from storage.conf, not supported with booted kernel")
+ logrus.StandardLogger().Logf(logLevel, "Ignoring metacopy option from storage.conf, not supported with booted kernel")
}
}
}
@@ -1275,7 +1276,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link"))
if err := d.recreateSymlinks(); err != nil {
- return "", errors.Wrap(err, "error recreating the links")
+ return "", errors.Wrap(err, "recreating the links")
}
link, err = ioutil.ReadFile(path.Join(dir, "link"))
if err != nil {
@@ -1330,7 +1331,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if lower == "" && os.IsNotExist(err) {
logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath)
if err := d.recreateSymlinks(); err != nil {
- return "", fmt.Errorf("error recreating the missing symlinks: %v", err)
+ return "", fmt.Errorf("Recreating the missing symlinks: %v", err)
}
lower = newpath
} else if lower == "" {
@@ -1381,7 +1382,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if retErr != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
- logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
+ logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr)
}
}
}
@@ -1473,7 +1474,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
flags, data := mount.ParseOptions(mountData)
logrus.Debugf("overlay: mount_data=%s", mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
- return "", fmt.Errorf("error creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err)
+ return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err)
}
return mergedDir, nil
@@ -1820,7 +1821,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost)
if err != nil {
if err2 := d.Put(id); err2 != nil {
- logrus.Errorf("%v; error unmounting %v: %v", err, id, err2)
+ logrus.Errorf("%v; unmounting %v: %v", err, id, err2)
}
return err
}
@@ -1923,7 +1924,7 @@ func (d *Driver) releaseAdditionalLayerByID(id string) {
if al, err := d.getAdditionalLayerPathByID(id); err == nil {
notifyReleaseAdditionalLayer(al)
} else if !os.IsNotExist(err) {
- logrus.Warnf("unexpected error on reading Additional Layer Store pointer %v", err)
+ logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err)
}
}
@@ -2004,10 +2005,10 @@ func notifyUseAdditionalLayer(al string) {
} else if err == nil {
f.Close()
if err := os.Remove(useFile); err != nil {
- logrus.Warnf("failed to remove use file")
+ logrus.Warnf("Failed to remove use file")
}
}
- logrus.Warnf("unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err)
+ logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err)
}
// notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified
@@ -2024,7 +2025,7 @@ func notifyReleaseAdditionalLayer(al string) {
if os.IsNotExist(err) {
return
}
- logrus.Warnf("unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err)
+ logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err)
}
// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and
diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
index fc565ef0b..736c48b9c 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go
@@ -47,7 +47,7 @@ func generateID(l int) string {
if retryOnError(err) && retries < maxretries {
count += n
retries++
- logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
+ logrus.Errorf("Generating version 4 uuid, retrying: %v", err)
continue
}
diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go
index 5d80b8865..d40d71cfc 100644
--- a/vendor/github.com/containers/storage/drivers/template.go
+++ b/vendor/github.com/containers/storage/drivers/template.go
@@ -31,7 +31,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel)
if err != nil {
if err2 := d.Remove(id); err2 != nil {
- logrus.Errorf("error removing layer %q: %v", id, err2)
+ logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
@@ -44,7 +44,7 @@ func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMa
}
if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil {
if err2 := d.Remove(id); err2 != nil {
- logrus.Errorf("error removing layer %q: %v", id, err2)
+ logrus.Errorf("Removing layer %q: %v", id, err2)
}
return err
}
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 911469182..ac7f45c3f 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -12,14 +12,13 @@ require (
github.com/google/go-intervals v0.0.2
github.com/google/uuid v1.2.0 // indirect
github.com/hashicorp/go-multierror v1.1.1
- github.com/json-iterator/go v1.1.11
- github.com/klauspost/compress v1.13.5
+ github.com/json-iterator/go v1.1.12
+ github.com/klauspost/compress v1.13.6
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.4.1
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.2
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 5cc5da6d3..3da64ce8c 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -115,15 +115,16 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -148,9 +149,8 @@ github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 32ba20685..fbf6ad362 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -1557,7 +1557,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
compressor = pgzip.NewWriter(&tsdata)
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
- logrus.Infof("error setting compression concurrency threads to 1: %v; ignoring", err)
+ logrus.Infof("Error setting compression concurrency threads to 1: %v; ignoring", err)
}
metadata := storage.NewJSONPacker(compressor)
uncompressed, err := archive.DecompressStream(defragmented)
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 48e846f7c..76544ff28 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -879,7 +879,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
matches, err := pm.IsMatch(relFilePath)
if err != nil {
- logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ logrus.Errorf("Matching %s: %v", relFilePath, err)
return err
}
skip = matches
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 7bd804c44..6efc6a4c8 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -345,6 +345,56 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
return digester.Digest(), nil
}
+// findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible.
+// file is the file to look for.
+// ostreeRepos is a list of OSTree repos.
+// dirfd is an open fd to the destination checkout.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ digest, err := digest.Parse(file.Digest)
+ if err != nil {
+ return false, nil, 0, nil
+ }
+ payloadLink := digest.Encoded() + ".payload-link"
+ if len(payloadLink) < 2 {
+ return false, nil, 0, nil
+ }
+
+ for _, repo := range ostreeRepos {
+ sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
+ st, err := os.Stat(sourceFile)
+ if err != nil || !st.Mode().IsRegular() {
+ continue
+ }
+ if st.Size() != file.Size {
+ continue
+ }
+ fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
+ if err != nil {
+ return false, nil, 0, nil
+ }
+ f := os.NewFile(uintptr(fd), "fd")
+ defer f.Close()
+
+ // check if the open file can be deduplicated with hard links
+ if useHardLinks && !canDedupFileWithHardLink(file, fd, st) {
+ continue
+ }
+
+ dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks)
+ if err != nil {
+ return false, nil, 0, nil
+ }
+ return true, dstFile, written, nil
+ }
+ // If hard links deduplication was used and it has failed, try again without hard links.
+ if useHardLinks {
+ return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false)
+ }
+
+ return false, nil, 0, nil
+}
+
// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
// paths.
@@ -873,6 +923,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
// modifies the source file as well.
useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false)
+ // List of OSTree repositories to use for deduplication
+ ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":")
+
// Generate the manifest
var toc internal.TOC
if err := json.Unmarshal(c.manifest, &toc); err != nil {
@@ -1009,18 +1062,35 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
totalChunksSize += r.Size
+ finalizeFile := func(dstFile *os.File) error {
+ if dstFile != nil {
+ defer dstFile.Close()
+ if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks)
if err != nil {
return output, err
}
- if dstFile != nil {
- if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
- dstFile.Close()
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
return output, err
}
- dstFile.Close()
+ continue
+ }
+
+ found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks)
+ if err != nil {
+ return output, err
}
if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return output, err
+ }
continue
}
@@ -1029,14 +1099,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
if err != nil {
return output, err
}
- if dstFile != nil {
- if err := setFileAttrs(dstFile, mode, &r, options); err != nil {
- dstFile.Close()
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
return output, err
}
- dstFile.Close()
- }
- if found {
continue
}
}
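
The new findFileInOSTreeRepos helper above deduplicates pulled files against local OSTree repositories before falling back to the host lookup. A minimal sketch of just the candidate-path construction it performs, assuming the go-digest API used in the diff; the function name here is illustrative, not part of containers/storage:

package chunkedsketch

import (
	"os"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
)

// payloadLinkCandidates mirrors the lookup used by findFileInOSTreeRepos:
// an object named <encoded digest>.payload-link stored under
// objects/<first two hex chars>/<rest> in each configured repo.
// The repo list itself comes from the "ostree_repos" pull option, split on ":".
func payloadLinkCandidates(fileDigest string, ostreeRepos []string) []string {
	d, err := digest.Parse(fileDigest)
	if err != nil {
		return nil
	}
	payloadLink := d.Encoded() + ".payload-link"
	if len(payloadLink) < 2 {
		return nil
	}
	var candidates []string
	for _, repo := range ostreeRepos {
		p := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:])
		if st, err := os.Stat(p); err == nil && st.Mode().IsRegular() {
			candidates = append(candidates, p)
		}
	}
	return candidates
}
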
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
index 9e0e97bd6..92056c1d5 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
@@ -14,7 +14,7 @@ import (
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
- logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ logrus.Errorf("%v", err)
} else {
return len(fds)
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index b224e7b5c..fc080acbe 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -36,7 +36,7 @@ type lockfile struct {
// necessary.
func openLock(path string, ro bool) (fd int, err error) {
if ro {
- fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0)
+ fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC|os.O_CREATE, 0)
} else {
fd, err = unix.Open(path,
os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE,
diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
index e2cf30b41..6f0726505 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go
@@ -43,7 +43,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
var st syscall.Stat_t
err = syscall.Fstat(int(sparseFile.Fd()), &st)
if err != nil {
- logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err)
+ logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err)
return nil, ErrAttachLoopbackDevice
}
@@ -68,7 +68,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
// OpenFile adds O_CLOEXEC
loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
if err != nil {
- logrus.Errorf("Error opening loopback device: %s", err)
+ logrus.Errorf("Opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}
@@ -90,7 +90,7 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
// device and inode numbers.
dev, ino, err := getLoopbackBackingFile(loopFile)
if err != nil {
- logrus.Errorf("Error getting loopback backing file: %s", err)
+ logrus.Errorf("Getting loopback backing file: %s", err)
return nil, ErrGetLoopbackBackingFile
}
if dev != uint64(st.Dev) || ino != st.Ino {
@@ -125,7 +125,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// OpenFile adds O_CLOEXEC
sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
if err != nil {
- logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+ logrus.Errorf("Opening sparse file: %v", err)
return nil, ErrAttachLoopbackDevice
}
defer sparseFile.Close()
@@ -147,7 +147,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
// If the call failed, then free the loopback device
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
- logrus.Error("Error while cleaning up the loopback device")
+ logrus.Error("While cleaning up the loopback device")
}
loopFile.Close()
return nil, ErrAttachLoopbackDevice
diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
index f4cf2826e..c9be05776 100644
--- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go
+++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go
@@ -13,7 +13,7 @@ import (
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
if err != nil {
- logrus.Errorf("Error get loopback backing file: %s", err)
+ logrus.Errorf("Get loopback backing file: %v", err)
return 0, 0, ErrGetLoopbackBackingFile
}
return loopInfo.loDevice, loopInfo.loInode, nil
@@ -22,7 +22,7 @@ func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
// SetCapacity reloads the size for the loopback device.
func SetCapacity(file *os.File) error {
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
- logrus.Errorf("Error loopbackSetCapacity: %s", err)
+ logrus.Errorf("loopbackSetCapacity: %s", err)
return ErrSetCapacity
}
return nil
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
index 76e1e499f..7a68bc39b 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
+++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
@@ -35,7 +35,7 @@ func GetKernelVersion() (*VersionInfo, error) {
// the given version.
func CheckKernelVersion(k, major, minor int) bool {
if v, err := GetKernelVersion(); err != nil {
- logrus.Warnf("error getting kernel version: %s", err)
+ logrus.Warnf("Error getting kernel version: %s", err)
} else {
if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
return false
diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
index 26cd8504c..674e0a0ba 100644
--- a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
+++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go
@@ -34,7 +34,7 @@ func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) {
}
// Make sure to avoid writes after the reader has been closed.
if err := reader.Close(); err != nil {
- logrus.Errorf("error closing tarlogger reader: %v", err)
+ logrus.Errorf("Closing tarlogger reader: %v", err)
}
// Unblock the Close().
t.closeMutex.Unlock()
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index 96b857543..6d351ce80 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -154,7 +154,7 @@ func (c *Cmd) Start() error {
pidString := ""
b := new(bytes.Buffer)
if _, err := io.Copy(b, pidRead); err != nil {
- return errors.Wrapf(err, "error reading child PID")
+ return errors.Wrapf(err, "Reading child PID")
}
pidString = b.String()
pid, err := strconv.Atoi(pidString)
@@ -188,8 +188,8 @@ func (c *Cmd) Start() error {
if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
uidmap, gidmap, err := GetHostIDMappings("")
if err != nil {
- fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
- return errors.Wrapf(err, "error reading ID mappings in parent")
+ fmt.Fprintf(continueWrite, "Reading ID mappings in parent: %v", err)
+ return errors.Wrapf(err, "Reading ID mappings in parent")
}
if len(c.UidMappings) == 0 {
c.UidMappings = uidmap
@@ -222,8 +222,8 @@ func (c *Cmd) Start() error {
if err == nil {
gidmapSet = true
} else {
- logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
- logrus.Warnf("falling back to single mapping")
+ logrus.Warnf("Error running newgidmap: %v: %s", err, g.String())
+ logrus.Warnf("Falling back to single mapping")
g.Reset()
g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
}
@@ -271,8 +271,8 @@ func (c *Cmd) Start() error {
if err == nil {
uidmapSet = true
} else {
- logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
- logrus.Warnf("falling back to single mapping")
+ logrus.Warnf("Error running newuidmap: %v: %s", err, u.String())
+ logrus.Warnf("Falling back to single mapping")
u.Reset()
u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
}
@@ -407,7 +407,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// ID and a range size.
uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
if err != nil {
- logrus.Warnf("error reading allowed ID mappings: %v", err)
+ logrus.Warnf("Reading allowed ID mappings: %v", err)
}
if len(uidmap) == 0 {
logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
@@ -434,13 +434,13 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
// to use unshare(), so don't bother creating a new user namespace at this point.
capabilities, err := capability.NewPid(0)
- bailOnError(err, "error reading the current capabilities sets")
+ bailOnError(err, "Reading the current capabilities sets")
if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
return
}
// Read the set of ID mappings that we're currently using.
uidmap, gidmap, err = GetHostIDMappings("")
- bailOnError(err, "error reading current ID mappings")
+ bailOnError(err, "Reading current ID mappings")
// Just reuse them.
for i := range uidmap {
uidmap[i].HostID = uidmap[i].ContainerID
@@ -463,7 +463,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
- logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+ logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err)
os.Exit(1)
}
}
@@ -483,7 +483,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
cmd.GidMappingsEnableSetgroups = true
// Finish up.
- logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+ logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
ExecRunnable(cmd, nil)
}
@@ -512,7 +512,7 @@ func ExecRunnable(cmd Runnable, cleanup func()) {
}
}
logrus.Errorf("%v", err)
- logrus.Errorf("(unable to determine exit status)")
+ logrus.Errorf("(Unable to determine exit status)")
exit(1)
}
exit(0)
@@ -523,7 +523,7 @@ func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
var mappings []specs.LinuxIDMapping
f, err := os.Open(path)
if err != nil {
- return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
+ return nil, errors.Wrapf(err, "Reading ID mappings from %q", path)
}
defer f.Close()
scanner := bufio.NewScanner(f)
@@ -571,7 +571,7 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
mappings, err := idtools.NewIDMappings(user, group)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
+ return nil, nil, errors.Wrapf(err, "Reading subuid mappings for user %q and subgid mappings for group %q", user, group)
}
var uidmap, gidmap []specs.LinuxIDMapping
for _, m := range mappings.UIDs() {
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 8d6f2c4d7..6f6f69807 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -1131,10 +1131,6 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
if options.HostGIDMapping && len(layer.GIDMap) != 0 {
return false
}
- // If we don't care about the mapping, it's fine.
- if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
- return true
- }
// Compare the maps.
return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
}
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index b7ab07342..4dd1a786e 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -87,7 +87,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
if tmpPerUserDir != "" {
if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
- logrus.Errorf("failed to create temp directory for user: %v", err)
+ logrus.Errorf("Failed to create temp directory for user: %v", err)
} else {
return tmpPerUserDir, nil
}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 3ada41f73..523c92dc8 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -43,7 +43,7 @@ func getAdditionalSubIDs(username string) (*idSet, *idSet, error) {
}
mappings, err := idtools.NewIDMappings(username, username)
if err != nil {
- logrus.Errorf("cannot find mappings for user %q: %v", username, err)
+ logrus.Errorf("Cannot find mappings for user %q: %v", username, err)
} else {
uids = getHostIDs(mappings.UIDs())
gids = getHostIDs(mappings.GIDs())
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
index 6cd1e9627..87d557477 100644
--- a/vendor/github.com/klauspost/compress/LICENSE
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -290,3 +290,15 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 235dc7cc6..3429879eb 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,12 +17,17 @@ This package provides various compression algorithms.
# changelog
+* Aug 30, 2021 (v1.13.5)
+ * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
+ * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
+ * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
+ * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
+
* Aug 12, 2021 (v1.13.4)
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
* Aug 3, 2021 (v1.13.3)
-
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
@@ -31,7 +36,6 @@ This package provides various compression algorithms.
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
* Jun 14, 2021 (v1.13.1)
-
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
@@ -64,6 +68,9 @@ This package provides various compression algorithms.
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+<details>
+ <summary>See changes prior to v1.12.1</summary>
+
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
@@ -118,6 +125,7 @@ This package provides various compression algorithms.
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+</details>
<details>
<summary>See changes prior to v1.11.0</summary>
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 347ac2c90..a746eb733 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -215,24 +215,15 @@ func (e *fastGen) Reset() {
func matchLen(a, b []byte) int {
b = b[:len(a)]
var checked int
- if len(a) >= 4 {
- // Try 4 bytes first
- if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
- return bits.TrailingZeros32(diff) >> 3
- }
- // Switch to 8 byte matching.
- checked = 4
- a = a[4:]
- b = b[4:]
- for len(a) >= 8 {
- b = b[:len(a)]
- if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
- return checked + (bits.TrailingZeros64(diff) >> 3)
- }
- checked += 8
- a = a[8:]
- b = b[8:]
+
+ for len(a) >= 8 {
+ b = b[:len(a)]
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ return checked + (bits.TrailingZeros64(diff) >> 3)
}
+ checked += 8
+ a = a[8:]
+ b = b[8:]
}
b = b[:len(a)]
for i := range a {
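
The matchLen hunk above drops the separate 4-byte probe and compares 8 bytes at a time throughout: XORing two little-endian words and counting the trailing zero bits of the result gives the index of the first mismatching byte. A self-contained Go sketch of that technique (an illustration, not the vendored function, which additionally re-slices b inside the loop as a bounds-check hint):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns how many leading bytes of a and b are equal.
// A non-zero XOR of two 8-byte words pinpoints the first differing
// byte via its trailing zero count (little-endian order).
func matchLen(a, b []byte) int {
	if len(b) < len(a) {
		a = a[:len(b)]
	}
	var n int
	for len(a) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := range a {
		if a[i] != b[i] {
			return n + i
		}
	}
	return n + len(a)
}

func main() {
	fmt.Println(matchLen([]byte("compression"), []byte("compressors"))) // 8
}
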
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 41703bba4..9b7cc8e97 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -344,35 +344,241 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
var buf [256]byte
var off uint8
- shift := (8 - d.actualTableLog) & 7
-
- //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
- for br.off >= 4 {
- br.fillFast()
- v := dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekByteFast()>>shift]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
+ switch d.actualTableLog {
+ case 8:
+ const shift = 8 - 8
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 7:
+ const shift = 8 - 7
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 6:
+ const shift = 8 - 6
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 5:
+ const shift = 8 - 5
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 4:
+ const shift = 8 - 4
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 3:
+ const shift = 8 - 3
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 2:
+ const shift = 8 - 2
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 1:
+ const shift = 8 - 1
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
}
- dst = append(dst, buf[:]...)
}
+ default:
+ return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}
if len(dst)+int(off) > maxDecodedSize {
@@ -383,6 +589,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
// br < 4, so uint8 is fine
bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ shift := (8 - d.actualTableLog) & 7
+
for bitsLeft > 0 {
if br.bitsRead >= 64-8 {
for br.off > 0 {
@@ -423,24 +631,24 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
var buf [256]byte
var off uint8
- const shift = 0
+ const shift = 56
//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
for br.off >= 4 {
br.fillFast()
- v := dt[br.peekByteFast()>>shift]
+ v := dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
- v = dt[br.peekByteFast()>>shift]
+ v = dt[uint8(br.value>>shift)]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
@@ -474,7 +682,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
- v := dt[br.peekByteFast()>>shift]
+ v := dt[br.peekByteFast()]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= int8(nBits)
@@ -709,7 +917,6 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
shift := (8 - d.actualTableLog) & 7
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
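
The decompress1X8Bit hunk above replaces a shift computed at run time from d.actualTableLog with a switch that duplicates the hot loop once per table-log value, so the shift is a compile-time constant in each case and the next table index is taken directly from the top byte of the 64-bit bit buffer. The toy sketch below shows only that specialization pattern; toyBits and decodeConst are invented names for illustration and are not the huff0 API, and the toy advances by a fixed width rather than by the decoded entry's bit count:

package main

import "fmt"

// toyBits is a stand-in for a bit buffer whose most-significant bits
// hold the next bits to be consumed (hypothetical, for illustration).
type toyBits struct {
	value    uint64
	bitsRead uint8
}

func (b *toyBits) peekTop(shift uint8) uint8 { return uint8(b.value >> (56 + shift)) }
func (b *toyBits) advance(n uint8)           { b.value <<= n; b.bitsRead += n }

// decodeConst reads one tableLog-bit index. Duplicating the body per
// tableLog lets the compiler fold `8 - tableLog` into a constant shift,
// which is the idea behind the per-case loops in the vendored code.
func decodeConst(b *toyBits, tableLog uint8) uint8 {
	switch tableLog {
	case 8:
		const shift = 8 - 8
		v := b.peekTop(shift)
		b.advance(8)
		return v
	case 6:
		const shift = 8 - 6
		v := b.peekTop(shift)
		b.advance(6)
		return v
	default:
		// Variable-shift fallback, as the tail loop in the vendored
		// function still uses.
		shift := (8 - tableLog) & 7
		v := b.peekTop(shift)
		b.advance(tableLog)
		return v
	}
}

func main() {
	b := &toyBits{value: 0xAB_CD_00_00_00_00_00_00}
	fmt.Printf("%#x\n", decodeConst(b, 8)) // 0xab
	fmt.Printf("%#x\n", decodeConst(b, 8)) // 0xcd
}
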
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 426b9cac7..2c112a0ab 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
- b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
index 3ddbd5c0b..0ae847f75 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
@@ -9,4 +9,4 @@ package xxhash
func Sum64(b []byte) uint64
//go:noescape
-func writeBlocks(*Digest, []byte) int
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index 2c9c5357a..be8db5bf7 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -6,7 +6,7 @@
// Register allocation:
// AX h
-// CX pointer to advance through b
+// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@@ -16,39 +16,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
-// R15 prime4v
+// DI prime4v
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
+ MOVQ (SI), R12 \
+ ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
- ADDQ R15, acc
+ ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
+ MOVQ ·prime4v(SB), DI
// Load slice.
- MOVQ b_base+0(FP), CX
+ MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
+ LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
- // Loop until CX > BX.
+ // Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
+ MOVQ (SI), R8
+ ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
- ADDQ R15, AX
+ ADDQ DI, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JG singles
- MOVL (CX), R8
- ADDQ $4, CX
+ MOVL (SI), R8
+ ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@@ -138,19 +138,19 @@ fourByte:
singles:
ADDQ $4, BX
- CMPQ CX, BX
+ CMPQ SI, BX
JGE finalize
singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
+ MOVBQZX (SI), R12
+ ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
- CMPQ CX, BX
+ CMPQ SI, BX
JL singlesLoop
finalize:
@@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
- MOVQ arg1_base+8(FP), CX
- MOVQ arg1_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
+ MOVQ b_base+8(FP), SI
+ MOVQ b_len+16(FP), DX
+ LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
- MOVQ arg+0(FP), AX
+ MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
@@ -199,7 +199,7 @@ blockLoop:
round(R10)
round(R11)
- CMPQ CX, BX
+ CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
- // The number of bytes written is CX minus the old base pointer.
- SUBQ arg1_base+8(FP), CX
- MOVQ CX, ret+32(FP)
+ // The number of bytes written is SI minus the old base pointer.
+ SUBQ b_base+8(FP), SI
+ MOVQ SI, ret+32(FP)
RET
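
In the xxhash hunks above, the buffer pointer moves from CX to SI and prime4v from R15 to DI, and the Go declaration of writeBlocks gains parameter names so the assembly's frame references (d+0(FP), b_base+8(FP), ret+32(FP)) line up with the declaration as go vet's asmdecl check expects. As a reference point, here is a pure-Go sketch of the per-block work the blockLoop performs, using the standard xxHash64 round; the digest type is a simplified stand-in, not the vendored Digest:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
)

// digest mirrors the four running accumulators the assembly loads from
// and stores back to the Digest (simplified stand-in).
type digest struct{ v1, v2, v3, v4 uint64 }

// round is the xxHash64 accumulator update performed by the round()
// macro in the assembly: acc = rotl31(acc + input*prime2) * prime1.
func round(acc, input uint64) uint64 {
	return bits.RotateLeft64(acc+input*prime2, 31) * prime1
}

// writeBlocks consumes as many full 32-byte blocks of b as possible and
// returns the number of bytes consumed, matching the contract of the
// assembly version (the blockLoop over SI/BX in the hunk above).
func writeBlocks(d *digest, b []byte) int {
	n := len(b) &^ 31 // largest multiple of 32 <= len(b)
	for p := b[:n]; len(p) >= 32; p = p[32:] {
		d.v1 = round(d.v1, binary.LittleEndian.Uint64(p[0:8]))
		d.v2 = round(d.v2, binary.LittleEndian.Uint64(p[8:16]))
		d.v3 = round(d.v3, binary.LittleEndian.Uint64(p[16:24]))
		d.v4 = round(d.v4, binary.LittleEndian.Uint64(p[24:32]))
	}
	return n
}

func main() {
	d := &digest{v1: 1, v2: 2, v3: 3, v4: 4}
	fmt.Println(writeBlocks(d, make([]byte, 70))) // 64: two full blocks consumed
}
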
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 98eb1dab9..1927f8285 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -202,7 +202,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.36.0
+# github.com/containers/storage v1.36.1-0.20210929132900-162a0bf730ce
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -405,7 +405,7 @@ github.com/json-iterator/go
# github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
github.com/juju/ansiterm
github.com/juju/ansiterm/tabwriter
-# github.com/klauspost/compress v1.13.5
+# github.com/klauspost/compress v1.13.6
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse