summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--cmd/podman/common/completion.go4
-rw-r--r--cmd/podman/containers/cp.go108
-rw-r--r--cmd/podman/registry/config.go3
-rw-r--r--cmd/podman/root.go3
-rw-r--r--cmd/podman/system/migrate.go5
-rw-r--r--docs/source/markdown/podman-cp.1.md11
-rw-r--r--go.mod2
-rw-r--r--go.sum3
-rw-r--r--libpod/container_api.go4
-rw-r--r--libpod/container_copy_linux.go3
-rw-r--r--pkg/api/handlers/compat/containers_archive.go17
-rw-r--r--pkg/api/server/register_archive.go4
-rw-r--r--pkg/bindings/containers/types.go2
-rw-r--r--pkg/bindings/containers/types_copy_options.go16
-rw-r--r--pkg/copy/parse.go12
-rw-r--r--pkg/domain/entities/containers.go2
-rw-r--r--pkg/domain/entities/engine_container.go3
-rw-r--r--pkg/domain/infra/abi/archive.go2
-rw-r--r--pkg/domain/infra/abi/system.go12
-rw-r--r--pkg/domain/infra/tunnel/containers.go3
-rw-r--r--pkg/domain/infra/tunnel/system.go3
-rw-r--r--test/system/065-cp.bats234
-rw-r--r--vendor/github.com/containers/storage/VERSION2
-rw-r--r--vendor/github.com/containers/storage/drivers/overlay/overlay.go39
-rw-r--r--vendor/github.com/containers/storage/drivers/quota/projectquota.go30
-rw-r--r--vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go3
-rw-r--r--vendor/github.com/containers/storage/go.mod2
-rw-r--r--vendor/github.com/containers/storage/go.sum6
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive.go5
-rw-r--r--vendor/github.com/containers/storage/pkg/archive/archive_unix.go6
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/compression.go380
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go220
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/internal/compression.go172
-rw-r--r--vendor/github.com/containers/storage/pkg/chunked/storage_linux.go41
-rw-r--r--vendor/github.com/containers/storage/storage.conf3
-rw-r--r--vendor/github.com/containers/storage/types/utils.go10
-rw-r--r--vendor/modules.txt4
37 files changed, 905 insertions, 474 deletions
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go
index 177d094aa..08b2f6235 100644
--- a/cmd/podman/common/completion.go
+++ b/cmd/podman/common/completion.go
@@ -46,7 +46,9 @@ func setupContainerEngine(cmd *cobra.Command) (entities.ContainerEngine, error)
return nil, err
}
if !registry.IsRemote() && rootless.IsRootless() {
- err := containerEngine.SetupRootless(registry.Context(), cmd)
+ _, noMoveProcess := cmd.Annotations[registry.NoMoveProcess]
+
+ err := containerEngine.SetupRootless(registry.Context(), noMoveProcess)
if err != nil {
return nil, err
}
diff --git a/cmd/podman/containers/cp.go b/cmd/podman/containers/cp.go
index c1f1e27f5..67bb13aa1 100644
--- a/cmd/podman/containers/cp.go
+++ b/cmd/podman/containers/cp.go
@@ -82,7 +82,9 @@ func cp(cmd *cobra.Command, args []string) error {
return err
}
- if len(sourceContainerStr) > 0 {
+ if len(sourceContainerStr) > 0 && len(destContainerStr) > 0 {
+ return copyContainerToContainer(sourceContainerStr, sourcePath, destContainerStr, destPath)
+ } else if len(sourceContainerStr) > 0 {
return copyFromContainer(sourceContainerStr, sourcePath, destPath)
}
@@ -115,6 +117,110 @@ func doCopy(funcA func() error, funcB func() error) error {
return errorhandling.JoinErrors(copyErrors)
}
+func copyContainerToContainer(sourceContainer string, sourcePath string, destContainer string, destPath string) error {
+ if err := containerMustExist(sourceContainer); err != nil {
+ return err
+ }
+
+ if err := containerMustExist(destContainer); err != nil {
+ return err
+ }
+
+ sourceContainerInfo, err := registry.ContainerEngine().ContainerStat(registry.GetContext(), sourceContainer, sourcePath)
+ if err != nil {
+ return errors.Wrapf(err, "%q could not be found on container %s", sourcePath, sourceContainer)
+ }
+
+ var destContainerBaseName string
+ destContainerInfo, destContainerInfoErr := registry.ContainerEngine().ContainerStat(registry.GetContext(), destContainer, destPath)
+ if destContainerInfoErr != nil {
+ if strings.HasSuffix(destPath, "/") {
+ return errors.Wrapf(destContainerInfoErr, "%q could not be found on container %s", destPath, destContainer)
+ }
+ // NOTE: destContainerInfo may actually be set. That happens when
+ // the container path is a symlink into nirvana. In that case,
+ // we must use the symlinked path instead.
+ path := destPath
+ if destContainerInfo != nil {
+ destContainerBaseName = filepath.Base(destContainerInfo.LinkTarget)
+ path = destContainerInfo.LinkTarget
+ } else {
+ destContainerBaseName = filepath.Base(destPath)
+ }
+
+ parentDir, err := containerParentDir(destContainer, path)
+ if err != nil {
+ return errors.Wrapf(err, "could not determine parent dir of %q on container %s", path, destContainer)
+ }
+ destContainerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), destContainer, parentDir)
+ if err != nil {
+ return errors.Wrapf(err, "%q could not be found on container %s", destPath, destContainer)
+ }
+ } else {
+ // If the specified path exists on the container, we must use
+ // its base path as it may have changed due to symlink
+ // evaluations.
+ destContainerBaseName = filepath.Base(destContainerInfo.LinkTarget)
+ }
+
+ if sourceContainerInfo.IsDir && !destContainerInfo.IsDir {
+ return errors.New("destination must be a directory when copying a directory")
+ }
+
+ sourceContainerTarget, destContainerTarget := sourceContainerInfo.LinkTarget, destContainerInfo.LinkTarget
+ if !destContainerInfo.IsDir {
+ destContainerTarget = filepath.Dir(destPath)
+ }
+
+ // If we copy a directory via the "." notation and the container path
+ // does not exist, we need to make sure that the destination on the
+ // container gets created; otherwise the contents of the source
+ // directory will be written to the destination's parent directory.
+ //
+ // Hence, whenever "." is the source and the destination does not
+ // exist, we copy the source's parent and let the copier package create
+ // the destination via the Rename option.
+ if destContainerInfoErr != nil && sourceContainerInfo.IsDir && strings.HasSuffix(sourcePath, ".") {
+ sourceContainerTarget = filepath.Dir(sourceContainerTarget)
+ }
+
+ reader, writer := io.Pipe()
+
+ sourceContainerCopy := func() error {
+ defer writer.Close()
+ copyFunc, err := registry.ContainerEngine().ContainerCopyToArchive(registry.GetContext(), sourceContainer, sourceContainerTarget, writer)
+ if err != nil {
+ return err
+ }
+ if err := copyFunc(); err != nil {
+ return errors.Wrap(err, "error copying from container")
+ }
+ return nil
+ }
+
+ destContainerCopy := func() error {
+ defer reader.Close()
+
+ copyOptions := entities.CopyOptions{Chown: chown}
+ if (!sourceContainerInfo.IsDir && !destContainerInfo.IsDir) || destContainerInfoErr != nil {
+ // If we're doing a file-to-file copy, make sure to
+ // rename accordingly.
+ copyOptions.Rename = map[string]string{filepath.Base(sourceContainerTarget): destContainerBaseName}
+ }
+
+ copyFunc, err := registry.ContainerEngine().ContainerCopyFromArchive(registry.GetContext(), destContainer, destContainerTarget, reader, copyOptions)
+ if err != nil {
+ return err
+ }
+ if err := copyFunc(); err != nil {
+ return errors.Wrap(err, "error copying to container")
+ }
+ return nil
+ }
+
+ return doCopy(sourceContainerCopy, destContainerCopy)
+}
+
// copyFromContainer copies from the containerPath on the container to hostPath.
func copyFromContainer(container string, containerPath string, hostPath string) error {
if err := containerMustExist(container); err != nil {
diff --git a/cmd/podman/registry/config.go b/cmd/podman/registry/config.go
index 25139a3de..b512ba341 100644
--- a/cmd/podman/registry/config.go
+++ b/cmd/podman/registry/config.go
@@ -15,6 +15,9 @@ import (
)
const (
+ // NoMoveProcess used as cobra.Annotation when command doesn't need Podman to be moved to a separate cgroup
+ NoMoveProcess = "NoMoveProcess"
+
// ParentNSRequired used as cobra.Annotation when command requires root access
ParentNSRequired = "ParentNSRequired"
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 2633e4040..dc4ebb952 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -208,7 +208,8 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
// 3) command doesn't require Parent Namespace
_, found := cmd.Annotations[registry.ParentNSRequired]
if !registry.IsRemote() && rootless.IsRootless() && !found {
- err := registry.ContainerEngine().SetupRootless(registry.Context(), cmd)
+ _, noMoveProcess := cmd.Annotations[registry.NoMoveProcess]
+ err := registry.ContainerEngine().SetupRootless(registry.Context(), noMoveProcess)
if err != nil {
return err
}
diff --git a/cmd/podman/system/migrate.go b/cmd/podman/system/migrate.go
index 9940cd063..b9dc272d7 100644
--- a/cmd/podman/system/migrate.go
+++ b/cmd/podman/system/migrate.go
@@ -22,7 +22,10 @@ var (
`
migrateCommand = &cobra.Command{
- Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Annotations: map[string]string{
+ registry.EngineMode: registry.ABIMode,
+ registry.NoMoveProcess: registry.NoMoveProcess,
+ },
Use: "migrate [options]",
Args: validate.NoArgs,
Short: "Migrate containers",
diff --git a/docs/source/markdown/podman-cp.1.md b/docs/source/markdown/podman-cp.1.md
index 43ee4cdff..e04542aa2 100644
--- a/docs/source/markdown/podman-cp.1.md
+++ b/docs/source/markdown/podman-cp.1.md
@@ -9,10 +9,10 @@ podman\-cp - Copy files/folders between a container and the local filesystem
**podman container cp** [*options*] [*container*:]*src_path* [*container*:]*dest_path*
## DESCRIPTION
-Copy the contents of **src_path** to the **dest_path**. You can copy from the container's filesystem to the local machine or the reverse, from the local filesystem to the container.
+Copy the contents of **src_path** to the **dest_path**. You can copy from the container's filesystem to the local machine or the reverse, from the local filesystem to the container or between two containers.
If `-` is specified for either the SRC_PATH or DEST_PATH, you can also stream a tar archive from STDIN or to STDOUT.
-The CONTAINER can be a running or stopped container. The **src_path** or **dest_path** can be a file or directory.
+The containers can be running or stopped. The **src_path** or **dest_path** can be a file or directory.
The **podman cp** command assumes container paths are relative to the container's root directory (i.e., `/`).
@@ -70,10 +70,9 @@ The default is *true*.
## ALTERNATIVES
-Podman has much stronger capabilities than just `podman cp` to achieve copy files between host and container.
+Podman has much stronger capabilities than just `podman cp` to achieve copying files between the host and containers.
-Using standard podman-mount and podman-umount takes advantage of the entire linux tool chain, rather
-then just cp.
+Using standard podman-mount and podman-umount takes advantage of the entire linux tool chain, rather than just cp.
If a user wants to copy contents out of a container or into a container, they can execute a few simple commands.
@@ -113,6 +112,8 @@ podman cp containerID:/myapp/ /myapp/
podman cp containerID:/home/myuser/. /home/myuser/
+podman cp containerA:/myapp containerB:/yourapp
+
podman cp - containerID:/myfiles.tar.gz < myfiles.tar.gz
## SEE ALSO
diff --git a/go.mod b/go.mod
index 2c573ea8b..fdfd197ad 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,7 @@ require (
github.com/containers/image/v5 v5.14.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.5.2
- github.com/containers/storage v1.32.6
+ github.com/containers/storage v1.33.0
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cri-o/ocicni v0.2.1-0.20210621164014-d0acc7862283
diff --git a/go.sum b/go.sum
index 7066c2ebf..b2429e42f 100644
--- a/go.sum
+++ b/go.sum
@@ -260,8 +260,9 @@ github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw
github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.32.2/go.mod h1:YIBxxjfXZTi04Ah49sh1uSGfmT1V89+I5i3deRobzQo=
-github.com/containers/storage v1.32.6 h1:NqdFRewXO/PYPjgCAScoigZc5QUA21yapSEj6kqD8cw=
github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw=
+github.com/containers/storage v1.33.0 h1:sTk1Mfz3uSNg7cxeaDb0Ld8/UV+8pZEOQjvysjJuzX8=
+github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
diff --git a/libpod/container_api.go b/libpod/container_api.go
index ddc5aa684..637f5b686 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -854,7 +854,7 @@ func (c *Container) ShouldRestart(ctx context.Context) bool {
// CopyFromArchive copies the contents from the specified tarStream to path
// *inside* the container.
-func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, chown bool, tarStream io.Reader) (func() error, error) {
+func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, chown bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -864,7 +864,7 @@ func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, c
}
}
- return c.copyFromArchive(ctx, containerPath, chown, tarStream)
+ return c.copyFromArchive(ctx, containerPath, chown, rename, tarStream)
}
// CopyToArchive copies the contents from the specified path *inside* the
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index 01e7ecacb..a35824289 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -23,7 +23,7 @@ import (
"golang.org/x/sys/unix"
)
-func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool, reader io.Reader) (func() error, error) {
+func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool, rename map[string]string, reader io.Reader) (func() error, error) {
var (
mountPoint string
resolvedRoot string
@@ -89,6 +89,7 @@ func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool
GIDMap: c.config.IDMappings.GIDMap,
ChownDirs: idPair,
ChownFiles: idPair,
+ Rename: rename,
}
return c.joinMountAndExec(ctx,
diff --git a/pkg/api/handlers/compat/containers_archive.go b/pkg/api/handlers/compat/containers_archive.go
index a9d74e5f4..541f702e7 100644
--- a/pkg/api/handlers/compat/containers_archive.go
+++ b/pkg/api/handlers/compat/containers_archive.go
@@ -1,6 +1,7 @@
package compat
import (
+ "encoding/json"
"fmt"
"net/http"
"os"
@@ -93,8 +94,9 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder, runtime *libpod.Runtime) {
query := struct {
- Path string `schema:"path"`
- Chown bool `schema:"copyUIDGID"`
+ Path string `schema:"path"`
+ Chown bool `schema:"copyUIDGID"`
+ Rename string `schema:"rename"`
// TODO handle params below
NoOverwriteDirNonDir bool `schema:"noOverwriteDirNonDir"`
}{
@@ -107,10 +109,19 @@ func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder,
return
}
+ var rename map[string]string
+ if query.Rename != "" {
+ if err := json.Unmarshal([]byte(query.Rename), &rename); err != nil {
+ utils.Error(w, "Bad Request.", http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
+ return
+ }
+ }
+
containerName := utils.GetName(r)
containerEngine := abi.ContainerEngine{Libpod: runtime}
- copyFunc, err := containerEngine.ContainerCopyFromArchive(r.Context(), containerName, query.Path, r.Body, entities.CopyOptions{Chown: query.Chown})
+ copyOptions := entities.CopyOptions{Chown: query.Chown, Rename: rename}
+ copyFunc, err := containerEngine.ContainerCopyFromArchive(r.Context(), containerName, query.Path, r.Body, copyOptions)
if errors.Cause(err) == define.ErrNoSuchCtr || os.IsNotExist(err) {
// 404 is returned for an absent container and path. The
// clients must deal with it accordingly.
diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go
index ee7449fbb..82d72ee6a 100644
--- a/pkg/api/server/register_archive.go
+++ b/pkg/api/server/register_archive.go
@@ -151,6 +151,10 @@ func (s *APIServer) registerArchiveHandlers(r *mux.Router) error {
// type: string
// description: Path to a directory in the container to extract
// required: true
+ // - in: query
+ // name: rename
+ // type: string
+ // description: JSON encoded map[string]string to translate paths
// responses:
// 200:
// description: no error
diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go
index b74a938d1..cf088441f 100644
--- a/pkg/bindings/containers/types.go
+++ b/pkg/bindings/containers/types.go
@@ -264,4 +264,6 @@ type CopyOptions struct {
// If used with CopyFromArchive and set to true it will change ownership of files from the source tar archive
// to the primary uid/gid of the target container.
Chown *bool `schema:"copyUIDGID"`
+ // Map to translate path names.
+ Rename map[string]string
}
diff --git a/pkg/bindings/containers/types_copy_options.go b/pkg/bindings/containers/types_copy_options.go
index 12ad085fd..0624b450e 100644
--- a/pkg/bindings/containers/types_copy_options.go
+++ b/pkg/bindings/containers/types_copy_options.go
@@ -35,3 +35,19 @@ func (o *CopyOptions) GetChown() bool {
}
return *o.Chown
}
+
+// WithRename sets the map used to rename paths while copying.
+func (o *CopyOptions) WithRename(value map[string]string) *CopyOptions {
+ v := value
+ o.Rename = v
+ return o
+}
+
+// GetRename returns the rename map; it may be nil if unset.
+func (o *CopyOptions) GetRename() map[string]string {
+ var rename map[string]string
+ if o.Rename == nil {
+ return rename
+ }
+ return o.Rename
+}
diff --git a/pkg/copy/parse.go b/pkg/copy/parse.go
index 39e0e1547..93edec5fa 100644
--- a/pkg/copy/parse.go
+++ b/pkg/copy/parse.go
@@ -18,18 +18,6 @@ func ParseSourceAndDestination(source, destination string) (string, string, stri
sourceContainer, sourcePath := parseUserInput(source)
destContainer, destPath := parseUserInput(destination)
- numContainers := 0
- if len(sourceContainer) > 0 {
- numContainers++
- }
- if len(destContainer) > 0 {
- numContainers++
- }
-
- if numContainers != 1 {
- return "", "", "", "", errors.Errorf("invalid arguments %q, %q: exactly 1 container expected but %d specified", source, destination, numContainers)
- }
-
if len(sourcePath) == 0 || len(destPath) == 0 {
return "", "", "", "", errors.Errorf("invalid arguments %q, %q: you must specify paths", source, destination)
}
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 6d84a4ecb..564921c52 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -165,6 +165,8 @@ type CopyOptions struct {
// it will change ownership of files from the source tar archive
// to the primary uid/gid of the destination container.
Chown bool
+ // Map to translate path names.
+ Rename map[string]string
}
type CommitReport struct {
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 62e83fab3..d573e4704 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/domain/entities/reports"
"github.com/containers/podman/v3/pkg/specgen"
- "github.com/spf13/cobra"
)
type ContainerCopyFunc func() error
@@ -82,7 +81,7 @@ type ContainerEngine interface {
PodStop(ctx context.Context, namesOrIds []string, options PodStopOptions) ([]*PodStopReport, error)
PodTop(ctx context.Context, options PodTopOptions) (*StringSliceReport, error)
PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error)
- SetupRootless(ctx context.Context, cmd *cobra.Command) error
+ SetupRootless(ctx context.Context, noMoveProcess bool) error
SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error)
SecretInspect(ctx context.Context, nameOrIDs []string) ([]*SecretInfoReport, []error, error)
SecretList(ctx context.Context) ([]*SecretInfoReport, error)
diff --git a/pkg/domain/infra/abi/archive.go b/pkg/domain/infra/abi/archive.go
index 1a5bb6dc4..b60baa935 100644
--- a/pkg/domain/infra/abi/archive.go
+++ b/pkg/domain/infra/abi/archive.go
@@ -12,7 +12,7 @@ func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrI
if err != nil {
return nil, err
}
- return container.CopyFromArchive(ctx, containerPath, options.Chown, reader)
+ return container.CopyFromArchive(ctx, containerPath, options.Chown, options.Rename, reader)
}
func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID string, containerPath string, writer io.Writer) (entities.ContainerCopyFunc, error) {
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 155cda21d..bc98edd06 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -24,7 +24,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -57,7 +56,7 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
return info, err
}
-func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error {
+func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error {
// do it only after podman has already re-execed and running with uid==0.
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
if err != nil {
@@ -104,6 +103,9 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
if became {
os.Exit(ret)
}
+ if noMoveProcess {
+ return nil
+ }
// if there is no pid file, try to join existing containers, and create a pause process.
ctrs, err := ic.Libpod.GetRunningContainers()
@@ -118,9 +120,10 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
}
became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths)
+
if err := movePauseProcessToScope(ic.Libpod); err != nil {
- conf, err := ic.Config(context.Background())
- if err != nil {
+ conf, err2 := ic.Config(context.Background())
+ if err2 != nil {
return err
}
if conf.Engine.CgroupManager == config.SystemdCgroupsManager {
@@ -148,7 +151,6 @@ func movePauseProcessToScope(r *libpod.Runtime) error {
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
}
-
data, err := ioutil.ReadFile(pausePidPath)
if err != nil {
return errors.Wrapf(err, "cannot read pause pid file")
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index d7e8a4e46..58f9c5fb0 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -853,7 +853,8 @@ func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, o
}
func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrID, path string, reader io.Reader, options entities.CopyOptions) (entities.ContainerCopyFunc, error) {
- return containers.CopyFromArchiveWithOptions(ic.ClientCtx, nameOrID, path, reader, new(containers.CopyOptions).WithChown(options.Chown))
+ copyOptions := new(containers.CopyOptions).WithChown(options.Chown).WithRename(options.Rename)
+ return containers.CopyFromArchiveWithOptions(ic.ClientCtx, nameOrID, path, reader, copyOptions)
}
func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (entities.ContainerCopyFunc, error) {
diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go
index 7400d3771..6b43cf038 100644
--- a/pkg/domain/infra/tunnel/system.go
+++ b/pkg/domain/infra/tunnel/system.go
@@ -7,14 +7,13 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/bindings/system"
"github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/spf13/cobra"
)
func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
return system.Info(ic.ClientCtx, nil)
}
-func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error {
+func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error {
panic(errors.New("rootless engine mode is not supported when tunneling"))
}
diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats
index 5778eb46e..39f439e7b 100644
--- a/test/system/065-cp.bats
+++ b/test/system/065-cp.bats
@@ -22,8 +22,7 @@ load helpers
mkdir -p $srcdir/subdir
echo "${randomcontent[2]}" > $srcdir/subdir/dotfile.
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer mkdir /srv/subdir
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir; sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -41,7 +40,6 @@ load helpers
0 | /tmp | /tmp/hostfile0 | copy to /tmp
1 | /tmp/ | /tmp/hostfile1 | copy to /tmp/
2 | /tmp/. | /tmp/hostfile2 | copy to /tmp/.
-0 | /tmp/hostfile2 | /tmp/hostfile2 | overwrite previous copy
0 | /tmp/anotherbase.txt | /tmp/anotherbase.txt | copy to /tmp, new name
0 | . | /srv/hostfile0 | copy to workdir (rel path), new name
1 | ./ | /srv/hostfile1 | copy to workdir (rel path), new name
@@ -175,11 +173,12 @@ load helpers
random-1-$(random_string 15)
random-2-$(random_string 20)
)
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/containerfile"
- run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/dotfile."
- run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /srv/containerfile1"
- run_podman exec cpcontainer sh -c "mkdir /srv/subdir; echo ${randomcontent[2]} > /srv/subdir/containerfile2"
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /tmp/containerfile;
+ echo ${randomcontent[0]} > /tmp/dotfile.;
+ echo ${randomcontent[1]} > /srv/containerfile1;
+ echo ${randomcontent[2]} > /srv/subdir/containerfile2;
+ sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -226,6 +225,98 @@ load helpers
}
+@test "podman cp file from container to container" {
+ # Create 3 files with random content in the container.
+ local -a randomcontent=(
+ random-0-$(random_string 10)
+ random-1-$(random_string 15)
+ random-2-$(random_string 20)
+ )
+
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /tmp/containerfile;
+ echo ${randomcontent[0]} > /tmp/dotfile.;
+ echo ${randomcontent[1]} > /srv/containerfile1;
+ echo ${randomcontent[2]} > /srv/subdir/containerfile2;
+ sleep infinity"
+
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
+ # format is: <id> | <source arg to cp> | <destination arg (appended to $srcdir) to cp> | <full dest path (appended to $srcdir)> | <test name>
+ tests="
+0 | /tmp/containerfile | | /containerfile | /
+0 | /tmp/dotfile. | | /dotfile. | /
+0 | /tmp/containerfile | / | /containerfile | /
+0 | /tmp/containerfile | /. | /containerfile | /.
+0 | /tmp/containerfile | /newfile | /newfile | /newfile
+1 | containerfile1 | / | /containerfile1 | copy from workdir (rel path) to /
+2 | subdir/containerfile2 | / | /containerfile2 | copy from workdir/subdir (rel path) to /
+"
+
+ # From RUNNING container
+ while read id src dest dest_fullname description; do
+ # dest may be "''" for empty table cells
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # From CREATED container
+ run_podman create --name cpcontainer --workdir=/srv $cpimage
+ while read id src dest dest_fullname description; do
+ # dest may be "''" for empty table cells
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+ run_podman rm -f cpcontainer
+
+ run_podman rmi -f $cpimage
+}
+
+
@test "podman cp dir from host to container" {
srcdir=$PODMAN_TMPDIR
mkdir -p $srcdir/dir/sub
@@ -241,8 +332,7 @@ load helpers
mkdir -p $srcdir/dir.
cp -r $srcdir/dir/* $srcdir/dir.
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer mkdir /srv/subdir
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir; sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -309,12 +399,12 @@ load helpers
random-0-$(random_string 10)
random-1-$(random_string 15)
)
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c "mkdir /srv/subdir; echo ${randomcontent[0]} > /srv/subdir/containerfile0"
- run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /srv/subdir/containerfile1"
- # "." and "dir/." will copy the contents, so make sure that a dir ending
- # with dot is treated correctly.
- run_podman exec cpcontainer sh -c 'mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./'
+
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /srv/subdir/containerfile0; \
+ echo ${randomcontent[1]} > /srv/subdir/containerfile1; \
+ mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./; \
+ sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -377,6 +467,110 @@ load helpers
}
+@test "podman cp dir from container to container" {
+ # Create 2 files with random content in the container.
+ local -a randomcontent=(
+ random-0-$(random_string 10)
+ random-1-$(random_string 15)
+ )
+
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /srv/subdir/containerfile0; \
+ echo ${randomcontent[1]} > /srv/subdir/containerfile1; \
+ mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./; \
+ sleep infinity"
+
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
+ # format is: <source arg to cp (appended to /srv)> | <dest> | <full dest path> | <test name>
+ tests="
+/srv | | /srv/subdir | copy /srv
+/srv | /newdir | /newdir/subdir | copy /srv to /newdir
+/srv/ | | /srv/subdir | copy /srv/
+/srv/. | | /subdir | copy /srv/.
+/srv/. | /newdir | /newdir/subdir | copy /srv/. to /newdir
+/srv/subdir/. | | | copy /srv/subdir/.
+/tmp/subdir. | | /subdir. | copy /tmp/subdir.
+"
+
+ # From RUNNING container
+ while read src dest dest_fullname description; do
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ if [[ $dest_fullname == "''" ]];then
+ unset dest_fullname
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # From CREATED container
+ run_podman create --name cpcontainer --workdir=/srv $cpimage
+ while read src dest dest_fullname description; do
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ if [[ $dest_fullname == "''" ]];then
+ unset dest_fullname
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+
+ run_podman rm -f cpcontainer
+ run_podman rmi -f $cpimage
+}
+
+
@test "podman cp symlinked directory from container" {
destdir=$PODMAN_TMPDIR/cp-weird-symlink
mkdir -p $destdir
@@ -387,10 +581,10 @@ load helpers
random-1-$(random_string 15)
)
- run_podman run -d --name cpcontainer $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/containerfile0"
- run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /tmp/containerfile1"
- run_podman exec cpcontainer sh -c "mkdir /tmp/sub && cd /tmp/sub && ln -s .. weirdlink"
+ run_podman run -d --name cpcontainer $IMAGE sh -c "echo ${randomcontent[0]} > /tmp/containerfile0; \
+ echo ${randomcontent[1]} > /tmp/containerfile1; \
+ mkdir /tmp/sub && cd /tmp/sub && ln -s .. weirdlink; \
+ sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 5d245052c..7aa332e41 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.32.6
+1.33.0
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 2fa54a207..ecfbae916 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -364,12 +364,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Try to enable project quota support over xfs.
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
- } else if opts.quota.Size > 0 {
- return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
+ return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err)
}
- } else if opts.quota.Size > 0 {
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
- return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+ return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -400,6 +400,13 @@ func parseOptions(options []string) (*overlayOptions, error) {
return nil, err
}
o.quota.Size = uint64(size)
+ case "inodes":
+ logrus.Debugf("overlay: inodes=%s", val)
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ o.quota.Inodes = uint64(inodes)
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
@@ -613,6 +620,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if unshare.IsRootless() {
flags = fmt.Sprintf("%s,userxattr", flags)
}
+ if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
+ logrus.Debugf("unable to create kernel-style whiteout: %v", err)
+ return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
+ }
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
@@ -784,6 +795,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
}
+ if _, ok := opts.StorageOpt["inodes"]; !ok {
+ if opts.StorageOpt == nil {
+ opts.StorageOpt = map[string]string{}
+ }
+ opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
+ }
+
return d.create(id, parent, opts)
}
@@ -794,6 +812,9 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
+ if _, ok := opts.StorageOpt["inodes"]; ok {
+ return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
+ }
}
return d.create(id, parent, opts)
@@ -850,7 +871,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if driver.options.quota.Size > 0 {
quota.Size = driver.options.quota.Size
}
-
+ if driver.options.quota.Inodes > 0 {
+ quota.Inodes = driver.options.quota.Inodes
+ }
}
// Set container disk quota limit
// If it is set to 0, we will track the disk usage, but not enforce a limit
@@ -922,6 +945,12 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
return err
}
driver.options.quota.Size = uint64(size)
+ case "inodes":
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ driver.options.quota.Inodes = uint64(inodes)
default:
return fmt.Errorf("Unknown option %s", key)
}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index d805623b5..a435f6b82 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -38,8 +38,8 @@ struct fsxattr {
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
-#ifndef XFS_PROJ_QUOTA
-#define XFS_PROJ_QUOTA 2
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
@@ -61,9 +61,10 @@ import (
"golang.org/x/sys/unix"
)
-// Quota limit params - currently we only control blocks hard limit
+// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
@@ -119,7 +120,8 @@ func NewControl(basePath string) (*Control, error) {
// a quota on the first available project id
//
quota := Quota{
- Size: 0,
+ Size: 0,
+ Inodes: 0,
}
if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
return nil, err
@@ -166,7 +168,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
//
// set the quota limit for the container's project id
//
- logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
+ logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return setProjectQuota(q.backingFsBlockDev, projectID, quota)
}
@@ -175,11 +177,18 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
- d.d_flags = C.XFS_PROJ_QUOTA
+ d.d_flags = C.FS_PROJ_QUOTA
- d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
- d.d_blk_hardlimit = C.__u64(quota.Size / 512)
- d.d_blk_softlimit = d.d_blk_hardlimit
+ if quota.Size > 0 {
+ d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
+ d.d_blk_hardlimit = C.__u64(quota.Size / 512)
+ d.d_blk_softlimit = d.d_blk_hardlimit
+ }
+ if quota.Inodes > 0 {
+ d.d_fieldmask |= C.FS_DQ_IHARD | C.FS_DQ_ISOFT
+ d.d_ino_hardlimit = C.__u64(quota.Inodes)
+ d.d_ino_softlimit = d.d_ino_hardlimit
+ }
var cs = C.CString(backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
@@ -202,6 +211,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
+ quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index be6c75a58..7469138db 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -8,7 +8,8 @@ import (
// Quota limit params - currently we only control blocks hard limit
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index d46000ace..e4f484d6b 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -16,7 +16,7 @@ require (
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.4.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.0
+ github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.8.2
github.com/pkg/errors v0.9.1
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 081da00e4..2607dbc9b 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -99,7 +99,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.1/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -468,8 +468,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0 h1:QOhAQAYUlKeofuyeKdR6ITvOnXLPbEAjPMjz9wCUXcU=
-github.com/opencontainers/runc v1.0.0/go.mod h1:MU2S3KEB2ZExnhnAQYbwjdYV6HwKtDlNbA2Z2OeNDeA=
+github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
+github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 50e3e3555..48e846f7c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -645,10 +645,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
file.Close()
- case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ case tar.TypeBlock, tar.TypeChar:
if inUserns { // cannot create devices in a userns
+ logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
return nil
}
+ fallthrough
+ case tar.TypeFifo:
// Handle this is an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index e257737e7..7c3e442da 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/userns"
"golang.org/x/sys/unix"
)
@@ -88,11 +87,6 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if userns.RunningInUserNS() {
- // cannot create a device if running in user namespace
- return nil
- }
-
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
index 605be4b8f..f2811fb9a 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -2,72 +2,29 @@ package chunked
import (
"bytes"
- "encoding/base64"
"encoding/binary"
- "encoding/json"
"fmt"
"io"
- "io/ioutil"
- "time"
- "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/chunked/compressor"
+ "github.com/containers/storage/pkg/chunked/internal"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/archive/tar"
)
-type zstdTOC struct {
- Version int `json:"version"`
- Entries []zstdFileMetadata `json:"entries"`
-}
-
-type zstdFileMetadata struct {
- Type string `json:"type"`
- Name string `json:"name"`
- Linkname string `json:"linkName,omitempty"`
- Mode int64 `json:"mode,omitempty"`
- Size int64 `json:"size"`
- UID int `json:"uid"`
- GID int `json:"gid"`
- ModTime time.Time `json:"modtime"`
- AccessTime time.Time `json:"accesstime"`
- ChangeTime time.Time `json:"changetime"`
- Devmajor int64 `json:"devMajor"`
- Devminor int64 `json:"devMinor"`
- Xattrs map[string]string `json:"xattrs,omitempty"`
- Digest string `json:"digest,omitempty"`
- Offset int64 `json:"offset,omitempty"`
- EndOffset int64 `json:"endOffset,omitempty"`
-
- // Currently chunking is not supported.
- ChunkSize int64 `json:"chunkSize,omitempty"`
- ChunkOffset int64 `json:"chunkOffset,omitempty"`
- ChunkDigest string `json:"chunkDigest,omitempty"`
-}
-
const (
- TypeReg = "reg"
- TypeChunk = "chunk"
- TypeLink = "hardlink"
- TypeChar = "char"
- TypeBlock = "block"
- TypeDir = "dir"
- TypeFifo = "fifo"
- TypeSymlink = "symlink"
+ TypeReg = internal.TypeReg
+ TypeChunk = internal.TypeChunk
+ TypeLink = internal.TypeLink
+ TypeChar = internal.TypeChar
+ TypeBlock = internal.TypeBlock
+ TypeDir = internal.TypeDir
+ TypeFifo = internal.TypeFifo
+ TypeSymlink = internal.TypeSymlink
)
-var tarTypes = map[byte]string{
- tar.TypeReg: TypeReg,
- tar.TypeRegA: TypeReg,
- tar.TypeLink: TypeLink,
- tar.TypeChar: TypeChar,
- tar.TypeBlock: TypeBlock,
- tar.TypeDir: TypeDir,
- tar.TypeFifo: TypeFifo,
- tar.TypeSymlink: TypeSymlink,
-}
-
var typesToTar = map[string]byte{
TypeReg: tar.TypeReg,
TypeLink: tar.TypeLink,
@@ -78,14 +35,6 @@ var typesToTar = map[string]byte{
TypeSymlink: tar.TypeSymlink,
}
-func getType(t byte) (string, error) {
- r, found := tarTypes[t]
- if !found {
- return "", fmt.Errorf("unknown tarball type: %v", t)
- }
- return r, nil
-}
-
func typeToTarType(t string) (byte, error) {
r, found := typesToTar[t]
if !found {
@@ -94,52 +43,30 @@ func typeToTarType(t string) (byte, error) {
return r, nil
}
-const (
- manifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
- manifestInfoKey = "io.containers.zstd-chunked.manifest-position"
-
- // manifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
- manifestTypeCRFS = 1
-
- // footerSizeSupported is the footer size supported by this implementation.
- // Newer versions of the image format might increase this value, so reject
- // any version that is not supported.
- footerSizeSupported = 40
-)
-
-var (
- // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
- // will ignore it.
- // https://tools.ietf.org/html/rfc8478#section-3.1.2
- skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
-
- zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
-)
-
func isZstdChunkedFrameMagic(data []byte) bool {
if len(data) < 8 {
return false
}
- return bytes.Equal(zstdChunkedFrameMagic, data[:8])
+ return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, error) {
- footerSize := int64(footerSizeSupported)
+ footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, errors.New("blob too small")
}
- manifestChecksumAnnotation := annotations[manifestChecksumKey]
+ manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
- return nil, fmt.Errorf("manifest checksum annotation %q not found", manifestChecksumKey)
+ return nil, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
- if offsetMetadata := annotations[manifestInfoKey]; offsetMetadata != "" {
+ if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, err
}
@@ -173,7 +100,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
}
}
- if manifestType != manifestTypeCRFS {
+ if manifestType != internal.ManifestTypeCRFS {
return nil, errors.New("invalid manifest type")
}
@@ -235,279 +162,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return manifest, nil
}
-func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
- if _, err := dest.Write(skippableFrameMagic); err != nil {
- return err
- }
-
- var size []byte = make([]byte, 4)
- binary.LittleEndian.PutUint32(size, uint32(len(data)))
- if _, err := dest.Write(size); err != nil {
- return err
- }
- if _, err := dest.Write(data); err != nil {
- return err
- }
- return nil
-}
-
-func writeZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []zstdFileMetadata, level int) error {
- // 8 is the size of the zstd skippable frame header + the frame size
- manifestOffset := offset + 8
-
- toc := zstdTOC{
- Version: 1,
- Entries: metadata,
- }
-
- // Generate the manifest
- manifest, err := json.Marshal(toc)
- if err != nil {
- return err
- }
-
- var compressedBuffer bytes.Buffer
- zstdWriter, err := zstdWriterWithLevel(&compressedBuffer, level)
- if err != nil {
- return err
- }
- if _, err := zstdWriter.Write(manifest); err != nil {
- zstdWriter.Close()
- return err
- }
- if err := zstdWriter.Close(); err != nil {
- return err
- }
- compressedManifest := compressedBuffer.Bytes()
-
- manifestDigester := digest.Canonical.Digester()
- manifestChecksum := manifestDigester.Hash()
- if _, err := manifestChecksum.Write(compressedManifest); err != nil {
- return err
- }
-
- outMetadata[manifestChecksumKey] = manifestDigester.Digest().String()
- outMetadata[manifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), manifestTypeCRFS)
- if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
- return err
- }
-
- // Store the offset to the manifest and its size in LE order
- var manifestDataLE []byte = make([]byte, footerSizeSupported)
- binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
- binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(manifestTypeCRFS))
- copy(manifestDataLE[32:], zstdChunkedFrameMagic)
-
- return appendZstdSkippableFrame(dest, manifestDataLE)
-}
-
-func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
- // total written so far. Used to retrieve partial offsets in the file
- dest := ioutils.NewWriteCounter(destFile)
-
- tr := tar.NewReader(reader)
- tr.RawAccounting = true
-
- buf := make([]byte, 4096)
-
- zstdWriter, err := zstdWriterWithLevel(dest, level)
- if err != nil {
- return err
- }
- defer func() {
- if zstdWriter != nil {
- zstdWriter.Close()
- zstdWriter.Flush()
- }
- }()
-
- restartCompression := func() (int64, error) {
- var offset int64
- if zstdWriter != nil {
- if err := zstdWriter.Close(); err != nil {
- return 0, err
- }
- if err := zstdWriter.Flush(); err != nil {
- return 0, err
- }
- offset = dest.Count
- zstdWriter.Reset(dest)
- }
- return offset, nil
- }
-
- var metadata []zstdFileMetadata
- for {
- hdr, err := tr.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
-
- rawBytes := tr.RawBytes()
- if _, err := zstdWriter.Write(rawBytes); err != nil {
- return err
- }
- payloadDigester := digest.Canonical.Digester()
- payloadChecksum := payloadDigester.Hash()
-
- payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
-
- // Now handle the payload, if any
- var startOffset, endOffset int64
- checksum := ""
- for {
- read, errRead := tr.Read(buf)
- if errRead != nil && errRead != io.EOF {
- return err
- }
-
- // restart the compression only if there is
- // a payload.
- if read > 0 {
- if startOffset == 0 {
- startOffset, err = restartCompression()
- if err != nil {
- return err
- }
- }
- _, err := payloadDest.Write(buf[:read])
- if err != nil {
- return err
- }
- }
- if errRead == io.EOF {
- if startOffset > 0 {
- endOffset, err = restartCompression()
- if err != nil {
- return err
- }
- checksum = payloadDigester.Digest().String()
- }
- break
- }
- }
-
- typ, err := getType(hdr.Typeflag)
- if err != nil {
- return err
- }
- xattrs := make(map[string]string)
- for k, v := range hdr.Xattrs {
- xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
- }
- m := zstdFileMetadata{
- Type: typ,
- Name: hdr.Name,
- Linkname: hdr.Linkname,
- Mode: hdr.Mode,
- Size: hdr.Size,
- UID: hdr.Uid,
- GID: hdr.Gid,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- Devmajor: hdr.Devmajor,
- Devminor: hdr.Devminor,
- Xattrs: xattrs,
- Digest: checksum,
- Offset: startOffset,
- EndOffset: endOffset,
-
- // ChunkSize is 0 for the last chunk
- ChunkSize: 0,
- ChunkOffset: 0,
- ChunkDigest: checksum,
- }
- metadata = append(metadata, m)
- }
-
- rawBytes := tr.RawBytes()
- if _, err := zstdWriter.Write(rawBytes); err != nil {
- return err
- }
- if err := zstdWriter.Flush(); err != nil {
- return err
- }
- if err := zstdWriter.Close(); err != nil {
- return err
- }
- zstdWriter = nil
-
- return writeZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
-}
-
-type zstdChunkedWriter struct {
- tarSplitOut *io.PipeWriter
- tarSplitErr chan error
-}
-
-func (w zstdChunkedWriter) Close() error {
- err := <-w.tarSplitErr
- if err != nil {
- w.tarSplitOut.Close()
- return err
- }
- return w.tarSplitOut.Close()
-}
-
-func (w zstdChunkedWriter) Write(p []byte) (int, error) {
- select {
- case err := <-w.tarSplitErr:
- w.tarSplitOut.Close()
- return 0, err
- default:
- return w.tarSplitOut.Write(p)
- }
-}
-
-// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
-// compressed separately so it can be addressed separately. Idea based on CRFS:
-// https://github.com/google/crfs
-// The difference with CRFS is that the zstd compression is used instead of gzip.
-// The reason for it is that zstd supports embedding metadata ignored by the decoder
-// as part of the compressed stream.
-// A manifest json file with all the metadata is appended at the end of the tarball
-// stream, using zstd skippable frames.
-// The final file will look like:
-// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
-// Where:
-// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
-// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
-// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
-// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
-func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
- ch := make(chan error, 1)
- r, w := io.Pipe()
-
- go func() {
- ch <- writeZstdChunkedStream(out, metadata, r, level)
- io.Copy(ioutil.Discard, r)
- r.Close()
- close(ch)
- }()
-
- return zstdChunkedWriter{
- tarSplitOut: w,
- tarSplitErr: ch,
- }, nil
-}
-
-func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
- el := zstd.EncoderLevelFromZstd(level)
- return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
-}
-
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
- if level == nil {
- l := 3
- level = &l
- }
-
- return zstdChunkedWriterWithLevel(r, metadata, *level)
+ return compressor.ZstdCompressor(r, metadata, level)
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 000000000..a205b73fd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,220 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "encoding/base64"
+ "io"
+ "io/ioutil"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+ // total written so far. Used to retrieve partial offsets in the file
+ dest := ioutils.NewWriteCounter(destFile)
+
+ tr := tar.NewReader(reader)
+ tr.RawAccounting = true
+
+ buf := make([]byte, 4096)
+
+ zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if zstdWriter != nil {
+ zstdWriter.Close()
+ zstdWriter.Flush()
+ }
+ }()
+
+ restartCompression := func() (int64, error) {
+ var offset int64
+ if zstdWriter != nil {
+ if err := zstdWriter.Close(); err != nil {
+ return 0, err
+ }
+ if err := zstdWriter.Flush(); err != nil {
+ return 0, err
+ }
+ offset = dest.Count
+ zstdWriter.Reset(dest)
+ }
+ return offset, nil
+ }
+
+ var metadata []internal.ZstdFileMetadata
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+ payloadDigester := digest.Canonical.Digester()
+ payloadChecksum := payloadDigester.Hash()
+
+ payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+
+ // Now handle the payload, if any
+ var startOffset, endOffset int64
+ checksum := ""
+ for {
+ read, errRead := tr.Read(buf)
+ if errRead != nil && errRead != io.EOF {
+			return errRead
+ }
+
+ // restart the compression only if there is
+ // a payload.
+ if read > 0 {
+ if startOffset == 0 {
+ startOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ }
+ _, err := payloadDest.Write(buf[:read])
+ if err != nil {
+ return err
+ }
+ }
+ if errRead == io.EOF {
+ if startOffset > 0 {
+ endOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ checksum = payloadDigester.Digest().String()
+ }
+ break
+ }
+ }
+
+ typ, err := internal.GetType(hdr.Typeflag)
+ if err != nil {
+ return err
+ }
+ xattrs := make(map[string]string)
+ for k, v := range hdr.Xattrs {
+ xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
+ }
+ m := internal.ZstdFileMetadata{
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: hdr.ModTime,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ Digest: checksum,
+ Offset: startOffset,
+ EndOffset: endOffset,
+
+ // ChunkSize is 0 for the last chunk
+ ChunkSize: 0,
+ ChunkOffset: 0,
+ ChunkDigest: checksum,
+ }
+ metadata = append(metadata, m)
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+ if err := zstdWriter.Flush(); err != nil {
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ zstdWriter = nil
+
+ return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
+}
+
+type zstdChunkedWriter struct {
+ tarSplitOut *io.PipeWriter
+ tarSplitErr chan error
+}
+
+func (w zstdChunkedWriter) Close() error {
+ err := <-w.tarSplitErr
+ if err != nil {
+ w.tarSplitOut.Close()
+ return err
+ }
+ return w.tarSplitOut.Close()
+}
+
+func (w zstdChunkedWriter) Write(p []byte) (int, error) {
+ select {
+ case err := <-w.tarSplitErr:
+ w.tarSplitOut.Close()
+ return 0, err
+ default:
+ return w.tarSplitOut.Write(p)
+ }
+}
+
+// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
+// compressed separately so it can be addressed separately. Idea based on CRFS:
+// https://github.com/google/crfs
+// The difference with CRFS is that the zstd compression is used instead of gzip.
+// The reason for it is that zstd supports embedding metadata ignored by the decoder
+// as part of the compressed stream.
+// A manifest json file with all the metadata is appended at the end of the tarball
+// stream, using zstd skippable frames.
+// The final file will look like:
+// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
+// Where:
+// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
+// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
+// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
+// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
+func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
+ ch := make(chan error, 1)
+ r, w := io.Pipe()
+
+ go func() {
+ ch <- writeZstdChunkedStream(out, metadata, r, level)
+ io.Copy(ioutil.Discard, r)
+ r.Close()
+ close(ch)
+ }()
+
+ return zstdChunkedWriter{
+ tarSplitOut: w,
+ tarSplitErr: ch,
+ }, nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level == nil {
+ l := 3
+ level = &l
+ }
+
+ return zstdChunkedWriterWithLevel(r, metadata, *level)
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
new file mode 100644
index 000000000..af0025c20
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -0,0 +1,172 @@
+package internal
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/opencontainers/go-digest"
+)
+
+type ZstdTOC struct {
+ Version int `json:"version"`
+ Entries []ZstdFileMetadata `json:"entries"`
+}
+
+type ZstdFileMetadata struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Linkname string `json:"linkName,omitempty"`
+ Mode int64 `json:"mode,omitempty"`
+ Size int64 `json:"size"`
+ UID int `json:"uid"`
+ GID int `json:"gid"`
+ ModTime time.Time `json:"modtime"`
+ AccessTime time.Time `json:"accesstime"`
+ ChangeTime time.Time `json:"changetime"`
+ Devmajor int64 `json:"devMajor"`
+ Devminor int64 `json:"devMinor"`
+ Xattrs map[string]string `json:"xattrs,omitempty"`
+ Digest string `json:"digest,omitempty"`
+ Offset int64 `json:"offset,omitempty"`
+ EndOffset int64 `json:"endOffset,omitempty"`
+
+ // Currently chunking is not supported.
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+}
+
+const (
+ TypeReg = "reg"
+ TypeChunk = "chunk"
+ TypeLink = "hardlink"
+ TypeChar = "char"
+ TypeBlock = "block"
+ TypeDir = "dir"
+ TypeFifo = "fifo"
+ TypeSymlink = "symlink"
+)
+
+var TarTypes = map[byte]string{
+ tar.TypeReg: TypeReg,
+ tar.TypeRegA: TypeReg,
+ tar.TypeLink: TypeLink,
+ tar.TypeChar: TypeChar,
+ tar.TypeBlock: TypeBlock,
+ tar.TypeDir: TypeDir,
+ tar.TypeFifo: TypeFifo,
+ tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+ r, found := TarTypes[t]
+ if !found {
+ return "", fmt.Errorf("unknown tarball type: %v", t)
+ }
+ return r, nil
+}
+
+const (
+ ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
+ ManifestInfoKey = "io.containers.zstd-chunked.manifest-position"
+
+ // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+ ManifestTypeCRFS = 1
+
+ // FooterSizeSupported is the footer size supported by this implementation.
+ // Newer versions of the image format might increase this value, so reject
+ // any version that is not supported.
+ FooterSizeSupported = 40
+)
+
+var (
+ // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
+ // will ignore it.
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2
+ skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
+
+ ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+)
+
+func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
+ if _, err := dest.Write(skippableFrameMagic); err != nil {
+ return err
+ }
+
+ var size []byte = make([]byte, 4)
+ binary.LittleEndian.PutUint32(size, uint32(len(data)))
+ if _, err := dest.Write(size); err != nil {
+ return err
+ }
+ if _, err := dest.Write(data); err != nil {
+ return err
+ }
+ return nil
+}
+
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []ZstdFileMetadata, level int) error {
+ // 8 is the size of the zstd skippable frame header + the frame size
+ manifestOffset := offset + 8
+
+ toc := ZstdTOC{
+ Version: 1,
+ Entries: metadata,
+ }
+
+ // Generate the manifest
+ manifest, err := json.Marshal(toc)
+ if err != nil {
+ return err
+ }
+
+ var compressedBuffer bytes.Buffer
+ zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
+ if err != nil {
+ return err
+ }
+ if _, err := zstdWriter.Write(manifest); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ compressedManifest := compressedBuffer.Bytes()
+
+ manifestDigester := digest.Canonical.Digester()
+ manifestChecksum := manifestDigester.Hash()
+ if _, err := manifestChecksum.Write(compressedManifest); err != nil {
+ return err
+ }
+
+ outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String()
+ outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS)
+ if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
+ return err
+ }
+
+ // Store the offset to the manifest and its size in LE order
+ var manifestDataLE []byte = make([]byte, FooterSizeSupported)
+ binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
+ binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
+ copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
+
+ return appendZstdSkippableFrame(dest, manifestDataLE)
+}
+
+func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+ el := zstd.EncoderLevelFromZstd(level)
+ return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 8c5062475..0f14d8af9 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -19,6 +19,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
driversCopy "github.com/containers/storage/drivers/copy"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/types"
"github.com/klauspost/compress/zstd"
@@ -39,7 +40,7 @@ const (
type chunkedZstdDiffer struct {
stream ImageSourceSeekable
manifest []byte
- layersMetadata map[string][]zstdFileMetadata
+ layersMetadata map[string][]internal.ZstdFileMetadata
layersTarget map[string]string
}
@@ -75,11 +76,11 @@ func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mod
return dstFile, st.Size(), err
}
-func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[string]map[string]*zstdFileMetadata {
- maps := make(map[string]map[string]*zstdFileMetadata)
+func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetadata) map[string]map[string]*internal.ZstdFileMetadata {
+ maps := make(map[string]map[string]*internal.ZstdFileMetadata)
for layerID, v := range layersMetadata {
- r := make(map[string]*zstdFileMetadata)
+ r := make(map[string]*internal.ZstdFileMetadata)
for i := range v {
r[v[i].Digest] = &v[i]
}
@@ -88,13 +89,13 @@ func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[s
return maps
}
-func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[string]string, error) {
+func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata, map[string]string, error) {
allLayers, err := store.Layers()
if err != nil {
return nil, nil, err
}
- layersMetadata := make(map[string][]zstdFileMetadata)
+ layersMetadata := make(map[string][]internal.ZstdFileMetadata)
layersTarget := make(map[string]string)
for _, r := range allLayers {
manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
@@ -106,7 +107,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
if err != nil {
return nil, nil, err
}
- var toc zstdTOC
+ var toc internal.ZstdTOC
if err := json.Unmarshal(manifest, &toc); err != nil {
continue
}
@@ -123,7 +124,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
- if _, ok := annotations[manifestChecksumKey]; ok {
+ if _, ok := annotations[internal.ManifestChecksumKey]; ok {
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
}
return nil, errors.New("blob type not supported for partial retrieval")
@@ -147,7 +148,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
}, nil
}
-func findFileInOtherLayers(file zstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*zstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
// this is ugly, needs to be indexed
for layerID, checksums := range layersMetadata {
m, found := checksums[file.Digest]
@@ -194,7 +195,7 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
// paths.
-func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
if !strings.HasPrefix(sourceFile, "/usr/") {
// limit host deduplication to files under /usr.
@@ -251,7 +252,7 @@ func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDir
return dstFile, written, nil
}
-func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) error {
+func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
return nil
}
@@ -278,7 +279,7 @@ func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) er
}
type missingFile struct {
- File *zstdFileMetadata
+ File *internal.ZstdFileMetadata
Gap int64
}
@@ -291,7 +292,7 @@ type missingChunk struct {
Files []missingFile
}
-func setFileAttrs(file *os.File, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
if file == nil || file.Fd() < 0 {
return errors.Errorf("invalid file")
}
@@ -346,7 +347,7 @@ func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileM
return os.NewFile(uintptr(fd), name), nil
}
-func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) (err error) {
+func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, 0)
if err != nil {
return err
@@ -497,7 +498,7 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
return nil
}
-func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
parent := filepath.Dir(metadata.Name)
base := filepath.Base(metadata.Name)
@@ -526,7 +527,7 @@ func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMet
return setFileAttrs(file, mode, metadata, options)
}
-func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
@@ -558,7 +559,7 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMeta
return setFileAttrs(newFile, mode, metadata, options)
}
-func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
@@ -636,7 +637,7 @@ type hardLinkToCreate struct {
dest string
dirfd int
mode os.FileMode
- metadata *zstdFileMetadata
+ metadata *internal.ZstdFileMetadata
}
func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
@@ -659,7 +660,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
}
// Generate the manifest
- var toc zstdTOC
+ var toc internal.ZstdTOC
if err := json.Unmarshal(d.manifest, &toc); err != nil {
return output, err
}
@@ -667,7 +668,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
var missingChunks []missingChunk
- var mergedEntries []zstdFileMetadata
+ var mergedEntries []internal.ZstdFileMetadata
if err := maybeDoIDRemap(toc.Entries, options); err != nil {
return output, err
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index e70f4f018..722750c0c 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -69,6 +69,9 @@ additionalimagestores = [
# and vfs drivers.
#ignore_chown_errors = "false"
+# Inodes is used to set a maximum number of inodes for the container image.
+# inodes = ""
+
# Path to an helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 03ddd5ad9..4d62b151a 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -2,6 +2,7 @@ package types
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -74,9 +75,12 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
return runtimeDir, nil
}
- runUserDir := env.getRunUserDir()
- if isRootlessRuntimeDirOwner(runUserDir, env) {
- return runUserDir, nil
+ initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ if err != nil || string(initCommand) == "systemd" {
+ runUserDir := env.getRunUserDir()
+ if isRootlessRuntimeDirOwner(runUserDir, env) {
+ return runUserDir, nil
+ }
}
tmpPerUserDir := env.getTmpPerUserDir()
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c462b5810..6b0eb5244 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -197,7 +197,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.32.6
+# github.com/containers/storage v1.33.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -214,6 +214,8 @@ github.com/containers/storage/drivers/zfs
github.com/containers/storage/pkg/archive
github.com/containers/storage/pkg/chrootarchive
github.com/containers/storage/pkg/chunked
+github.com/containers/storage/pkg/chunked/compressor
+github.com/containers/storage/pkg/chunked/internal
github.com/containers/storage/pkg/config
github.com/containers/storage/pkg/devicemapper
github.com/containers/storage/pkg/directory