-rw-r--r--  .cirrus.yml | 6
-rw-r--r--  .pre-commit-config.yaml | 7
-rw-r--r--  Makefile | 2
-rw-r--r--  cmd/podman/common/completion.go | 4
-rw-r--r--  cmd/podman/common/create_opts.go | 9
-rw-r--r--  cmd/podman/common/specgen.go | 42
-rw-r--r--  cmd/podman/containers/cp.go | 177
-rw-r--r--  cmd/podman/containers/restore.go | 6
-rw-r--r--  cmd/podman/login.go | 1
-rw-r--r--  cmd/podman/logout.go | 1
-rw-r--r--  cmd/podman/registry/config.go | 3
-rw-r--r--  cmd/podman/root.go | 3
-rw-r--r--  cmd/podman/system/migrate.go | 5
-rw-r--r--  docs/source/markdown/podman-container-restore.1.md | 11
-rw-r--r--  docs/source/markdown/podman-cp.1.md | 100
-rw-r--r--  go.mod | 8
-rw-r--r--  go.sum | 14
-rw-r--r--  libpod/container.go | 40
-rw-r--r--  libpod/container_api.go | 20
-rw-r--r--  libpod/container_copy_linux.go | 3
-rw-r--r--  libpod/container_inspect.go | 33
-rw-r--r--  libpod/container_internal_linux.go | 120
-rw-r--r--  libpod/oci_conmon_linux.go | 24
-rw-r--r--  libpod/runtime_ctr.go | 12
-rw-r--r--  pkg/api/handlers/compat/containers_archive.go | 17
-rw-r--r--  pkg/api/handlers/compat/images_build.go | 19
-rw-r--r--  pkg/api/server/register_archive.go | 4
-rw-r--r--  pkg/api/server/server.go | 9
-rw-r--r--  pkg/bindings/connection.go | 3
-rw-r--r--  pkg/bindings/containers/types.go | 3
-rw-r--r--  pkg/bindings/containers/types_copy_options.go | 16
-rw-r--r--  pkg/bindings/containers/types_restore_options.go | 16
-rw-r--r--  pkg/bindings/images/build.go | 38
-rw-r--r--  pkg/checkpoint/checkpoint_restore.go | 96
-rw-r--r--  pkg/checkpoint/crutils/checkpoint_restore_utils.go | 11
-rw-r--r--  pkg/copy/parse.go | 12
-rw-r--r--  pkg/criu/criu.go | 10
-rw-r--r--  pkg/domain/entities/containers.go | 3
-rw-r--r--  pkg/domain/entities/engine_container.go | 3
-rw-r--r--  pkg/domain/filters/containers.go | 30
-rw-r--r--  pkg/domain/infra/abi/archive.go | 2
-rw-r--r--  pkg/domain/infra/abi/containers.go | 1
-rw-r--r--  pkg/domain/infra/abi/system.go | 12
-rw-r--r--  pkg/domain/infra/runtime_abi.go | 1
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 3
-rw-r--r--  pkg/domain/infra/tunnel/system.go | 3
-rw-r--r--  pkg/rootless/rootless_linux.c | 25
-rw-r--r--  pkg/rootless/rootless_linux.go | 20
-rw-r--r--  test/apiv2/python/rest_api/test_v2_0_0_container.py | 10
-rwxr-xr-x  test/buildah-bud/apply-podman-deltas | 61
-rw-r--r--  test/buildah-bud/buildah-tests.diff | 93
-rw-r--r--  test/buildah-bud/make-new-buildah-diffs | 8
-rwxr-xr-x  test/buildah-bud/run-buildah-bud-tests | 4
-rw-r--r--  test/e2e/checkpoint_test.go | 164
-rw-r--r--  test/e2e/healthcheck_run_test.go | 10
-rw-r--r--  test/e2e/login_logout_test.go | 223
-rw-r--r--  test/e2e/ps_test.go | 37
-rw-r--r--  test/e2e/run_test.go | 4
-rw-r--r--  test/system/050-stop.bats | 31
-rw-r--r--  test/system/065-cp.bats | 234
-rw-r--r--  test/system/070-build.bats | 13
-rw-r--r--  test/system/255-auto-update.bats | 3
-rw-r--r--  transfer.md | 4
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/.gitignore | 5
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/.travis.yml | 25
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/Makefile | 60
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go | 1211
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore | 5
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/.golangci.yml | 12
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/LICENSE (renamed from vendor/github.com/checkpoint-restore/go-criu/LICENSE) | 0
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/Makefile | 62
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/README.md (renamed from vendor/github.com/checkpoint-restore/go-criu/README.md) | 53
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/go.mod | 9
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/go.sum | 12
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/main.go (renamed from vendor/github.com/checkpoint-restore/go-criu/main.go) | 48
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/notify.go (renamed from vendor/github.com/checkpoint-restore/go-criu/notify.go) | 5
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go | 2237
-rw-r--r--  vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.proto | 239
-rw-r--r--  vendor/github.com/containers/common/libimage/copier.go | 12
-rw-r--r--  vendor/github.com/containers/common/libimage/image.go | 8
-rw-r--r--  vendor/github.com/containers/common/libimage/import.go | 2
-rw-r--r--  vendor/github.com/containers/common/libimage/manifests/manifests.go | 18
-rw-r--r--  vendor/github.com/containers/common/libimage/pull.go | 80
-rw-r--r--  vendor/github.com/containers/common/libimage/search.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/auth.go | 153
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/cli.go | 20
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 11
-rw-r--r--  vendor/github.com/containers/common/pkg/config/pull_policy.go | 8
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 34
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_dest.go | 26
-rw-r--r--  vendor/github.com/containers/image/v5/directory/directory_transport.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_client.go | 29
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_dest.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go | 15
-rw-r--r--  vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go | 3
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/common.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/docker_schema2.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go | 14
-rw-r--r--  vendor/github.com/containers/image/v5/oci/layout/oci_src.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/openshift/openshift.go | 9
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/compression.go | 20
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/internal/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/compression/types/types.go | 28
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 16
-rw-r--r--  vendor/github.com/containers/image/v5/types/types.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 39
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go | 30
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 2
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_unix.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compression.go | 380
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go | 220
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/internal/compression.go | 172
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 41
-rw-r--r--  vendor/github.com/containers/storage/storage.conf | 3
-rw-r--r--  vendor/github.com/containers/storage/types/utils.go | 10
-rw-r--r--  vendor/modules.txt | 14
123 files changed, 5139 insertions(+), 2252 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index 8a79ade78..7218e3e9a 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -601,7 +601,11 @@ buildah_bud_test_task:
CTR_FQIN: ${FEDORA_CONTAINER_FQIN}
# ID for re-use of build output
_BUILD_CACHE_HANDLE: ${FEDORA_NAME}-build-${CIRRUS_BUILD_ID}
- PODBIN_NAME: podman
+ matrix:
+ - env:
+ PODBIN_NAME: podman
+ - env:
+ PODBIN_NAME: remote
gce_instance: *standardvm
timeout_in: 45m
clone_script: *noop
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f44b0ea42..91c0fef87 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,8 +4,15 @@ repos:
- repo: https://github.com/pre-commit/pre-commit-hooks.git
rev: v3.4.0
hooks:
+ # buildah-tests.diff is generated by 'git format-patch' and includes
+ # trailing whitespace as part of its format. We can work around that,
+ # but unfortunately the buildah repo has some files with tabs, which
+ # git-diff formats as '[+/-]<space><tab>', which these hooks choke on.
+ # Just disable checks on this diff file as a special case.
- id: end-of-file-fixer
+ exclude: test/buildah-bud/buildah-tests.diff
- id: trailing-whitespace
+ exclude: test/buildah-bud/buildah-tests.diff
- id: mixed-line-ending
- id: check-byte-order-marker
- id: check-executables-have-shebangs
diff --git a/Makefile b/Makefile
index 832671337..32d2f44b7 100644
--- a/Makefile
+++ b/Makefile
@@ -214,7 +214,7 @@ endif
.PHONY: .gitvalidation
.gitvalidation: .gopathok
@echo "Validating vs commit '$(call err_if_empty,EPOCH_TEST_COMMIT)'"
- GIT_CHECK_EXCLUDE="./vendor:docs/make.bat" $(GOBIN)/git-validation -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..$(HEAD)
+ GIT_CHECK_EXCLUDE="./vendor:docs/make.bat:test/buildah-bud/buildah-tests.diff" $(GOBIN)/git-validation -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..$(HEAD)
.PHONY: lint
lint: golangci-lint
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go
index 177d094aa..08b2f6235 100644
--- a/cmd/podman/common/completion.go
+++ b/cmd/podman/common/completion.go
@@ -46,7 +46,9 @@ func setupContainerEngine(cmd *cobra.Command) (entities.ContainerEngine, error)
return nil, err
}
if !registry.IsRemote() && rootless.IsRootless() {
- err := containerEngine.SetupRootless(registry.Context(), cmd)
+ _, noMoveProcess := cmd.Annotations[registry.NoMoveProcess]
+
+ err := containerEngine.SetupRootless(registry.Context(), noMoveProcess)
if err != nil {
return nil, err
}
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 66778f519..42e0efe5d 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -517,7 +517,14 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, rtc *c
cliOpts.OOMKillDisable = *cc.HostConfig.OomKillDisable
}
if cc.Config.Healthcheck != nil {
- cliOpts.HealthCmd = strings.Join(cc.Config.Healthcheck.Test, " ")
+ finCmd := ""
+ for _, str := range cc.Config.Healthcheck.Test {
+ finCmd = finCmd + str + " "
+ }
+ if len(finCmd) > 1 {
+ finCmd = finCmd[:len(finCmd)-1]
+ }
+ cliOpts.HealthCmd = finCmd
cliOpts.HealthInterval = cc.Config.Healthcheck.Interval.String()
cliOpts.HealthRetries = uint(cc.Config.Healthcheck.Retries)
cliOpts.HealthStartPeriod = cc.Config.Healthcheck.StartPeriod.String()
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index 24b45e479..42f515ace 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -516,7 +516,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
-
switch con[0] {
case "apparmor":
s.ContainerSecurityConfig.ApparmorProfile = con[1]
@@ -664,25 +663,40 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
}
func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, startPeriod string) (*manifest.Schema2HealthConfig, error) {
+ cmdArr := []string{}
+ isArr := true
+ err := json.Unmarshal([]byte(inCmd), &cmdArr) // array unmarshalling
+ if err != nil {
+ cmdArr = strings.SplitN(inCmd, " ", 2) // default for compat
+ isArr = false
+ }
// Every healthcheck requires a command
- if len(inCmd) == 0 {
+ if len(cmdArr) == 0 {
return nil, errors.New("Must define a healthcheck command for all healthchecks")
}
-
- // first try to parse option value as JSON array of strings...
- cmd := []string{}
-
- if inCmd == "none" {
- cmd = []string{"NONE"}
- } else {
- err := json.Unmarshal([]byte(inCmd), &cmd)
- if err != nil {
- // ...otherwise pass it to "/bin/sh -c" inside the container
- cmd = []string{"CMD-SHELL", inCmd}
+ concat := ""
+ if cmdArr[0] == "CMD" || cmdArr[0] == "none" { // this is for compat, we are already split properly for most compat cases
+ cmdArr = strings.Fields(inCmd)
+ } else if cmdArr[0] != "CMD-SHELL" { // this is for the podman side of things, won't contain the keywords
+ if isArr && len(cmdArr) > 1 { // an array of consecutive commands
+ cmdArr = append([]string{"CMD"}, cmdArr...)
+ } else { // one singular command
+ if len(cmdArr) == 1 {
+ concat = cmdArr[0]
+ } else {
+ concat = strings.Join(cmdArr[0:], " ")
+ }
+ cmdArr = append([]string{"CMD-SHELL"}, concat)
}
}
+
+ if cmdArr[0] == "none" { // if specified to remove the healthcheck
+ cmdArr = []string{"NONE"}
+ }
+
+ // healthcheck is by default an array, so we simply pass the user input
hc := manifest.Schema2HealthConfig{
- Test: cmd,
+ Test: cmdArr,
}
if interval == "disable" {
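
The new makeHealthCheckFromCli logic above accepts either a JSON array or a plain string for the healthcheck command and normalizes it into Docker's CMD / CMD-SHELL / NONE test forms. Below is a minimal standalone Go sketch of that normalization — a simplified re-implementation for illustration, not the podman function itself:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// normalizeHealthCmd mirrors the parsing added to makeHealthCheckFromCli:
// JSON arrays become explicit "CMD" tests, plain strings fall back to
// "CMD-SHELL", and "none" disables the healthcheck entirely.
func normalizeHealthCmd(inCmd string) []string {
	cmdArr := []string{}
	isArr := true
	if err := json.Unmarshal([]byte(inCmd), &cmdArr); err != nil {
		cmdArr = strings.SplitN(inCmd, " ", 2) // not JSON: treat as a shell string
		isArr = false
	}
	switch {
	case len(cmdArr) == 0:
		return nil // caller must reject empty healthcheck commands
	case cmdArr[0] == "CMD" || cmdArr[0] == "none":
		cmdArr = strings.Fields(inCmd) // already in compat form
	case cmdArr[0] != "CMD-SHELL":
		if isArr && len(cmdArr) > 1 {
			cmdArr = append([]string{"CMD"}, cmdArr...) // exec-style array
		} else {
			cmdArr = []string{"CMD-SHELL", strings.Join(cmdArr, " ")} // run via shell
		}
	}
	if cmdArr[0] == "none" {
		cmdArr = []string{"NONE"} // explicitly disable the healthcheck
	}
	return cmdArr
}

func main() {
	for _, in := range []string{
		`curl -f http://localhost`,
		`["curl","-f","http://localhost"]`,
		`["CMD-SHELL","curl -f http://localhost"]`,
		`none`,
	} {
		fmt.Printf("%-40s -> %q\n", in, normalizeHealthCmd(in))
	}
}
```

Running the sketch shows the three resulting shapes: a plain string becomes `["CMD-SHELL", "curl -f http://localhost"]`, a JSON array becomes `["CMD", "curl", "-f", "http://localhost"]`, and `none` becomes `["NONE"]`.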
diff --git a/cmd/podman/containers/cp.go b/cmd/podman/containers/cp.go
index c1f1e27f5..7b5846a35 100644
--- a/cmd/podman/containers/cp.go
+++ b/cmd/podman/containers/cp.go
@@ -82,7 +82,9 @@ func cp(cmd *cobra.Command, args []string) error {
return err
}
- if len(sourceContainerStr) > 0 {
+ if len(sourceContainerStr) > 0 && len(destContainerStr) > 0 {
+ return copyContainerToContainer(sourceContainerStr, sourcePath, destContainerStr, destPath)
+ } else if len(sourceContainerStr) > 0 {
return copyFromContainer(sourceContainerStr, sourcePath, destPath)
}
@@ -115,6 +117,84 @@ func doCopy(funcA func() error, funcB func() error) error {
return errorhandling.JoinErrors(copyErrors)
}
+func copyContainerToContainer(sourceContainer string, sourcePath string, destContainer string, destPath string) error {
+ if err := containerMustExist(sourceContainer); err != nil {
+ return err
+ }
+
+ if err := containerMustExist(destContainer); err != nil {
+ return err
+ }
+
+ sourceContainerInfo, err := registry.ContainerEngine().ContainerStat(registry.GetContext(), sourceContainer, sourcePath)
+ if err != nil {
+ return errors.Wrapf(err, "%q could not be found on container %s", sourcePath, sourceContainer)
+ }
+
+ destContainerBaseName, destContainerInfo, destResolvedToParentDir, err := resolvePathOnDestinationContainer(destContainer, destPath, false)
+ if err != nil {
+ return err
+ }
+
+ if sourceContainerInfo.IsDir && !destContainerInfo.IsDir {
+ return errors.New("destination must be a directory when copying a directory")
+ }
+
+ sourceContainerTarget := sourceContainerInfo.LinkTarget
+ destContainerTarget := destContainerInfo.LinkTarget
+ if !destContainerInfo.IsDir {
+ destContainerTarget = filepath.Dir(destPath)
+ }
+
+ // If we copy a directory via the "." notation and the container path
+ // does not exist, we need to make sure that the destination on the
+ // container gets created; otherwise the contents of the source
+ // directory will be written to the destination's parent directory.
+ //
+ // Hence, whenever "." is the source and the destination does not
+ // exist, we copy the source's parent and let the copier package create
+ // the destination via the Rename option.
+ if destResolvedToParentDir && sourceContainerInfo.IsDir && strings.HasSuffix(sourcePath, ".") {
+ sourceContainerTarget = filepath.Dir(sourceContainerTarget)
+ }
+
+ reader, writer := io.Pipe()
+
+ sourceContainerCopy := func() error {
+ defer writer.Close()
+ copyFunc, err := registry.ContainerEngine().ContainerCopyToArchive(registry.GetContext(), sourceContainer, sourceContainerTarget, writer)
+ if err != nil {
+ return err
+ }
+ if err := copyFunc(); err != nil {
+ return errors.Wrap(err, "error copying from container")
+ }
+ return nil
+ }
+
+ destContainerCopy := func() error {
+ defer reader.Close()
+
+ copyOptions := entities.CopyOptions{Chown: chown}
+ if (!sourceContainerInfo.IsDir && !destContainerInfo.IsDir) || destResolvedToParentDir {
+ // If we're having a file-to-file copy, make sure to
+ // rename accordingly.
+ copyOptions.Rename = map[string]string{filepath.Base(sourceContainerTarget): destContainerBaseName}
+ }
+
+ copyFunc, err := registry.ContainerEngine().ContainerCopyFromArchive(registry.GetContext(), destContainer, destContainerTarget, reader, copyOptions)
+ if err != nil {
+ return err
+ }
+ if err := copyFunc(); err != nil {
+ return errors.Wrap(err, "error copying to container")
+ }
+ return nil
+ }
+
+ return doCopy(sourceContainerCopy, destContainerCopy)
+}
+
// copyFromContainer copies from the containerPath on the container to hostPath.
func copyFromContainer(container string, containerPath string, hostPath string) error {
if err := containerMustExist(container); err != nil {
@@ -133,6 +213,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
}
var hostBaseName string
+ var resolvedToHostParentDir bool
hostInfo, hostInfoErr := copy.ResolveHostPath(hostPath)
if hostInfoErr != nil {
if strings.HasSuffix(hostPath, "/") {
@@ -148,6 +229,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
// it'll be created while copying. Hence, we use it as the
// base path.
hostBaseName = filepath.Base(hostPath)
+ resolvedToHostParentDir = true
} else {
// If the specified path exists on the host, we must use its
// base path as it may have changed due to symlink evaluations.
@@ -175,7 +257,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
// we copy the source's parent and let the copier package create the
// destination via the Rename option.
containerTarget := containerInfo.LinkTarget
- if hostInfoErr != nil && containerInfo.IsDir && strings.HasSuffix(containerTarget, ".") {
+ if resolvedToHostParentDir && containerInfo.IsDir && strings.HasSuffix(containerTarget, ".") {
containerTarget = filepath.Dir(containerTarget)
}
@@ -216,7 +298,7 @@ func copyFromContainer(container string, containerPath string, hostPath string)
ChownFiles: &idPair,
IgnoreDevices: true,
}
- if (!containerInfo.IsDir && !hostInfo.IsDir) || hostInfoErr != nil {
+ if (!containerInfo.IsDir && !hostInfo.IsDir) || resolvedToHostParentDir {
// If we're having a file-to-file copy, make sure to
// rename accordingly.
putOptions.Rename = map[string]string{filepath.Base(containerTarget): hostBaseName}
@@ -263,42 +345,9 @@ func copyToContainer(container string, containerPath string, hostPath string) er
return errors.Wrapf(err, "%q could not be found on the host", hostPath)
}
- // If the path on the container does not exist. We need to make sure
- // that it's parent directory exists. The destination may be created
- // while copying.
- var containerBaseName string
- containerInfo, containerInfoErr := registry.ContainerEngine().ContainerStat(registry.GetContext(), container, containerPath)
- if containerInfoErr != nil {
- if strings.HasSuffix(containerPath, "/") {
- return errors.Wrapf(containerInfoErr, "%q could not be found on container %s", containerPath, container)
- }
- if isStdin {
- return errors.New("destination must be a directory when copying from stdin")
- }
- // NOTE: containerInfo may actually be set. That happens when
- // the container path is a symlink into nirvana. In that case,
- // we must use the symlinked path instead.
- path := containerPath
- if containerInfo != nil {
- containerBaseName = filepath.Base(containerInfo.LinkTarget)
- path = containerInfo.LinkTarget
- } else {
- containerBaseName = filepath.Base(containerPath)
- }
-
- parentDir, err := containerParentDir(container, path)
- if err != nil {
- return errors.Wrapf(err, "could not determine parent dir of %q on container %s", path, container)
- }
- containerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), container, parentDir)
- if err != nil {
- return errors.Wrapf(err, "%q could not be found on container %s", containerPath, container)
- }
- } else {
- // If the specified path exists on the container, we must use
- // its base path as it may have changed due to symlink
- // evaluations.
- containerBaseName = filepath.Base(containerInfo.LinkTarget)
+ containerBaseName, containerInfo, containerResolvedToParentDir, err := resolvePathOnDestinationContainer(container, containerPath, isStdin)
+ if err != nil {
+ return err
}
// If we copy a directory via the "." notation and the container path
@@ -310,7 +359,7 @@ func copyToContainer(container string, containerPath string, hostPath string) er
// exist, we copy the source's parent and let the copier package create
// the destination via the Rename option.
hostTarget := hostInfo.LinkTarget
- if containerInfoErr != nil && hostInfo.IsDir && strings.HasSuffix(hostTarget, ".") {
+ if containerResolvedToParentDir && hostInfo.IsDir && strings.HasSuffix(hostTarget, ".") {
hostTarget = filepath.Dir(hostTarget)
}
@@ -362,7 +411,7 @@ func copyToContainer(container string, containerPath string, hostPath string) er
// copy the base directory.
KeepDirectoryNames: hostInfo.IsDir && filepath.Base(hostTarget) != ".",
}
- if (!hostInfo.IsDir && !containerInfo.IsDir) || containerInfoErr != nil {
+ if (!hostInfo.IsDir && !containerInfo.IsDir) || containerResolvedToParentDir {
// If we're having a file-to-file copy, make sure to
// rename accordingly.
getOptions.Rename = map[string]string{filepath.Base(hostTarget): containerBaseName}
@@ -393,6 +442,52 @@ func copyToContainer(container string, containerPath string, hostPath string) er
return doCopy(hostCopy, containerCopy)
}
+// resolvePathOnDestinationContainer resolves the specified path on the
+// container. If the path does not exist, it attempts to use the parent
+// directory.
+func resolvePathOnDestinationContainer(container string, containerPath string, isStdin bool) (baseName string, containerInfo *entities.ContainerStatReport, resolvedToParentDir bool, err error) {
+ containerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), container, containerPath)
+ if err == nil {
+ baseName = filepath.Base(containerInfo.LinkTarget)
+ return
+ }
+
+ if strings.HasSuffix(containerPath, "/") {
+ err = errors.Wrapf(err, "%q could not be found on container %s", containerPath, container)
+ return
+ }
+ if isStdin {
+ err = errors.New("destination must be a directory when copying from stdin")
+ return
+ }
+
+ // NOTE: containerInfo may actually be set. That happens when
+ // the container path is a symlink into nirvana. In that case,
+ // we must use the symlinked path instead.
+ path := containerPath
+ if containerInfo != nil {
+ baseName = filepath.Base(containerInfo.LinkTarget)
+ path = containerInfo.LinkTarget
+ } else {
+ baseName = filepath.Base(containerPath)
+ }
+
+ parentDir, err := containerParentDir(container, path)
+ if err != nil {
+ err = errors.Wrapf(err, "could not determine parent dir of %q on container %s", path, container)
+ return
+ }
+
+ containerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), container, parentDir)
+ if err != nil {
+ err = errors.Wrapf(err, "%q could not be found on container %s", containerPath, container)
+ return
+ }
+
+ resolvedToParentDir = true
+ return baseName, containerInfo, resolvedToParentDir, nil
+}
+
// containerParentDir returns the parent directory of the specified path on the
// container. If the path is relative, it will be resolved relative to the
// container's working directory (or "/" if the work dir isn't set).
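
copyContainerToContainer above streams a tar archive out of the source container and into the destination through an in-memory pipe, running both sides concurrently via doCopy. The following stdlib-only Go sketch shows that producer/consumer pattern in isolation; it is illustrative and not podman code (podman's doCopy additionally joins both errors):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// doCopy runs the producer and consumer concurrently, mirroring the
// structure used by `podman cp` for container-to-container copies.
func doCopy(producer, consumer func() error) error {
	errCh := make(chan error)
	go func() { errCh <- producer() }()
	consumerErr := consumer()
	if producerErr := <-errCh; producerErr != nil {
		return producerErr
	}
	return consumerErr
}

func main() {
	reader, writer := io.Pipe()

	// Producer: stands in for ContainerCopyToArchive writing a tar stream.
	produce := func() error {
		defer writer.Close() // closing unblocks the reader with EOF
		_, err := io.Copy(writer, strings.NewReader("archive bytes..."))
		return err
	}

	// Consumer: stands in for ContainerCopyFromArchive reading that stream.
	consume := func() error {
		defer reader.Close()
		data, err := io.ReadAll(reader)
		if err != nil {
			return err
		}
		fmt.Printf("received %d bytes\n", len(data))
		return nil
	}

	if err := doCopy(produce, consume); err != nil {
		fmt.Println("copy failed:", err)
	}
}
```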
diff --git a/cmd/podman/containers/restore.go b/cmd/podman/containers/restore.go
index b908ea493..3b6f74efa 100644
--- a/cmd/podman/containers/restore.go
+++ b/cmd/podman/containers/restore.go
@@ -71,6 +71,9 @@ func init() {
)
_ = restoreCommand.RegisterFlagCompletionFunc("publish", completion.AutocompleteNone)
+ flags.StringVar(&restoreOptions.Pod, "pod", "", "Restore container into existing Pod (only works with --import)")
+ _ = restoreCommand.RegisterFlagCompletionFunc("pod", common.AutocompletePodsRunning)
+
validate.AddLatestFlag(restoreCommand, &restoreOptions.Latest)
}
@@ -91,6 +94,9 @@ func restore(cmd *cobra.Command, args []string) error {
if restoreOptions.Import == "" && restoreOptions.Name != "" {
return errors.Errorf("--name can only be used with --import")
}
+ if restoreOptions.Import == "" && restoreOptions.Pod != "" {
+ return errors.Errorf("--pod can only be used with --import")
+ }
if restoreOptions.Name != "" && restoreOptions.TCPEstablished {
return errors.Errorf("--tcp-established cannot be used with --name")
}
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index a8dadf5cd..7e853b38d 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -52,6 +52,7 @@ func init() {
loginOptions.Stdin = os.Stdin
loginOptions.Stdout = os.Stdout
loginOptions.AcceptUnspecifiedRegistry = true
+ loginOptions.AcceptRepositories = true
}
// Implementation of podman-login.
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index 0ee134635..f44b13a1f 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -43,6 +43,7 @@ func init() {
logoutOptions.Stdout = os.Stdout
logoutOptions.AcceptUnspecifiedRegistry = true
+ logoutOptions.AcceptRepositories = true
}
// Implementation of podman-logout.
diff --git a/cmd/podman/registry/config.go b/cmd/podman/registry/config.go
index 25139a3de..b512ba341 100644
--- a/cmd/podman/registry/config.go
+++ b/cmd/podman/registry/config.go
@@ -15,6 +15,9 @@ import (
)
const (
+ // NoMoveProcess used as cobra.Annotation when command doesn't need Podman to be moved to a separate cgroup
+ NoMoveProcess = "NoMoveProcess"
+
// ParentNSRequired used as cobra.Annotation when command requires root access
ParentNSRequired = "ParentNSRequired"
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 2633e4040..dc4ebb952 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -208,7 +208,8 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
// 3) command doesn't require Parent Namespace
_, found := cmd.Annotations[registry.ParentNSRequired]
if !registry.IsRemote() && rootless.IsRootless() && !found {
- err := registry.ContainerEngine().SetupRootless(registry.Context(), cmd)
+ _, noMoveProcess := cmd.Annotations[registry.NoMoveProcess]
+ err := registry.ContainerEngine().SetupRootless(registry.Context(), noMoveProcess)
if err != nil {
return err
}
diff --git a/cmd/podman/system/migrate.go b/cmd/podman/system/migrate.go
index 9940cd063..b9dc272d7 100644
--- a/cmd/podman/system/migrate.go
+++ b/cmd/podman/system/migrate.go
@@ -22,7 +22,10 @@ var (
`
migrateCommand = &cobra.Command{
- Annotations: map[string]string{registry.EngineMode: registry.ABIMode},
+ Annotations: map[string]string{
+ registry.EngineMode: registry.ABIMode,
+ registry.NoMoveProcess: registry.NoMoveProcess,
+ },
Use: "migrate [options]",
Args: validate.NoArgs,
Short: "Migrate containers",
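
The NoMoveProcess flag is plumbed through cobra's Annotations map: a command such as `podman system migrate` opts in by setting the key, and persistentPreRunE checks for its presence before deciding whether to move the process into a separate cgroup. A minimal cobra sketch of that annotation-driven pattern follows; the command and key names here are illustrative stand-ins, not podman's:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// noMoveProcessKey marks commands that must not be moved into a separate
// cgroup (hypothetical stand-in for registry.NoMoveProcess).
const noMoveProcessKey = "NoMoveProcess"

func main() {
	root := &cobra.Command{
		Use: "demo",
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// Only the presence of the annotation matters, as in
			// persistentPreRunE in cmd/podman/root.go.
			_, noMove := cmd.Annotations[noMoveProcessKey]
			fmt.Println("skip cgroup move:", noMove)
			return nil
		},
	}

	migrate := &cobra.Command{
		Use:         "migrate",
		Annotations: map[string]string{noMoveProcessKey: noMoveProcessKey},
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("migrating...")
			return nil
		},
	}

	root.AddCommand(migrate)
	if err := root.Execute(); err != nil {
		fmt.Println(err)
	}
}
```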
diff --git a/docs/source/markdown/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md
index 36eb650e5..856008cc0 100644
--- a/docs/source/markdown/podman-container-restore.1.md
+++ b/docs/source/markdown/podman-container-restore.1.md
@@ -93,6 +93,15 @@ be used once and the restored *container* will have another IP address. This als
that **--name, -n** cannot be used in combination with **--tcp-established**.\
*IMPORTANT: This OPTION is only available in combination with **--import, -i**.*
+#### **--pod**=*name*
+
+Restore a container into the pod *name*. The destination pod must share the
+same namespaces as the pod this container was checkpointed from (see
+**[podman pod create --share](podman-pod-create.1.md#--share)**).
+*IMPORTANT: This OPTION is only available in combination with **--import, -i**.*
+
+This option requires at least CRIU 3.16.
+
#### **--publish**, **-p**=*port*
Replaces the ports that the *container* publishes, as configured during the
@@ -128,7 +137,7 @@ $ podman run --rm -p 2345:80 -d webserver
```
## SEE ALSO
-**[podman(1)](podman.1.md)**, **[podman-container-checkpoint(1)](podman-container-checkpoint.1.md)**, **[podman-run(1)](podman-run.1.md)**
+**[podman(1)](podman.1.md)**, **[podman-container-checkpoint(1)](podman-container-checkpoint.1.md)**, **[podman-run(1)](podman-run.1.md)**, **[podman-pod-create(1)](podman-pod-create.1.md)**
## HISTORY
September 2018, Originally compiled by Adrian Reber <areber@redhat.com>
diff --git a/docs/source/markdown/podman-cp.1.md b/docs/source/markdown/podman-cp.1.md
index 43ee4cdff..79edf26ed 100644
--- a/docs/source/markdown/podman-cp.1.md
+++ b/docs/source/markdown/podman-cp.1.md
@@ -9,111 +9,121 @@ podman\-cp - Copy files/folders between a container and the local filesystem
**podman container cp** [*options*] [*container*:]*src_path* [*container*:]*dest_path*
## DESCRIPTION
-Copy the contents of **src_path** to the **dest_path**. You can copy from the container's filesystem to the local machine or the reverse, from the local filesystem to the container.
-If `-` is specified for either the SRC_PATH or DEST_PATH, you can also stream a tar archive from STDIN or to STDOUT.
+**podman cp** allows copying the contents of **src_path** to the **dest_path**. Files can be copied from a container to the local machine and vice versa or between two containers.
+If `-` is specified for either the `SRC_PATH` or `DEST_PATH`, one can also stream a tar archive from `STDIN` or to `STDOUT`.
-The CONTAINER can be a running or stopped container. The **src_path** or **dest_path** can be a file or directory.
+The containers can be either running or stopped and the *src_path* or *dest_path* can be a file or directory.
-The **podman cp** command assumes container paths are relative to the container's root directory (i.e., `/`).
-
-This means supplying the initial forward slash is optional;
-
-The command sees **compassionate_darwin:/tmp/foo/myfile.txt** and **compassionate_darwin:tmp/foo/myfile.txt** as identical.
+*IMPORTANT: The **podman cp** command assumes container paths are relative to the container's root directory (`/`), which means supplying the initial forward slash is optional; the command therefore treats `compassionate_darwin:/tmp/foo/myfile.txt` and `compassionate_darwin:tmp/foo/myfile.txt` as identical.*
Local machine paths can be an absolute or relative value.
The command interprets a local machine's relative paths as relative to the current working directory where **podman cp** is run.
-Assuming a path separator of /, a first argument of **src_path** and second argument of **dest_path**, the behavior is as follows:
+Assuming a path separator of `/`, a first argument of **src_path** and second argument of **dest_path**, the behavior is as follows:
-**src_path** specifies a file
+**src_path** specifies a file:
- **dest_path** does not exist
- - the file is saved to a file created at **dest_path** (note that parent directory must exist)
+ - the file is saved to a file created at **dest_path** (note that parent directory must exist).
- **dest_path** exists and is a file
- - the destination is overwritten with the source file's contents
+ - the destination is overwritten with the source file's contents.
- **dest_path** exists and is a directory
- - the file is copied into this directory using the base name from **src_path**
+ - the file is copied into this directory using the base name from **src_path**.
-**src_path** specifies a directory
+**src_path** specifies a directory:
- **dest_path** does not exist
- - **dest_path** is created as a directory and the contents of the source directory are copied into this directory
+ - **dest_path** is created as a directory and the contents of the source directory are copied into this directory.
- **dest_path** exists and is a file
- - Error condition: cannot copy a directory to a file
+ - Error condition: cannot copy a directory to a file.
- **dest_path** exists and is a directory
- **src_path** ends with `/`
- - the source directory is copied into this directory
+ - the source directory is copied into this directory.
- **src_path** ends with `/.` (i.e., slash followed by dot)
- - the content of the source directory is copied into this directory
+ - the content of the source directory is copied into this directory.
The command requires **src_path** and **dest_path** to exist according to the above rules.
If **src_path** is local and is a symbolic link, the symbolic target, is copied by default.
-A colon (:) is used as a delimiter between CONTAINER and its path.
-
-You can also use : when specifying paths to a **src_path** or **dest_path** on a local machine, for example, `file:name.txt`.
+A *colon* ( : ) is used as a delimiter between a container and its path; it can also be used when specifying paths to a **src_path** or **dest_path** on a local machine, for example, `file:name.txt`.
-If you use a : in a local machine path, you must be explicit with a relative or absolute path, for example:
- `/path/to/file:name.txt` or `./file:name.txt`
+*IMPORTANT: when using a *colon* ( : ) in a local machine path, one must be explicit with a relative or absolute path, for example: `/path/to/file:name.txt` or `./file:name.txt`*
-Using `-` as the *src_path* streams the contents of STDIN as a tar archive. The command extracts the content of the tar to the *DEST_PATH* in the container. In this case, *dest_path* must specify a directory. Using `-` as the *dest_path* streams the contents of the resource (can be a directory) as a tar archive to STDOUT.
+Using `-` as the **src_path** streams the contents of `STDIN` as a tar archive. The command extracts the content of the tar to the `DEST_PATH` in the container. In this case, **dest_path** must specify a directory. Using `-` as the **dest_path** streams the contents of the resource (can be a directory) as a tar archive to `STDOUT`.
Note that `podman cp` ignores permission errors when copying from a running rootless container. The TTY devices inside a rootless container are owned by the host's root user and hence cannot be read inside the container's user namespace.
## OPTIONS
-#### **--archive**, **-a**
+#### **--archive**, **-a**=**true** | *false*
Archive mode (copy all uid/gid information).
-When set to true, files copied to a container will have changed ownership to the primary uid/gid of the container.
+When set to true, files copied to a container will have their ownership changed to the primary UID/GID of the container.
When set to false, maintain uid/gid from archive sources instead of changing them to the primary uid/gid of the destination container.
-The default is *true*.
+The default is **true**.
## ALTERNATIVES
-Podman has much stronger capabilities than just `podman cp` to achieve copy files between host and container.
+Podman has much stronger capabilities than just `podman cp` to achieve copying files between the host and containers.
-Using standard podman-mount and podman-umount takes advantage of the entire linux tool chain, rather
-then just cp.
+Using standard **[podman-mount(1)](podman-mount.1.md)** and **[podman-unmount(1)](podman-unmount.1.md)** takes advantage of the entire linux tool chain, rather than just cp.
-If a user wants to copy contents out of a container or into a container, they can execute a few simple commands.
+Copying contents out of a container or into a container can be achieved with a few simple commands. For example:
-You can copy from the container's file system to the local machine or the reverse, from the local filesystem to the container.
-
-If you want to copy the /etc/foobar directory out of a container and onto /tmp on the host, you could execute the following commands:
+To copy the `/etc/foobar` directory out of a container and onto `/tmp` on the host, the following commands can be executed:
mnt=$(podman mount CONTAINERID)
cp -R ${mnt}/etc/foobar /tmp
podman umount CONTAINERID
-If you want to untar a tar ball into a container, you can execute these commands:
+To untar a tarball into a container, the following commands can be executed:
mnt=$(podman mount CONTAINERID)
tar xf content.tgz -C ${mnt}
podman umount CONTAINERID
-One last example, if you want to install a package into a container that
-does not have dnf installed, you could execute something like:
+To install a package into a container that
+does not have dnf installed, the following commands can be executed:
mnt=$(podman mount CONTAINERID)
dnf install --installroot=${mnt} httpd
chroot ${mnt} rm -rf /var/log/dnf /var/cache/dnf
podman umount CONTAINERID
-This shows that using `podman mount` and `podman umount` you can use all of the
+By using `podman mount` and `podman unmount`, one can use all of the
standard linux tools for moving files into and out of containers, not just
the cp command.
-## EXAMPLE
+## EXAMPLES
+
+- Copy a file from host to a container.
+ ```
+ podman cp /myapp/app.conf containerID:/myapp/app.conf
+ ```
-podman cp /myapp/app.conf containerID:/myapp/app.conf
+- Copy a file from a container to a directory on another container.
+ ```
+ podman cp containerID1:/myfile.txt containerID2:/tmp
+ ```
-podman cp /home/myuser/myfiles.tar containerID:/tmp
+- Copy a directory on a container to a directory on the host.
+ ```
+ podman cp containerID:/myapp/ /myapp/
+ ```
-podman cp containerID:/myapp/ /myapp/
+- Copy the contents of a directory on a container to a directory on the host.
+ ```
+ podman cp containerID:/home/myuser/. /home/myuser/
+ ```
-podman cp containerID:/home/myuser/. /home/myuser/
+- Copy a directory on a container into a directory on another container.
+ ```
+ podman cp containerA:/myapp containerB:/yourapp
+ ```
-podman cp - containerID:/myfiles.tar.gz < myfiles.tar.gz
+- Stream a tar archive from `STDIN` to a container.
+ ```
+ podman cp - containerID:/myfiles.tar.gz < myfiles.tar.gz
+ ```
## SEE ALSO
-podman(1), podman-mount(1), podman-umount(1)
+**[podman(1)](podman.1.md)**, **[podman-mount(1)](podman-mount.1.md)**, **[podman-unmount(1)](podman-unmount.1.md)**
diff --git a/go.mod b/go.mod
index c6ec6355b..62424319f 100644
--- a/go.mod
+++ b/go.mod
@@ -7,17 +7,17 @@ require (
github.com/blang/semver v3.5.1+incompatible
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
- github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
+ github.com/checkpoint-restore/go-criu/v5 v5.1.0
github.com/container-orchestrated-devices/container-device-interface v0.0.0-20210325223243-f99e8b6c10b9
github.com/containernetworking/cni v0.8.1
github.com/containernetworking/plugins v0.9.1
github.com/containers/buildah v1.21.1-0.20210721171232-54cafea4c933
- github.com/containers/common v0.41.1-0.20210721172332-291287e9d060
+ github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3
github.com/containers/conmon v2.0.20+incompatible
- github.com/containers/image/v5 v5.14.0
+ github.com/containers/image/v5 v5.15.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.5.2
- github.com/containers/storage v1.32.6
+ github.com/containers/storage v1.33.0
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cri-o/ocicni v0.2.1-0.20210621164014-d0acc7862283
diff --git a/go.sum b/go.sum
index fc72958ad..13bdf2997 100644
--- a/go.sum
+++ b/go.sum
@@ -128,11 +128,11 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7 h1:ZmSAEFFtv3mepC4/Ze6E/hi6vGZlhRvywqp1l+w+qqw=
github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7/go.mod h1:Kp3ezoDVdhfYxZUtgs4OL8sVvgOLz3txk0sbQD0opvw=
-github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
-github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.1.0 h1:BkVH17kcwgmKMnFArsvLrxuBbMwfvPNYRB7mfJ0lzyI=
+github.com/checkpoint-restore/go-criu/v5 v5.1.0/go.mod h1:iaS8bb7p6zKJanp1Qe8mpl7+bnkYBR500psJR6mwma0=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
@@ -242,13 +242,14 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containers/buildah v1.21.1-0.20210721171232-54cafea4c933 h1:jqO3hDypBoKM5be+fVcqGHOpX2fOiQy2DFEeb/VKpsk=
github.com/containers/buildah v1.21.1-0.20210721171232-54cafea4c933/go.mod h1:9gspFNeUJxIK72n1IMIKIHmtcePEZQsv0tjo+1LqkCo=
github.com/containers/common v0.41.1-0.20210721112610-c95d2f794edf/go.mod h1:Ba5YVNCnyX6xDtg1JqEHa2EMVMW5UbHmIyEqsEwpeGE=
-github.com/containers/common v0.41.1-0.20210721172332-291287e9d060 h1:HgGff2MeEKfYoKp2WQFl9xdsgP7KV8rr/1JZRIuPXmg=
-github.com/containers/common v0.41.1-0.20210721172332-291287e9d060/go.mod h1:Ba5YVNCnyX6xDtg1JqEHa2EMVMW5UbHmIyEqsEwpeGE=
+github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3 h1:lHOZ+G5B7aP2YPsbDo4DtALFAuFG5PWH3Pv5zL2bC08=
+github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3/go.mod h1:UzAAjDsxwd4qkN1mgsk6aspduBY5bspxvKgwQElaBwk=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.13.2/go.mod h1:GkWursKDlDcUIT7L7vZf70tADvZCk/Ga0wgS0MuF0ag=
-github.com/containers/image/v5 v5.14.0 h1:ORaFZ/NwFjkSunMhxg9I8fQod8pgXkrYNiZzit/VqOE=
github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU=
+github.com/containers/image/v5 v5.15.0 h1:NduhN20ptHNlf0uRny5iTJa2OodB9SLMEB4hKKbzBBs=
+github.com/containers/image/v5 v5.15.0/go.mod h1:gzdBcooi6AFdiqfzirUqv90hUyHyI0MMdaqKzACKr2s=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -260,8 +261,9 @@ github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw
github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/containers/storage v1.32.2/go.mod h1:YIBxxjfXZTi04Ah49sh1uSGfmT1V89+I5i3deRobzQo=
-github.com/containers/storage v1.32.6 h1:NqdFRewXO/PYPjgCAScoigZc5QUA21yapSEj6kqD8cw=
github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw=
+github.com/containers/storage v1.33.0 h1:sTk1Mfz3uSNg7cxeaDb0Ld8/UV+8pZEOQjvysjJuzX8=
+github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
diff --git a/libpod/container.go b/libpod/container.go
index 4b9bea5fc..f3f4b27b7 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1173,6 +1173,46 @@ func (c *Container) Networks() ([]string, bool, error) {
return c.networks()
}
+// NetworkMode gets the configured network mode for the container.
+// Get actual value from the database
+func (c *Container) NetworkMode() string {
+ networkMode := ""
+ ctrSpec := c.config.Spec
+
+ switch {
+ case c.config.CreateNetNS:
+ // We actually store the network
+ // mode for Slirp and Bridge, so
+ // we can just use that
+ networkMode = string(c.config.NetMode)
+ case c.config.NetNsCtr != "":
+ networkMode = fmt.Sprintf("container:%s", c.config.NetNsCtr)
+ default:
+ // Find the spec's network namespace.
+ // If there is none, it's host networking.
+ // If there is one and it has a path, it's "ns:".
+ foundNetNS := false
+ for _, ns := range ctrSpec.Linux.Namespaces {
+ if ns.Type == spec.NetworkNamespace {
+ foundNetNS = true
+ if ns.Path != "" {
+ networkMode = fmt.Sprintf("ns:%s", ns.Path)
+ } else {
+ // We're making a network ns, but not
+ // configuring with Slirp or CNI. That
+ // means it's --net=none
+ networkMode = "none"
+ }
+ break
+ }
+ }
+ if !foundNetNS {
+ networkMode = "host"
+ }
+ }
+ return networkMode
+}
+
// Unlocked accessor for networks
func (c *Container) networks() ([]string, bool, error) {
networks, err := c.runtime.state.GetNetworks(c)
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 390bba7bb..637f5b686 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -780,6 +780,16 @@ type ContainerCheckpointOptions struct {
// Compression tells the API which compression to use for
// the exported checkpoint archive.
Compression archive.Compression
+ // If Pod is set the container should be restored into the
+ // given Pod. If Pod is empty it is a restore without a Pod.
+ // Restoring a non-Pod container into a Pod, or a Pod container
+ // without a Pod, is theoretically possible, but will
+ // probably not work if a PID namespace is shared.
+ // With a shared PID namespace, PID 1 of the namespace lives in
+ // the infrastructure container; without the infrastructure
+ // container the namespace would have no PID 1, which is not
+ // possible.
+ Pod string
}
// Checkpoint checkpoints a container
@@ -811,7 +821,11 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO
// Restore restores a container
func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) error {
- logrus.Debugf("Trying to restore container %s", c.ID())
+ if options.Pod == "" {
+ logrus.Debugf("Trying to restore container %s", c.ID())
+ } else {
+ logrus.Debugf("Trying to restore container %s into pod %s", c.ID(), options.Pod)
+ }
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -840,7 +854,7 @@ func (c *Container) ShouldRestart(ctx context.Context) bool {
// CopyFromArchive copies the contents from the specified tarStream to path
// *inside* the container.
-func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, chown bool, tarStream io.Reader) (func() error, error) {
+func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, chown bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -850,7 +864,7 @@ func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, c
}
}
- return c.copyFromArchive(ctx, containerPath, chown, tarStream)
+ return c.copyFromArchive(ctx, containerPath, chown, rename, tarStream)
}
// CopyToArchive copies the contents from the specified path *inside* the
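
For callers of libpod, the new Pod field simply rides along on ContainerCheckpointOptions when restoring from an exported checkpoint. A rough sketch of how the option might be consumed is below; it assumes podman v3's module path, and only uses the Restore signature and the TargetFile/Pod fields shown in this diff:

```go
package checkpointsketch

import (
	"context"

	"github.com/containers/podman/v3/libpod"
)

// restoreIntoPod restores a container from an exported checkpoint archive
// into an existing pod by setting the new Pod field. The destination pod
// must share the same namespaces as the pod the container was
// checkpointed from.
func restoreIntoPod(ctx context.Context, ctr *libpod.Container, podNameOrID, archive string) error {
	opts := libpod.ContainerCheckpointOptions{
		TargetFile: archive,     // tarball created by checkpoint --export
		Pod:        podNameOrID, // join the pod's infra container namespaces
	}
	return ctr.Restore(ctx, opts)
}
```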
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index 01e7ecacb..a35824289 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -23,7 +23,7 @@ import (
"golang.org/x/sys/unix"
)
-func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool, reader io.Reader) (func() error, error) {
+func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool, rename map[string]string, reader io.Reader) (func() error, error) {
var (
mountPoint string
resolvedRoot string
@@ -89,6 +89,7 @@ func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool
GIDMap: c.config.IDMappings.GIDMap,
ChownDirs: idPair,
ChownFiles: idPair,
+ Rename: rename,
}
return c.joinMountAndExec(ctx,
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 638e0b756..8c662c488 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -618,38 +618,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
hostConfig.Tmpfs = tmpfs
// Network mode parsing.
- networkMode := ""
- switch {
- case c.config.CreateNetNS:
- // We actually store the network
- // mode for Slirp and Bridge, so
- // we can just use that
- networkMode = string(c.config.NetMode)
- case c.config.NetNsCtr != "":
- networkMode = fmt.Sprintf("container:%s", c.config.NetNsCtr)
- default:
- // Find the spec's network namespace.
- // If there is none, it's host networking.
- // If there is one and it has a path, it's "ns:".
- foundNetNS := false
- for _, ns := range ctrSpec.Linux.Namespaces {
- if ns.Type == spec.NetworkNamespace {
- foundNetNS = true
- if ns.Path != "" {
- networkMode = fmt.Sprintf("ns:%s", ns.Path)
- } else {
- // We're making a network ns, but not
- // configuring with Slirp or CNI. That
- // means it's --net=none
- networkMode = "none"
- }
- break
- }
- }
- if !foundNetNS {
- networkMode = "host"
- }
- }
+ networkMode := c.NetworkMode()
hostConfig.NetworkMode = networkMode
// Port bindings.
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index b69ad4105..bff64aa95 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -901,8 +901,27 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
}
func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
- if len(c.Dependencies()) > 0 {
- return errors.Errorf("Cannot export checkpoints of containers with dependencies")
+ if len(c.Dependencies()) == 1 {
+ // Check if the dependency is an infra container. If it is we can checkpoint
+ // the container out of the Pod.
+ if c.config.Pod == "" {
+ return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ }
+
+ pod, err := c.runtime.state.Pod(c.config.Pod)
+ if err != nil {
+ return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), c.config.Pod)
+ }
+ infraID, err := pod.InfraContainerID()
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieve infra container ID for pod %s", c.config.Pod)
+ }
+ if c.Dependencies()[0] != infraID {
+ return errors.Errorf("cannot export checkpoints of containers with dependencies")
+ }
+ }
+ if len(c.Dependencies()) > 1 {
+ return errors.Errorf("cannot export checkpoints of containers with dependencies")
}
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
@@ -1021,9 +1040,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return nil
}
-func (c *Container) checkpointRestoreSupported() error {
- if !criu.CheckForCriu() {
- return errors.Errorf("checkpoint/restore requires at least CRIU %d", criu.MinCriuVersion)
+func (c *Container) checkpointRestoreSupported(version int) error {
+ if !criu.CheckForCriu(version) {
+ return errors.Errorf("checkpoint/restore requires at least CRIU %d", version)
}
if !c.ociRuntime.SupportsCheckpoint() {
return errors.Errorf("configured runtime does not support checkpoint/restore")
@@ -1032,7 +1051,7 @@ func (c *Container) checkpointRestoreSupported() error {
}
func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
- if err := c.checkpointRestoreSupported(); err != nil {
+ if err := c.checkpointRestoreSupported(criu.MinCriuVersion); err != nil {
return err
}
@@ -1136,10 +1155,20 @@ func (c *Container) importPreCheckpoint(input string) error {
}
func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (retErr error) {
- if err := c.checkpointRestoreSupported(); err != nil {
+ minCriuVersion := func() int {
+ if options.Pod == "" {
+ return criu.MinCriuVersion
+ }
+ return criu.PodCriuVersion
+ }()
+ if err := c.checkpointRestoreSupported(minCriuVersion); err != nil {
return err
}
+ if options.Pod != "" && !crutils.CRRuntimeSupportsPodCheckpointRestore(c.ociRuntime.Path()) {
+ return errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
+ }
+
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
@@ -1247,6 +1276,83 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
}
+ if options.Pod != "" {
+ // Running in a Pod means that we have to change all namespace settings to
+ // the ones from the infrastructure container.
+ pod, err := c.runtime.LookupPod(options.Pod)
+ if err != nil {
+ return errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
+ }
+
+ infraContainer, err := pod.InfraContainer()
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
+ }
+
+ infraContainer.lock.Lock()
+ if err := infraContainer.syncContainer(); err != nil {
+ infraContainer.lock.Unlock()
+ return errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
+ }
+ if infraContainer.state.State != define.ContainerStateRunning {
+ if err := infraContainer.initAndStart(ctx); err != nil {
+ infraContainer.lock.Unlock()
+ return errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
+ }
+ }
+ infraContainer.lock.Unlock()
+
+ if c.config.IPCNsCtr != "" {
+ nsPath, err := infraContainer.namespacePath(IPCNS)
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
+ }
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), nsPath); err != nil {
+ return err
+ }
+ }
+
+ if c.config.NetNsCtr != "" {
+ nsPath, err := infraContainer.namespacePath(NetNS)
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
+ }
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), nsPath); err != nil {
+ return err
+ }
+ }
+
+ if c.config.PIDNsCtr != "" {
+ nsPath, err := infraContainer.namespacePath(PIDNS)
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod)
+ }
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), nsPath); err != nil {
+ return err
+ }
+ }
+
+ if c.config.UTSNsCtr != "" {
+ nsPath, err := infraContainer.namespacePath(UTSNS)
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod)
+ }
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), nsPath); err != nil {
+ return err
+ }
+ }
+
+ if c.config.CgroupNsCtr != "" {
+ nsPath, err := infraContainer.namespacePath(CgroupNS)
+ if err != nil {
+ return errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod)
+ }
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), nsPath); err != nil {
+ return err
+ }
+ }
+ }
+
if err := c.makeBindMounts(); err != nil {
return err
}
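
Joining the destination pod works by rewriting the restored container's OCI spec so each shared namespace points at the infra container's namespace path, via AddOrReplaceLinuxNamespace as shown above. A small standalone sketch of that generator call follows; the paths are made up, and it assumes the opencontainers runtime-tools generate package used by podman:

```go
package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	g, err := generate.New("linux")
	if err != nil {
		panic(err)
	}

	// Point the network and IPC namespaces at the (hypothetical) infra
	// container's namespace files, as the pod-restore path does.
	infraNetNS := "/proc/12345/ns/net" // illustrative path only
	infraIPCNS := "/proc/12345/ns/ipc"

	if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), infraNetNS); err != nil {
		panic(err)
	}
	if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), infraIPCNS); err != nil {
		panic(err)
	}

	fmt.Println("namespaces rewritten to join the pod's infra container")
}
```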
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 2914bd1a1..846d3815a 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -1064,6 +1064,30 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if restoreOptions.TCPEstablished {
args = append(args, "--runtime-opt", "--tcp-established")
}
+ if restoreOptions.Pod != "" {
+ mountLabel := ctr.config.MountLabel
+ processLabel := ctr.config.ProcessLabel
+ if mountLabel != "" {
+ args = append(
+ args,
+ "--runtime-opt",
+ fmt.Sprintf(
+ "--lsm-mount-context=%s",
+ mountLabel,
+ ),
+ )
+ }
+ if processLabel != "" {
+ args = append(
+ args,
+ "--runtime-opt",
+ fmt.Sprintf(
+ "--lsm-profile=selinux:%s",
+ processLabel,
+ ),
+ )
+ }
+ }
}
logrus.WithFields(logrus.Fields{
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index ce4c5d758..31e2d09ce 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -894,6 +894,18 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
return r.state.LookupContainer(idOrName)
}
+// LookupContainerID looks up a container ID by its name or a partial ID
+// If a partial ID is not unique, an error will be returned
+func (r *Runtime) LookupContainerID(idOrName string) (string, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if !r.valid {
+ return "", define.ErrRuntimeStopped
+ }
+ return r.state.LookupContainerID(idOrName)
+}
+
// GetContainers retrieves all containers from the state
// Filters can be provided which will determine what containers are included in
// the output. Multiple filters are handled by ANDing their output, so only
diff --git a/pkg/api/handlers/compat/containers_archive.go b/pkg/api/handlers/compat/containers_archive.go
index a9d74e5f4..541f702e7 100644
--- a/pkg/api/handlers/compat/containers_archive.go
+++ b/pkg/api/handlers/compat/containers_archive.go
@@ -1,6 +1,7 @@
package compat
import (
+ "encoding/json"
"fmt"
"net/http"
"os"
@@ -93,8 +94,9 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder, runtime *libpod.Runtime) {
query := struct {
- Path string `schema:"path"`
- Chown bool `schema:"copyUIDGID"`
+ Path string `schema:"path"`
+ Chown bool `schema:"copyUIDGID"`
+ Rename string `schema:"rename"`
// TODO handle params below
NoOverwriteDirNonDir bool `schema:"noOverwriteDirNonDir"`
}{
@@ -107,10 +109,19 @@ func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder,
return
}
+ var rename map[string]string
+ if query.Rename != "" {
+ if err := json.Unmarshal([]byte(query.Rename), &rename); err != nil {
+ utils.Error(w, "Bad Request.", http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
+ return
+ }
+ }
+
containerName := utils.GetName(r)
containerEngine := abi.ContainerEngine{Libpod: runtime}
- copyFunc, err := containerEngine.ContainerCopyFromArchive(r.Context(), containerName, query.Path, r.Body, entities.CopyOptions{Chown: query.Chown})
+ copyOptions := entities.CopyOptions{Chown: query.Chown, Rename: rename}
+ copyFunc, err := containerEngine.ContainerCopyFromArchive(r.Context(), containerName, query.Path, r.Body, copyOptions)
if errors.Cause(err) == define.ErrNoSuchCtr || os.IsNotExist(err) {
// 404 is returned for an absent container and path. The
// clients must deal with it accordingly.
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 64805b7fa..2c98a5361 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -393,16 +393,16 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
defer auth.RemoveAuthfile(authfile)
// Channels all mux'ed in select{} below to follow API build protocol
- stdout := channel.NewWriter(make(chan []byte, 1))
+ stdout := channel.NewWriter(make(chan []byte))
defer stdout.Close()
- auxout := channel.NewWriter(make(chan []byte, 1))
+ auxout := channel.NewWriter(make(chan []byte))
defer auxout.Close()
- stderr := channel.NewWriter(make(chan []byte, 1))
+ stderr := channel.NewWriter(make(chan []byte))
defer stderr.Close()
- reporter := channel.NewWriter(make(chan []byte, 1))
+ reporter := channel.NewWriter(make(chan []byte))
defer reporter.Close()
runtime := r.Context().Value("runtime").(*libpod.Runtime)
@@ -529,7 +529,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
enc := json.NewEncoder(body)
enc.SetEscapeHTML(true)
-loop:
+
for {
m := struct {
Stream string `json:"stream,omitempty"`
@@ -543,13 +543,13 @@ loop:
stderr.Write([]byte(err.Error()))
}
flush()
- case e := <-auxout.Chan():
+ case e := <-reporter.Chan():
m.Stream = string(e)
if err := enc.Encode(m); err != nil {
stderr.Write([]byte(err.Error()))
}
flush()
- case e := <-reporter.Chan():
+ case e := <-auxout.Chan():
m.Stream = string(e)
if err := enc.Encode(m); err != nil {
stderr.Write([]byte(err.Error()))
@@ -561,8 +561,8 @@ loop:
logrus.Warnf("Failed to json encode error %v", err)
}
flush()
+ return
case <-runCtx.Done():
- flush()
if success {
if !utils.IsLibpodRequest(r) {
m.Stream = fmt.Sprintf("Successfully built %12.12s\n", imageID)
@@ -579,7 +579,8 @@ loop:
}
}
}
- break loop
+ flush()
+ return
case <-r.Context().Done():
cancel()
logrus.Infof("Client disconnect reported for build %q / %q.", registry, query.Dockerfile)
diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go
index ee7449fbb..82d72ee6a 100644
--- a/pkg/api/server/register_archive.go
+++ b/pkg/api/server/register_archive.go
@@ -151,6 +151,10 @@ func (s *APIServer) registerArchiveHandlers(r *mux.Router) error {
// type: string
// description: Path to a directory in the container to extract
// required: true
+ // - in: query
+ // name: rename
+ // type: string
+ // description: JSON encoded map[string]string to translate paths
// responses:
// 200:
// description: no error
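
A minimal sketch, using only Go's standard library, of how a client could encode the rename query parameter documented above for the Docker-compatible archive PUT route; the paths and container name are illustrative, not part of the patch:

package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// rename is a JSON-encoded map[string]string, as described above.
	rename := map[string]string{"/old/name": "/new/name"}
	encoded, err := json.Marshal(rename)
	if err != nil {
		panic(err)
	}

	q := url.Values{}
	q.Set("path", "/tmp")            // directory in the container to extract into
	q.Set("rename", string(encoded)) // path translation map
	fmt.Println("PUT /containers/mycontainer/archive?" + q.Encode())
}
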
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index 1e8faf8f5..72ae27276 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -90,11 +90,10 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
server := APIServer{
Server: http.Server{
- Handler: router,
- ReadHeaderTimeout: 20 * time.Second,
- IdleTimeout: duration * 2,
- ConnState: idle.ConnState,
- ErrorLog: log.New(logrus.StandardLogger().Out, "", 0),
+ Handler: router,
+ IdleTimeout: duration * 2,
+ ConnState: idle.ConnState,
+ ErrorLog: log.New(logrus.StandardLogger().Out, "", 0),
},
Decoder: handlers.NewAPIDecoder(),
idleTracker: idle,
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index fd93c5ac7..62b1655ac 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -327,7 +327,7 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string,
uri := fmt.Sprintf("http://d/v%d.%d.%d/libpod"+endpoint, params...)
logrus.Debugf("DoRequest Method: %s URI: %v", httpMethod, uri)
- req, err := http.NewRequest(httpMethod, uri, httpBody)
+ req, err := http.NewRequestWithContext(context.WithValue(context.Background(), clientKey, c), httpMethod, uri, httpBody)
if err != nil {
return nil, err
}
@@ -337,7 +337,6 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string,
for key, val := range header {
req.Header.Set(key, val)
}
- req = req.WithContext(context.WithValue(context.Background(), clientKey, c))
// Give the Do three chances in the case of a comm/service hiccup
for i := 0; i < 3; i++ {
response, err = c.Client.Do(req) // nolint
diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go
index 1058c7a48..cf088441f 100644
--- a/pkg/bindings/containers/types.go
+++ b/pkg/bindings/containers/types.go
@@ -62,6 +62,7 @@ type RestoreOptions struct {
Keep *bool
Name *string
TCPEstablished *bool
+ Pod *string
}
//go:generate go run ../generator/generator.go CreateOptions
@@ -263,4 +264,6 @@ type CopyOptions struct {
// If used with CopyFromArchive and set to true it will change ownership of files from the source tar archive
// to the primary uid/gid of the target container.
Chown *bool `schema:"copyUIDGID"`
+ // Map to translate path names.
+ Rename map[string]string
}
diff --git a/pkg/bindings/containers/types_copy_options.go b/pkg/bindings/containers/types_copy_options.go
index 12ad085fd..0624b450e 100644
--- a/pkg/bindings/containers/types_copy_options.go
+++ b/pkg/bindings/containers/types_copy_options.go
@@ -35,3 +35,19 @@ func (o *CopyOptions) GetChown() bool {
}
return *o.Chown
}
+
+// WithRename sets the map used to rename paths when copying into the container.
+func (o *CopyOptions) WithRename(value map[string]string) *CopyOptions {
+ v := value
+ o.Rename = v
+ return o
+}
+
+// GetRename returns the path-rename map, which may be nil if unset.
+func (o *CopyOptions) GetRename() map[string]string {
+ var rename map[string]string
+ if o.Rename == nil {
+ return rename
+ }
+ return o.Rename
+}
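
A short sketch, assuming a bindings connection context and a tar stream, of how the generated WithChown/WithRename setters combine with CopyFromArchiveWithOptions (mirroring the tunnel code further down in this patch; the container name and paths are illustrative):

package sketch

import (
	"context"
	"io"

	"github.com/containers/podman/v3/pkg/bindings/containers"
)

// copyWithRename copies a tar stream into a container, renaming one path on
// the way in. ctx must come from bindings.NewConnection.
func copyWithRename(ctx context.Context, reader io.Reader) error {
	opts := new(containers.CopyOptions).
		WithChown(true).
		WithRename(map[string]string{"/old/name": "/new/name"})
	copyFunc, err := containers.CopyFromArchiveWithOptions(ctx, "mycontainer", "/tmp", reader, opts)
	if err != nil {
		return err
	}
	// The returned function performs the actual copy when invoked.
	return copyFunc()
}
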
diff --git a/pkg/bindings/containers/types_restore_options.go b/pkg/bindings/containers/types_restore_options.go
index ea6c810a2..820a7696f 100644
--- a/pkg/bindings/containers/types_restore_options.go
+++ b/pkg/bindings/containers/types_restore_options.go
@@ -131,3 +131,19 @@ func (o *RestoreOptions) GetTCPEstablished() bool {
}
return *o.TCPEstablished
}
+
+// WithPod sets the name or ID of the pod to restore the container into.
+func (o *RestoreOptions) WithPod(value string) *RestoreOptions {
+ v := &value
+ o.Pod = v
+ return o
+}
+
+// GetPod returns the pod name or ID, or the empty string if unset.
+func (o *RestoreOptions) GetPod() string {
+ var pod string
+ if o.Pod == nil {
+ return pod
+ }
+ return *o.Pod
+}
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 142204f27..a35f461a7 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -391,42 +391,50 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
dec := json.NewDecoder(body)
var id string
- var mErr error
for {
var s struct {
Stream string `json:"stream,omitempty"`
Error string `json:"error,omitempty"`
}
- if err := dec.Decode(&s); err != nil {
- if errors.Is(err, io.EOF) {
- if mErr == nil && id == "" {
- mErr = errors.New("stream dropped, unexpected failure")
- }
- break
- }
- s.Error = err.Error() + "\n"
- }
select {
+ // FIXME(vrothberg): it seems we always hit the EOF case below,
+ // even when the server quits, but it would be desirable to
+ // distinguish a proper build completion from a transient EOF.
case <-response.Request.Context().Done():
- return &entities.BuildReport{ID: id}, mErr
+ return &entities.BuildReport{ID: id}, nil
default:
// non-blocking select
}
+ if err := dec.Decode(&s); err != nil {
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ return nil, errors.Wrap(err, "server probably quit")
+ }
+ // EOF means the stream is over in which case we need
+ // to have read the id.
+ if errors.Is(err, io.EOF) && id != "" {
+ break
+ }
+ return &entities.BuildReport{ID: id}, errors.Wrap(err, "decoding stream")
+ }
+
switch {
case s.Stream != "":
- stdout.Write([]byte(s.Stream))
- if iidRegex.Match([]byte(s.Stream)) {
+ raw := []byte(s.Stream)
+ stdout.Write(raw)
+ if iidRegex.Match(raw) {
id = strings.TrimSuffix(s.Stream, "\n")
}
case s.Error != "":
- mErr = errors.New(s.Error)
+ // If there's an error, return directly. The stream
+ // will be closed on return.
+ return &entities.BuildReport{ID: id}, errors.New(s.Error)
default:
return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input")
}
}
- return &entities.BuildReport{ID: id}, mErr
+ return &entities.BuildReport{ID: id}, nil
}
func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go
index 0d45cab5f..9fdf04933 100644
--- a/pkg/checkpoint/checkpoint_restore.go
+++ b/pkg/checkpoint/checkpoint_restore.go
@@ -9,6 +9,9 @@ import (
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v3/libpod"
+ ann "github.com/containers/podman/v3/pkg/annotations"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
+ "github.com/containers/podman/v3/pkg/criu"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/specgen/generate"
@@ -68,6 +71,14 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
return nil, err
}
+ if ctrConfig.Pod != "" && restoreOptions.Pod == "" {
+ return nil, errors.New("cannot restore pod container without --pod")
+ }
+
+ if ctrConfig.Pod == "" && restoreOptions.Pod != "" {
+ return nil, errors.New("cannot restore a non-pod container into a pod")
+ }
+
// This should not happen as checkpoints with these options are not exported.
if len(ctrConfig.Dependencies) > 0 {
return nil, errors.Errorf("Cannot import checkpoints of containers with dependencies")
@@ -96,6 +107,91 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
newName = true
}
+ if restoreOptions.Pod != "" {
+ // Restoring into a Pod requires much newer versions of CRIU
+ if !criu.CheckForCriu(criu.PodCriuVersion) {
+ return nil, errors.Errorf("restoring containers into pods requires at least CRIU %d", criu.PodCriuVersion)
+ }
+ // The runtime also has to support it
+ if !crutils.CRRuntimeSupportsPodCheckpointRestore(runtime.GetOCIRuntimePath()) {
+ return nil, errors.Errorf("runtime %s does not support pod restore", runtime.GetOCIRuntimePath())
+ }
+ // Restoring into an existing Pod
+ ctrConfig.Pod = restoreOptions.Pod
+
+ // According to `podman pod create`, a pod can share the following namespaces:
+ // cgroup, ipc, net, pid, uts
+ // Let's make sure we are restoring into a pod with the same shared namespaces.
+ pod, err := runtime.LookupPod(ctrConfig.Pod)
+ if err != nil {
+ return nil, errors.Wrapf(err, "pod %q cannot be retrieved", ctrConfig.Pod)
+ }
+
+ infraContainer, err := pod.InfraContainer()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot retrieve infra container from pod %q", ctrConfig.Pod)
+ }
+
+ // If a namespace was shared (!= "") it needs to be set to the new infrastructure container.
+ // If the infrastructure container does not share the same namespaces as the
+ // container to be restored, we abort.
+ if ctrConfig.IPCNsCtr != "" {
+ if !pod.SharesIPC() {
+ return nil, errors.Errorf("pod %s does not share the IPC namespace", ctrConfig.Pod)
+ }
+ ctrConfig.IPCNsCtr = infraContainer.ID()
+ }
+
+ if ctrConfig.NetNsCtr != "" {
+ if !pod.SharesNet() {
+ return nil, errors.Errorf("pod %s does not share the network namespace", ctrConfig.Pod)
+ }
+ ctrConfig.NetNsCtr = infraContainer.ID()
+ }
+
+ if ctrConfig.PIDNsCtr != "" {
+ if !pod.SharesPID() {
+ return nil, errors.Errorf("pod %s does not share the PID namespace", ctrConfig.Pod)
+ }
+ ctrConfig.PIDNsCtr = infraContainer.ID()
+ }
+
+ if ctrConfig.UTSNsCtr != "" {
+ if !pod.SharesUTS() {
+ return nil, errors.Errorf("pod %s does not share the UTS namespace", ctrConfig.Pod)
+ }
+ ctrConfig.UTSNsCtr = infraContainer.ID()
+ }
+
+ if ctrConfig.CgroupNsCtr != "" {
+ if !pod.SharesCgroup() {
+ return nil, errors.Errorf("pod %s does not share the cgroup namespace", ctrConfig.Pod)
+ }
+ ctrConfig.CgroupNsCtr = infraContainer.ID()
+ }
+
+ // Change SELinux labels to infrastructure container labels
+ ctrConfig.MountLabel = infraContainer.MountLabel()
+ ctrConfig.ProcessLabel = infraContainer.ProcessLabel()
+
+ // Fix parent cgroup
+ cgroupPath, err := pod.CgroupPath()
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot retrieve cgroup path from pod %q", ctrConfig.Pod)
+ }
+ ctrConfig.CgroupParent = cgroupPath
+
+ oldPodID := dumpSpec.Annotations[ann.SandboxID]
+ // Fix up SandboxID in the annotations
+ dumpSpec.Annotations[ann.SandboxID] = ctrConfig.Pod
+ // Fix up CreateCommand
+ for i, c := range ctrConfig.CreateCommand {
+ if c == oldPodID {
+ ctrConfig.CreateCommand[i] = ctrConfig.Pod
+ }
+ }
+ }
+
if len(restoreOptions.PublishPorts) > 0 {
ports, _, _, err := generate.ParsePortMapping(restoreOptions.PublishPorts)
if err != nil {
diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
index 53ff55865..3b77368bb 100644
--- a/pkg/checkpoint/crutils/checkpoint_restore_utils.go
+++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
@@ -1,6 +1,7 @@
package crutils
import (
+ "bytes"
"io"
"os"
"os/exec"
@@ -189,3 +190,13 @@ func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool {
}
return false
}
+
+// CRRuntimeSupportsPodCheckpointRestore tests if the runtime at 'runtimePath'
+// supports restoring into existing Pods. The runtime needs to support
+// the CRIU option --lsm-mount-context; this function checks for its
+// existence. In addition, at least CRIU 3.16 is necessary.
+func CRRuntimeSupportsPodCheckpointRestore(runtimePath string) bool {
+ cmd := exec.Command(runtimePath, "restore", "--lsm-mount-context")
+ out, _ := cmd.CombinedOutput()
+ return bytes.Contains(out, []byte("flag needs an argument"))
+}
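
A minimal sketch, assuming the criu and crutils packages patched here, of how the two capability gates are combined before attempting a pod restore (mirroring pkg/checkpoint/checkpoint_restore.go above; the helper name and runtime path are hypothetical):

package main

import (
	"fmt"
	"os"

	"github.com/containers/podman/v3/pkg/checkpoint/crutils"
	"github.com/containers/podman/v3/pkg/criu"
)

// canRestoreIntoPod reports whether both CRIU and the OCI runtime at
// runtimePath are new enough to restore a container into an existing pod.
func canRestoreIntoPod(runtimePath string) error {
	if !criu.CheckForCriu(criu.PodCriuVersion) {
		return fmt.Errorf("restoring containers into pods requires at least CRIU %d", criu.PodCriuVersion)
	}
	if !crutils.CRRuntimeSupportsPodCheckpointRestore(runtimePath) {
		return fmt.Errorf("runtime %s does not support pod restore", runtimePath)
	}
	return nil
}

func main() {
	if err := canRestoreIntoPod("/usr/bin/runc"); err != nil { // runtime path is illustrative
		fmt.Fprintln(os.Stderr, err)
	}
}
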
diff --git a/pkg/copy/parse.go b/pkg/copy/parse.go
index 39e0e1547..93edec5fa 100644
--- a/pkg/copy/parse.go
+++ b/pkg/copy/parse.go
@@ -18,18 +18,6 @@ func ParseSourceAndDestination(source, destination string) (string, string, stri
sourceContainer, sourcePath := parseUserInput(source)
destContainer, destPath := parseUserInput(destination)
- numContainers := 0
- if len(sourceContainer) > 0 {
- numContainers++
- }
- if len(destContainer) > 0 {
- numContainers++
- }
-
- if numContainers != 1 {
- return "", "", "", "", errors.Errorf("invalid arguments %q, %q: exactly 1 container expected but %d specified", source, destination, numContainers)
- }
-
if len(sourcePath) == 0 || len(destPath) == 0 {
return "", "", "", "", errors.Errorf("invalid arguments %q, %q: you must specify paths", source, destination)
}
diff --git a/pkg/criu/criu.go b/pkg/criu/criu.go
index f4cce238a..2a6805979 100644
--- a/pkg/criu/criu.go
+++ b/pkg/criu/criu.go
@@ -1,17 +1,21 @@
package criu
import (
- "github.com/checkpoint-restore/go-criu"
+ "github.com/checkpoint-restore/go-criu/v5"
)
// MinCriuVersion for Podman at least CRIU 3.11 is required
const MinCriuVersion = 31100
+// PodCriuVersion is the version of CRIU needed for
+// checkpointing and restoring containers out of and into Pods.
+const PodCriuVersion = 31600
+
// CheckForCriu uses CRIU's go bindings to check if the CRIU
// binary exists and if it is at least the version Podman needs.
-func CheckForCriu() bool {
+func CheckForCriu(version int) bool {
c := criu.MakeCriu()
- result, err := c.IsCriuAtLeast(MinCriuVersion)
+ result, err := c.IsCriuAtLeast(version)
if err != nil {
return false
}
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index 4d85941cd..564921c52 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -165,6 +165,8 @@ type CopyOptions struct {
// it will change ownership of files from the source tar archive
// to the primary uid/gid of the destination container.
Chown bool
+ // Map to translate path names.
+ Rename map[string]string
}
type CommitReport struct {
@@ -207,6 +209,7 @@ type RestoreOptions struct {
TCPEstablished bool
ImportPrevious string
PublishPorts []specgen.PortMapping
+ Pod string
}
type RestoreReport struct {
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 62e83fab3..d573e4704 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -8,7 +8,6 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/domain/entities/reports"
"github.com/containers/podman/v3/pkg/specgen"
- "github.com/spf13/cobra"
)
type ContainerCopyFunc func() error
@@ -82,7 +81,7 @@ type ContainerEngine interface {
PodStop(ctx context.Context, namesOrIds []string, options PodStopOptions) ([]*PodStopReport, error)
PodTop(ctx context.Context, options PodTopOptions) (*StringSliceReport, error)
PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error)
- SetupRootless(ctx context.Context, cmd *cobra.Command) error
+ SetupRootless(ctx context.Context, noMoveProcess bool) error
SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error)
SecretInspect(ctx context.Context, nameOrIDs []string) ([]*SecretInfoReport, []error, error)
SecretList(ctx context.Context) ([]*SecretInfoReport, error)
diff --git a/pkg/domain/filters/containers.go b/pkg/domain/filters/containers.go
index 965a12468..dc9fed2a4 100644
--- a/pkg/domain/filters/containers.go
+++ b/pkg/domain/filters/containers.go
@@ -211,6 +211,36 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
}, nil
case "network":
return func(c *libpod.Container) bool {
+ networkMode := c.NetworkMode()
+ // support docker-like `--filter network=container:<IDorName>`
+ // check if networkMode is configured as `container:<ctr>`
+ // perform a match against the filter `container:<IDorName>`
+ // networks will already be empty if `container:<ctr>` is configured as the network mode
+ if strings.HasPrefix(networkMode, "container:") {
+ networkModeContainerPart := strings.SplitN(networkMode, ":", 2)
+ if len(networkModeContainerPart) < 2 {
+ return false
+ }
+ networkModeContainerID := networkModeContainerPart[1]
+ for _, val := range filterValues {
+ if strings.HasPrefix(val, "container:") {
+ filterNetworkModePart := strings.SplitN(val, ":", 2)
+ if len(filterNetworkModePart) < 2 {
+ return false
+ }
+ filterNetworkModeIDorName := filterNetworkModePart[1]
+ filterID, err := r.LookupContainerID(filterNetworkModeIDorName)
+ if err != nil {
+ return false
+ }
+ if filterID == networkModeContainerID {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
networks, _, err := c.Networks()
// if err or no networks, quick out
if err != nil || len(networks) == 0 {
diff --git a/pkg/domain/infra/abi/archive.go b/pkg/domain/infra/abi/archive.go
index 1a5bb6dc4..b60baa935 100644
--- a/pkg/domain/infra/abi/archive.go
+++ b/pkg/domain/infra/abi/archive.go
@@ -12,7 +12,7 @@ func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrI
if err != nil {
return nil, err
}
- return container.CopyFromArchive(ctx, containerPath, options.Chown, reader)
+ return container.CopyFromArchive(ctx, containerPath, options.Chown, options.Rename, reader)
}
func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID string, containerPath string, writer io.Writer) (entities.ContainerCopyFunc, error) {
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 2c89fc66b..2003879b8 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -529,6 +529,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
IgnoreStaticIP: options.IgnoreStaticIP,
IgnoreStaticMAC: options.IgnoreStaticMAC,
ImportPrevious: options.ImportPrevious,
+ Pod: options.Pod,
}
filterFuncs := []libpod.ContainerFilter{
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 155cda21d..bc98edd06 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -24,7 +24,6 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -57,7 +56,7 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
return info, err
}
-func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error {
+func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error {
// do it only after podman has already re-execed and running with uid==0.
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
if err != nil {
@@ -104,6 +103,9 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
if became {
os.Exit(ret)
}
+ if noMoveProcess {
+ return nil
+ }
// if there is no pid file, try to join existing containers, and create a pause process.
ctrs, err := ic.Libpod.GetRunningContainers()
@@ -118,9 +120,10 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
}
became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths)
+
if err := movePauseProcessToScope(ic.Libpod); err != nil {
- conf, err := ic.Config(context.Background())
- if err != nil {
+ conf, err2 := ic.Config(context.Background())
+ if err2 != nil {
return err
}
if conf.Engine.CgroupManager == config.SystemdCgroupsManager {
@@ -148,7 +151,6 @@ func movePauseProcessToScope(r *libpod.Runtime) error {
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
}
-
data, err := ioutil.ReadFile(pausePidPath)
if err != nil {
return errors.Wrapf(err, "cannot read pause pid file")
diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go
index ca201b5ae..177e9cff4 100644
--- a/pkg/domain/infra/runtime_abi.go
+++ b/pkg/domain/infra/runtime_abi.go
@@ -33,6 +33,7 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
r, err := NewLibpodImageRuntime(facts.FlagSet, facts)
return r, err
case entities.TunnelMode:
+ // TODO: look at me!
ctx, err := bindings.NewConnectionWithIdentity(context.Background(), facts.URI, facts.Identity)
return &tunnel.ImageEngine{ClientCtx: ctx}, err
}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index d7e8a4e46..58f9c5fb0 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -853,7 +853,8 @@ func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, o
}
func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrID, path string, reader io.Reader, options entities.CopyOptions) (entities.ContainerCopyFunc, error) {
- return containers.CopyFromArchiveWithOptions(ic.ClientCtx, nameOrID, path, reader, new(containers.CopyOptions).WithChown(options.Chown))
+ copyOptions := new(containers.CopyOptions).WithChown(options.Chown).WithRename(options.Rename)
+ return containers.CopyFromArchiveWithOptions(ic.ClientCtx, nameOrID, path, reader, copyOptions)
}
func (ic *ContainerEngine) ContainerCopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (entities.ContainerCopyFunc, error) {
diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go
index 7400d3771..6b43cf038 100644
--- a/pkg/domain/infra/tunnel/system.go
+++ b/pkg/domain/infra/tunnel/system.go
@@ -7,14 +7,13 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/bindings/system"
"github.com/containers/podman/v3/pkg/domain/entities"
- "github.com/spf13/cobra"
)
func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
return system.Info(ic.ClientCtx, nil)
}
-func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error {
+func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error {
panic(errors.New("rootless engine mode is not supported when tunneling"))
}
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index e5f9e88d9..4d8443fcb 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -465,38 +465,43 @@ reexec_in_user_namespace_wait (int pid, int options)
static int
create_pause_process (const char *pause_pid_file_path, char **argv)
{
- int r, p[2];
+ pid_t pid;
+ int p[2];
if (pipe (p) < 0)
- _exit (EXIT_FAILURE);
+ return -1;
- r = fork ();
- if (r < 0)
- _exit (EXIT_FAILURE);
+ pid = fork ();
+ if (pid < 0)
+ {
+ close (p[0]);
+ close (p[1]);
+ return -1;
+ }
- if (r)
+ if (pid)
{
char b;
+ int r;
close (p[1]);
/* Block until we write the pid file. */
r = TEMP_FAILURE_RETRY (read (p[0], &b, 1));
close (p[0]);
- reexec_in_user_namespace_wait (r, 0);
+ reexec_in_user_namespace_wait (pid, 0);
return r == 1 && b == '0' ? 0 : -1;
}
else
{
- int fd;
- pid_t pid;
+ int r, fd;
close (p[0]);
setsid ();
pid = fork ();
- if (r < 0)
+ if (pid < 0)
_exit (EXIT_FAILURE);
if (pid)
diff --git a/pkg/rootless/rootless_linux.go b/pkg/rootless/rootless_linux.go
index f76eab0e3..9ef56acb4 100644
--- a/pkg/rootless/rootless_linux.go
+++ b/pkg/rootless/rootless_linux.go
@@ -14,11 +14,13 @@ import (
"os/user"
"runtime"
"strconv"
+ "strings"
"sync"
"unsafe"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/storage/pkg/idtools"
+ pmount "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -235,6 +237,24 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo
return false, 0, nil
}
+ if mounts, err := pmount.GetMounts(); err == nil {
+ for _, m := range mounts {
+ if m.Mountpoint == "/" {
+ isShared := false
+ for _, o := range strings.Split(m.Optional, ",") {
+ if strings.HasPrefix(o, "shared:") {
+ isShared = true
+ break
+ }
+ }
+ if !isShared {
logrus.Warningf("%q is not a shared mount; this could cause issues or missing mounts with rootless containers", m.Mountpoint)
+ }
+ break
+ }
+ }
+ }
+
cPausePid := C.CString(pausePid)
defer C.free(unsafe.Pointer(cPausePid))
diff --git a/test/apiv2/python/rest_api/test_v2_0_0_container.py b/test/apiv2/python/rest_api/test_v2_0_0_container.py
index f252bd401..30d902d8c 100644
--- a/test/apiv2/python/rest_api/test_v2_0_0_container.py
+++ b/test/apiv2/python/rest_api/test_v2_0_0_container.py
@@ -33,9 +33,10 @@ class ContainerTestCase(APITestCase):
self.assertId(r.content)
_ = parse(r.json()["Created"])
+
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
- json={"Cmd": ["top"], "Image": "alpine:latest"},
+ json={"Healthcheck": {"Test": ["CMD-SHELL", "exit 0"], "Interval":1000, "Timeout":1000, "Retries": 5}, "Cmd": ["top"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
@@ -49,6 +50,13 @@ class ContainerTestCase(APITestCase):
state = out["State"]["Health"]
self.assertIsInstance(state, dict)
+ r = requests.get(self.uri(f"/containers/{payload['Id']}/json"))
+ self.assertEqual(r.status_code, 200, r.text)
+ self.assertId(r.content)
+ out = r.json()
+ hc = out["Config"]["Healthcheck"]["Test"]
+ self.assertListEqual(["CMD-SHELL", "exit 0"], hc)
+
def test_stats(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas
index 41c84257f..18b3d56f9 100755
--- a/test/buildah-bud/apply-podman-deltas
+++ b/test/buildah-bud/apply-podman-deltas
@@ -56,22 +56,31 @@ function errmsg() {
done
}
-# skip: used to add a 'skip' to one specific test
-function skip() {
+# _skip: used to add a 'skip' or 'skip_if_remote' to one specific test
+function _skip() {
+ local skip=$1; shift
local reason=$1; shift
# All further arguments are test names
for t in "$@"; do
if fgrep -qx "@test \"$t\" {" $BUD; then
- $ECHO "@test \"$t\" : skip \"$reason\""
+ $ECHO "@test \"$t\" : $skip \"$reason\""
t=${t//\//\\/}
- sed -i -e "/^\@test \"$t\" {/ a \ \ skip \"$reason\"" $BUD
+ sed -i -e "/^\@test \"$t\" {/ a \ \ $skip \"$reason\"" $BUD
else
- warn "[skip] Did not find test \"$t\" in $BUD"
+ warn "[$skip] Did not find test \"$t\" in $BUD"
fi
done
}
+function skip() {
+ _skip "skip" "$@"
+}
+
+function skip_if_remote() {
+ _skip "skip_if_remote" "$@"
+}
+
# END handlers
###############################################################################
# BEGIN user-customizable section
@@ -79,14 +88,14 @@ function skip() {
# These are the hand-maintained exceptions. This is what you want to edit
# or update as needed.
#
-# There are two directives you can use below:
+# There are three directives you can use below:
#
# errmsg "old-message" "new-message" "test name" ["test name"...]
#
# This replaces "old-message" with "new-message" in @test "test name".
# It is used when a podman error message differs from buildah's.
#
-# skip "reason" "test name" ["test name"...]
+# [skip | skip_if_remote] "reason" "test name" ["test name"...]
#
# This adds a 'skip' statement as the first line of @test "test name".
# It is used when a test does not work in podman, either for permanent
@@ -126,6 +135,7 @@ errmsg "no such file or directory" \
###############################################################################
# BEGIN tests that don't make sense under podman due to fundamental differences
+
skip "N/A under podman" \
"bud-flags-order-verification"
@@ -147,13 +157,13 @@ skip "Too much effort to spin up a local registry" \
skip "FIXME FIXME FIXME: argument-order incompatible with podman" \
"bud-squash-hardlinks"
-skip "FIXME FIXME FIXME we'll figure these out later" \
- "bud-multi-stage-nocache-nocommit" \
- "bud with --cgroup-parent"
+skip "FIXME FIXME FIXME: this passes on Ed's laptop, fails in CI??" \
+ "bud-multi-stage-nocache-nocommit"
-# see https://github.com/containers/podman/pull/10147#issuecomment-832503633
-skip "FIXME FIXME FIXME podman save/load has been fixed (but not yet used in Buildah CI)" \
- "bud with --layers and --no-cache flags"
+# This will probably never work: buildah and podman have incompatible defaults
+# Documented in https://github.com/containers/podman/issues/10412
+skip "buildah runs with --cgroup-manager=cgroupfs, podman with systemd" \
+ "bud with --cgroup-parent"
# see https://github.com/containers/podman/pull/10829
skip "FIXME FIXME FIXME - requires updated CI images (#10829)" \
@@ -164,6 +174,31 @@ skip "FIXME FIXME FIXME - requires updated CI images (#10829)" \
###############################################################################
+# BEGIN tests which are skipped because they make no sense under podman-remote
+
+skip_if_remote "--target does not work with podman-remote" \
+ "bud-target"
+
+skip_if_remote "--runtime not meaningful under podman-remote" \
+ "bud with --runtime and --runtime-flag"
+
+skip_if_remote "secret files not implemented under podman-remote" \
+ "bud with containerfile secret" \
+ "bud with containerfile secret accessed on second RUN" \
+ "bud with containerfile secret options"
+
+skip_if_remote "volumes don't work with podman-remote" \
+ "buildah bud --volume" \
+ "buildah-bud-policy"
+
+# See podman #9890 for discussion
+skip_if_remote "--stdin option will not be implemented in podman-remote" \
+ "bud test no --stdin"
+
+###############################################################################
+# BEGIN tests which are skipped due to actual podman-remote bugs.
+
+###############################################################################
# Done.
exit $RC
diff --git a/test/buildah-bud/buildah-tests.diff b/test/buildah-bud/buildah-tests.diff
index 1c8592f7f..66d470648 100644
--- a/test/buildah-bud/buildah-tests.diff
+++ b/test/buildah-bud/buildah-tests.diff
@@ -1,23 +1,75 @@
-From a00508656599b24776982996fdb44d4874338fd4 Mon Sep 17 00:00:00 2001
+From d684753d6f00ee95720d8fb2e09c7ac19b37b01e Mon Sep 17 00:00:00 2001
From: Ed Santiago <santiago@redhat.com>
Date: Tue, 9 Feb 2021 17:28:05 -0700
Subject: [PATCH] tweaks for running buildah tests under podman
Signed-off-by: Ed Santiago <santiago@redhat.com>
---
- tests/helpers.bash | 28 +++++++++++++++++++++++++---
- 1 file changed, 25 insertions(+), 3 deletions(-)
+ tests/helpers.bash | 71 +++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 67 insertions(+), 4 deletions(-)
diff --git a/tests/helpers.bash b/tests/helpers.bash
-index 11deb367..08e73954 100644
+index 11deb367..44c71dad 100644
--- a/tests/helpers.bash
+++ b/tests/helpers.bash
-@@ -164,15 +164,37 @@ function run_buildah() {
+@@ -34,6 +34,23 @@ function setup() {
+ ROOTDIR_OPTS="--root ${TESTDIR}/root --runroot ${TESTDIR}/runroot --storage-driver ${STORAGE_DRIVER}"
+ BUILDAH_REGISTRY_OPTS="--registries-conf ${TESTSDIR}/registries.conf --registries-conf-dir ${TESTDIR}/registries.d --short-name-alias-conf ${TESTDIR}/cache/shortnames.conf"
+ PODMAN_REGISTRY_OPTS="--registries-conf ${TESTSDIR}/registries.conf"
++
++ PODMAN_SERVER_PID=
++ PODMAN_NATIVE="${PODMAN_BINARY} ${ROOTDIR_OPTS} ${PODMAN_REGISTRY_OPTS}"
++ if [[ -n "$REMOTE" ]]; then
++ PODMAN_NATIVE="${PODMAN_BINARY%%-remote} ${ROOTDIR_OPTS} ${PODMAN_REGISTRY_OPTS}"
++ # static CONTAINERS_CONF needed for capabilities test. As of 2021-07-01
++ # no tests in bud.bats override this; if at some point any test does
++ # so, it will probably need to be skip_if_remote()d.
++ env CONTAINERS_CONF=${CONTAINERS_CONF:-$(dirname ${BASH_SOURCE})/containers.conf} $PODMAN_NATIVE system service --timeout=0 &
++ PODMAN_SERVER_PID=$!
++ local timeout=10
++ while ((timeout > 0)); do
++ test -S /run/podman/podman.sock && return
++ sleep 0.2; timeout=$((timeout - 1))
++ done
++ die "podman server never came up"
++ fi
+ }
+
+ function starthttpd() {
+@@ -57,6 +74,12 @@ function stophttpd() {
+ function teardown() {
+ stophttpd
+
++ if [[ -n "$PODMAN_SERVER_PID" ]]; then
++ kill $PODMAN_SERVER_PID
++ wait $PODMAN_SERVER_PID
++ rm -f /run/podman/podman.sock
++ fi
++
+ # Workaround for #1991 - buildah + overlayfs leaks mount points.
+ # Many tests leave behind /var/tmp/.../root/overlay and sub-mounts;
+ # let's find those and clean them up, otherwise 'rm -rf' fails.
+@@ -129,7 +152,13 @@ function copy() {
+ }
+
+ function podman() {
+- command podman ${PODMAN_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
++ echo "# ... podman $*" >&3
++ ${PODMAN_BINARY} ${PODMAN_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
++}
++
++function podman-remote() {
++ echo "# ... podman-remote $*" >&3
++ ${PODMAN_BINARY} ${ROOTDIR_OPTS} "$@"
+ }
+
+ #################
+@@ -164,15 +193,40 @@ function run_buildah() {
--retry) retry=3; shift;; # retry network flakes
esac
-
+
+ local podman_or_buildah=${BUILDAH_BINARY}
-+ local registry_opts=${BUILDAH_REGISTRY_OPTS}
++ local _opts="${ROOTDIR_OPTS} ${BUILDAH_REGISTRY_OPTS}"
+ if [[ $1 == "bud" || $1 == "build-using-dockerfile" ]]; then
+ shift
+ # podman defaults to --layers=true; buildah to --false.
@@ -29,7 +81,10 @@ index 11deb367..08e73954 100644
+ set "build" "--force-rm=false" "--layers=false" "$@"
+ fi
+ podman_or_buildah=${PODMAN_BINARY}
-+ registry_opts=${PODMAN_REGISTRY_OPTS}
++ _opts="${ROOTDIR_OPTS} ${PODMAN_REGISTRY_OPTS}"
++ if [[ -n "$REMOTE" ]]; then
++ _opts=
++ fi
+
+ # podman always exits 125 where buildah exits 1 or 2
+ case $expected_rc in
@@ -41,17 +96,31 @@ index 11deb367..08e73954 100644
# Remember command args, for possible use in later diagnostic messages
- MOST_RECENT_BUILDAH_COMMAND="buildah $*"
+ MOST_RECENT_BUILDAH_COMMAND="$cmd_basename $*"
-
+
while [ $retry -gt 0 ]; do
retry=$(( retry - 1 ))
-
+
# stdout is only emitted upon error; this echo is to help a debugger
- echo "\$ $BUILDAH_BINARY $*"
- run env CONTAINERS_CONF=${CONTAINERS_CONF:-$(dirname ${BASH_SOURCE})/containers.conf} timeout --foreground --kill=10 $BUILDAH_TIMEOUT ${BUILDAH_BINARY} ${BUILDAH_REGISTRY_OPTS} ${ROOTDIR_OPTS} "$@"
+ echo "\$ $cmd_basename $*"
-+ run env CONTAINERS_CONF=${CONTAINERS_CONF:-$(dirname ${BASH_SOURCE})/containers.conf} timeout --foreground --kill=10 $BUILDAH_TIMEOUT ${podman_or_buildah} ${registry_opts} ${ROOTDIR_OPTS} "$@"
++ run env CONTAINERS_CONF=${CONTAINERS_CONF:-$(dirname ${BASH_SOURCE})/containers.conf} timeout --foreground --kill=10 $BUILDAH_TIMEOUT ${podman_or_buildah} ${_opts} "$@"
# without "quotes", multiple lines are glommed together into one
if [ -n "$output" ]; then
echo "$output"
---
+@@ -396,3 +450,12 @@ function skip_if_no_docker() {
+ skip "this test needs actual docker, not podman-docker"
+ fi
+ }
++
++####################
++# skip_if_remote # (only applicable for podman)
++####################
++function skip_if_remote() {
++ if [[ -n "$REMOTE" ]]; then
++ skip "${1:-test does not work with podman-remote}"
++ fi
++}
+--
2.31.1
+
diff --git a/test/buildah-bud/make-new-buildah-diffs b/test/buildah-bud/make-new-buildah-diffs
index 11987e376..3d0a77008 100644
--- a/test/buildah-bud/make-new-buildah-diffs
+++ b/test/buildah-bud/make-new-buildah-diffs
@@ -56,8 +56,6 @@ if [[ -n "$patch2" ]]; then
die "Internal error: I thought I checked for squashed commits, but still see $patch2"
fi
-# All looks good. Now write that patch into its proper place in the
-# podman repo. The sed and tac mess strips trailing whitespace and
-# empty lines; we need to do this to pass github CI checks.
-sed -e 's/ \+$//' <0001-*.patch |\
- tac | sed -e '/./,$!d' | tac >| ../test/buildah-bud/buildah-tests.diff
+# All looks good. We can now copy that patch into its proper place in the
+# podman repo.
+cp 0001-*.patch ../test/buildah-bud/buildah-tests.diff
diff --git a/test/buildah-bud/run-buildah-bud-tests b/test/buildah-bud/run-buildah-bud-tests
index a37e90dc4..eb8de5618 100755
--- a/test/buildah-bud/run-buildah-bud-tests
+++ b/test/buildah-bud/run-buildah-bud-tests
@@ -69,8 +69,7 @@ REMOTE=
# If remote, start server & change path
if [[ "${PODBIN_NAME:-}" = "remote" ]]; then
REMOTE=1
- echo "$ME: remote tests are not working yet" >&2
- exit 1
+ PODMAN_BINARY+="-remote"
fi
function die() {
@@ -214,6 +213,7 @@ review the test failure and double-check your changes.
(set -x;sudo env TMPDIR=/var/tmp \
PODMAN_BINARY=$PODMAN_BINARY \
+ REMOTE=$REMOTE \
BUILDAH_BINARY=$(pwd)/bin/buildah \
COPY_BINARY=$(pwd)/bin/copy \
bats "${bats_filter[@]}" tests/bud.bats)
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index b5bbfcd5c..1c9a8dc6f 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -1,11 +1,13 @@
package integration
import (
+ "fmt"
"net"
"os"
"os/exec"
"strings"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/criu"
. "github.com/containers/podman/v3/test/utils"
. "github.com/onsi/ginkgo"
@@ -46,7 +48,7 @@ var _ = Describe("Podman checkpoint", func() {
Skip("OCI runtime does not support checkpoint/restore")
}
- if !criu.CheckForCriu() {
+ if !criu.CheckForCriu(criu.MinCriuVersion) {
Skip("CRIU is missing or too old.")
}
// Only Fedora 29 and newer has a new enough selinux-policy and
@@ -977,4 +979,164 @@ var _ = Describe("Podman checkpoint", func() {
// Remove exported checkpoint
os.Remove(fileName)
})
+
+ namespaceCombination := []string{
+ "cgroup,ipc,net,uts,pid",
+ "cgroup,ipc,net,uts",
+ "cgroup,ipc,net",
+ "cgroup,ipc",
+ "ipc,net,uts,pid",
+ "ipc,net,uts",
+ "ipc,net",
+ "net,uts,pid",
+ "net,uts",
+ "uts,pid",
+ }
+ for _, share := range namespaceCombination {
+ testName := fmt.Sprintf(
+ "podman checkpoint and restore container out of and into pod (%s)",
+ share,
+ )
+ It(testName, func() {
+ if !criu.CheckForCriu(criu.PodCriuVersion) {
+ Skip("CRIU is missing or too old.")
+ }
+ if !crutils.CRRuntimeSupportsPodCheckpointRestore(podmanTest.OCIRuntime) {
+ Skip("runtime does not support pod restore")
+ }
+ // Create a pod
+ session := podmanTest.Podman([]string{
+ "pod",
+ "create",
+ "--share",
+ share,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ podID := session.OutputToString()
+
+ session = podmanTest.Podman([]string{
+ "run",
+ "-d",
+ "--rm",
+ "--pod",
+ podID,
+ ALPINE,
+ "top",
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ cid := session.OutputToString()
+
+ fileName := "/tmp/checkpoint-" + cid + ".tar.gz"
+
+ // Checkpoint the container
+ result := podmanTest.Podman([]string{
+ "container",
+ "checkpoint",
+ "-e",
+ fileName,
+ cid,
+ })
+ result.WaitWithDefaultTimeout()
+
+ // As the container has been started with '--rm' it will be completely
+ // cleaned up after checkpointing.
+ Expect(result).To(Exit(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ // Remove the pod and create a new pod
+ result = podmanTest.Podman([]string{
+ "pod",
+ "rm",
+ podID,
+ })
+ result.WaitWithDefaultTimeout()
+ Expect(result).To(Exit(0))
+
+ // First create a pod with different shared namespaces.
+ // Restore should fail
+
+ wrongShare := share[:strings.LastIndex(share, ",")]
+
+ session = podmanTest.Podman([]string{
+ "pod",
+ "create",
+ "--share",
+ wrongShare,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ podID = session.OutputToString()
+
+ // Restore container into a pod with non-matching shared namespaces; this should fail
+ result = podmanTest.Podman([]string{
+ "container",
+ "restore",
+ "--pod",
+ podID,
+ "-i",
+ fileName,
+ })
+ result.WaitWithDefaultTimeout()
+ Expect(result).To(Exit(125))
+ Expect(result.ErrorToString()).To(ContainSubstring("does not share the"))
+
+ // Remove the pod and create a new pod
+ result = podmanTest.Podman([]string{
+ "pod",
+ "rm",
+ podID,
+ })
+ result.WaitWithDefaultTimeout()
+ Expect(result).To(Exit(0))
+
+ session = podmanTest.Podman([]string{
+ "pod",
+ "create",
+ "--share",
+ share,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ podID = session.OutputToString()
+
+ // Restore container into the new pod with matching shared namespaces
+ result = podmanTest.Podman([]string{
+ "container",
+ "restore",
+ "--pod",
+ podID,
+ "-i",
+ fileName,
+ })
+ result.WaitWithDefaultTimeout()
+
+ Expect(result).To(Exit(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
+ Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
+
+ result = podmanTest.Podman([]string{
+ "rm",
+ "-f",
+ result.OutputToString(),
+ })
+ result.WaitWithDefaultTimeout()
+ Expect(result).To(Exit(0))
+ Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
+ Expect(podmanTest.NumberOfContainers()).To(Equal(1))
+
+ result = podmanTest.Podman([]string{
+ "pod",
+ "rm",
+ "-fa",
+ })
+ result.WaitWithDefaultTimeout()
+ Expect(result).To(Exit(0))
+
+ // Remove exported checkpoint
+ os.Remove(fileName)
+ })
+ }
})
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index 28040ecfd..899c84a14 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -174,6 +174,16 @@ var _ = Describe("Podman healthcheck run", func() {
Expect(inspect[0].State.Healthcheck.Status).To(Equal("healthy"))
})
+ It("podman healthcheck unhealthy but valid arguments check", func() {
+ session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "[\"ls\", \"/foo\"]", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ hc := podmanTest.Podman([]string{"healthcheck", "run", "hc"})
+ hc.WaitWithDefaultTimeout()
+ Expect(hc).Should(Exit(1))
+ })
+
It("podman healthcheck single healthy result changes failed to healthy", func() {
session := podmanTest.Podman([]string{"run", "-dt", "--name", "hc", "--health-retries", "2", "--health-cmd", "ls /foo || exit 1", ALPINE, "top"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go
index 6088d991f..7ad1fc1f2 100644
--- a/test/e2e/login_logout_test.go
+++ b/test/e2e/login_logout_test.go
@@ -97,6 +97,24 @@ var _ = Describe("Podman login and logout", func() {
os.RemoveAll(certDirPath)
})
+ readAuthInfo := func(filePath string) map[string]interface{} {
+ authBytes, err := ioutil.ReadFile(filePath)
+ Expect(err).To(BeNil())
+
+ var authInfo map[string]interface{}
+ err = json.Unmarshal(authBytes, &authInfo)
+ Expect(err).To(BeNil())
+ fmt.Println(authInfo)
+
+ const authsKey = "auths"
+ Expect(authInfo).To(HaveKey(authsKey))
+
+ auths, ok := authInfo[authsKey].(map[string]interface{})
+ Expect(ok).To(BeTrue())
+
+ return auths
+ }
+
It("podman login and logout", func() {
session := podmanTest.Podman([]string{"login", "-u", "podmantest", "-p", "test", server})
session.WaitWithDefaultTimeout()
@@ -151,10 +169,7 @@ var _ = Describe("Podman login and logout", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- authInfo, _ := ioutil.ReadFile(authFile)
- var info map[string]interface{}
- json.Unmarshal(authInfo, &info)
- fmt.Println(info)
+ readAuthInfo(authFile)
// push should fail with nonexistent authfile
session = podmanTest.Podman([]string{"push", "--authfile", "/tmp/nonexistent", ALPINE, testImg})
@@ -284,4 +299,204 @@ var _ = Describe("Podman login and logout", func() {
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
})
+
+ It("podman login and logout with repository", func() {
+ authFile := filepath.Join(podmanTest.TempDir, "auth.json")
+
+ testRepository := server + "/podmantest"
+ session := podmanTest.Podman([]string{
+ "login",
+ "-u", "podmantest",
+ "-p", "test",
+ "--authfile", authFile,
+ testRepository,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ authInfo := readAuthInfo(authFile)
+ Expect(authInfo).To(HaveKey(testRepository))
+
+ session = podmanTest.Podman([]string{
+ "logout",
+ "--authfile", authFile,
+ testRepository,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ authInfo = readAuthInfo(authFile)
+ Expect(authInfo).NotTo(HaveKey(testRepository))
+ })
+
+ It("podman login and logout with repository and specified image", func() {
+ authFile := filepath.Join(podmanTest.TempDir, "auth.json")
+
+ testTarget := server + "/podmantest/test-alpine"
+ session := podmanTest.Podman([]string{
+ "login",
+ "-u", "podmantest",
+ "-p", "test",
+ "--authfile", authFile,
+ testTarget,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ authInfo := readAuthInfo(authFile)
+ Expect(authInfo).To(HaveKey(testTarget))
+
+ session = podmanTest.Podman([]string{
+ "push",
+ "--authfile", authFile,
+ ALPINE, testTarget,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ })
+
+ It("podman login and logout with repository with fallback", func() {
+ authFile := filepath.Join(podmanTest.TempDir, "auth.json")
+
+ testRepos := []string{
+ server + "/podmantest",
+ server,
+ }
+ for _, testRepo := range testRepos {
+ session := podmanTest.Podman([]string{
+ "login",
+ "-u", "podmantest",
+ "-p", "test",
+ "--authfile", authFile,
+ testRepo,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }
+
+ authInfo := readAuthInfo(authFile)
+ Expect(authInfo).To(HaveKey(testRepos[0]))
+ Expect(authInfo).To(HaveKey(testRepos[1]))
+
+ session := podmanTest.Podman([]string{
+ "push",
+ "--authfile", authFile,
+ ALPINE, testRepos[0] + "/test-image-alpine",
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{
+ "logout",
+ "--authfile", authFile,
+ testRepos[0],
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{
+ "push",
+ "--authfile", authFile,
+ ALPINE, testRepos[0] + "/test-image-alpine",
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{
+ "logout",
+ "--authfile", authFile,
+ testRepos[1],
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ authInfo = readAuthInfo(authFile)
+ Expect(authInfo).NotTo(HaveKey(testRepos[0]))
+ Expect(authInfo).NotTo(HaveKey(testRepos[1]))
+ })
+
+ It("podman login with repository invalid arguments", func() {
+ authFile := filepath.Join(podmanTest.TempDir, "auth.json")
+
+ for _, invalidArg := range []string{
+ "https://" + server + "/podmantest",
+ server + "/podmantest/image:latest",
+ } {
+ session := podmanTest.Podman([]string{
+ "login",
+ "-u", "podmantest",
+ "-p", "test",
+ "--authfile", authFile,
+ invalidArg,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(ExitWithError())
+ }
+ })
+
+ It("podman login and logout with repository push with invalid auth.json credentials", func() {
+ authFile := filepath.Join(podmanTest.TempDir, "auth.json")
+ // only `server` contains the correct login data
+ err := ioutil.WriteFile(authFile, []byte(fmt.Sprintf(`{"auths": {
+ "%s/podmantest": { "auth": "cG9kbWFudGVzdDp3cm9uZw==" },
+ "%s": { "auth": "cG9kbWFudGVzdDp0ZXN0" }
+ }}`, server, server)), 0644)
+ Expect(err).To(BeNil())
+
+ session := podmanTest.Podman([]string{
+ "push",
+ "--authfile", authFile,
+ ALPINE, server + "/podmantest/test-image",
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(ExitWithError())
+
+ session = podmanTest.Podman([]string{
+ "push",
+ "--authfile", authFile,
+ ALPINE, server + "/test-image",
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(Exit(0))
+ })
+
+ It("podman login and logout with repository pull with wrong auth.json credentials", func() {
+ authFile := filepath.Join(podmanTest.TempDir, "auth.json")
+
+ testTarget := server + "/podmantest/test-alpine"
+ session := podmanTest.Podman([]string{
+ "login",
+ "-u", "podmantest",
+ "-p", "test",
+ "--authfile", authFile,
+ testTarget,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ session = podmanTest.Podman([]string{
+ "push",
+ "--authfile", authFile,
+ ALPINE, testTarget,
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // only `server + /podmantest` and `server` have the correct login data
+ err := ioutil.WriteFile(authFile, []byte(fmt.Sprintf(`{"auths": {
+ "%s/podmantest/test-alpine": { "auth": "cG9kbWFudGVzdDp3cm9uZw==" },
+ "%s/podmantest": { "auth": "cG9kbWFudGVzdDp0ZXN0" },
+ "%s": { "auth": "cG9kbWFudGVzdDp0ZXN0" }
+ }}`, server, server, server)), 0644)
+ Expect(err).To(BeNil())
+
+ session = podmanTest.Podman([]string{
+ "pull",
+ "--authfile", authFile,
+ server + "/podmantest/test-alpine",
+ })
+ session.WaitWithDefaultTimeout()
+ Expect(session).To(ExitWithError())
+ })
})
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index e27ff27a4..aeb88e481 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -172,6 +172,43 @@ var _ = Describe("Podman ps", func() {
Expect(fullCid).To(Equal(result.OutputToStringArray()[0]))
})
+ It("podman ps --filter network=container:<name>", func() {
+ ctrAlpha := "alpha"
+ container := podmanTest.Podman([]string{"run", "-dt", "--name", ctrAlpha, ALPINE, "top"})
+ container.WaitWithDefaultTimeout()
+ Expect(container).Should(Exit(0))
+
+ ctrBravo := "bravo"
+ containerBravo := podmanTest.Podman([]string{"run", "-dt", "--network", "container:alpha", "--name", ctrBravo, ALPINE, "top"})
+ containerBravo.WaitWithDefaultTimeout()
+ Expect(containerBravo).Should(Exit(0))
+
+ result := podmanTest.Podman([]string{"ps", "-a", "--format", "table {{.Names}}", "--filter", "network=container:alpha"})
+ result.WaitWithDefaultTimeout()
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.OutputToString()).To(ContainSubstring("bravo"))
+ })
+
+ It("podman ps --filter network=container:<id>", func() {
+ ctrAlpha := "first"
+ container := podmanTest.Podman([]string{"run", "-dt", "--name", ctrAlpha, ALPINE, "top"})
+ container.WaitWithDefaultTimeout()
+ cid := container.OutputToString()
+ Expect(container).Should(Exit(0))
+
+ ctrBravo := "second"
+ containerBravo := podmanTest.Podman([]string{"run", "-dt", "--network", "container:" + cid, "--name", ctrBravo, ALPINE, "top"})
+ containerBravo.WaitWithDefaultTimeout()
+ Expect(containerBravo).Should(Exit(0))
+
+ result := podmanTest.Podman([]string{"ps", "-a", "--format", "table {{.Names}}", "--filter", "network=container:" + cid})
+ result.WaitWithDefaultTimeout()
+ result.WaitWithDefaultTimeout()
+ Expect(result).Should(Exit(0))
+ Expect(result.OutputToString()).To(ContainSubstring("second"))
+ })
+
It("podman ps namespace flag", func() {
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 3bfd59b54..3c65c02d1 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -1213,14 +1213,14 @@ USER mail`, BB)
})
It("podman run with bad healthcheck timeout", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-timeout", "0s", ALPINE, "top"})
+ session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "foo", "--health-timeout", "0s", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-timeout must be at least 1 second"))
})
It("podman run with bad healthcheck start-period", func() {
- session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "[\"foo\"]", "--health-start-period", "-1s", ALPINE, "top"})
+ session := podmanTest.Podman([]string{"run", "-dt", "--health-cmd", "foo", "--health-start-period", "-1s", ALPINE, "top"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(ContainSubstring("healthcheck-start-period must be 0 seconds or greater"))
diff --git a/test/system/050-stop.bats b/test/system/050-stop.bats
index 2ed791429..d809507a5 100644
--- a/test/system/050-stop.bats
+++ b/test/system/050-stop.bats
@@ -119,11 +119,31 @@ load helpers
# the container's status.
run_podman run --name stopme -d $IMAGE sh -c \
- "trap 'echo Received SIGTERM, ignoring' SIGTERM; echo READY; while :; do sleep 1; done"
+ "trap 'echo Received SIGTERM, ignoring' SIGTERM; echo READY; while :; do sleep 0.2; done"
- # Stop the container in the background
+ wait_for_ready stopme
+
+ local t0=$SECONDS
+ # Stop the container, but do so in the background so we can inspect
+ # the container status while it's stopping. Use $PODMAN because we
+ # don't want the overhead and error checks of run_podman.
$PODMAN stop -t 20 stopme &
+ # Wait for container to acknowledge the signal. We can't use wait_for_output
+ # because that aborts if .State.Running != true
+ local timeout=5
+ while [[ $timeout -gt 0 ]]; do
+ run_podman logs stopme
+ if [[ "$output" =~ "Received SIGTERM, ignoring" ]]; then
+ break
+ fi
+ timeout=$((timeout - 1))
+ if [[ $timeout -eq 0 ]]; then
+ die "Timed out waiting for container to receive SIGTERM"
+ fi
+ sleep 0.5
+ done
+
# Other commands can acquire the lock
run_podman ps -a
@@ -131,6 +151,13 @@ load helpers
run_podman inspect --format '{{.State.Status}}' stopme
is "$output" "stopping" "Status of container should be 'stopping'"
+ # Time check: make sure we were able to run 'ps' before the container
+ # exited. If this takes too long, it means ps had to wait for lock.
+ local delta_t=$(( $SECONDS - t0 ))
+ if [[ $delta_t -gt 5 ]]; then
+ die "Operations took too long ($delta_t seconds)"
+ fi
+
run_podman kill stopme
run_podman wait stopme
diff --git a/test/system/065-cp.bats b/test/system/065-cp.bats
index 5778eb46e..39f439e7b 100644
--- a/test/system/065-cp.bats
+++ b/test/system/065-cp.bats
@@ -22,8 +22,7 @@ load helpers
mkdir -p $srcdir/subdir
echo "${randomcontent[2]}" > $srcdir/subdir/dotfile.
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer mkdir /srv/subdir
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir; sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -41,7 +40,6 @@ load helpers
0 | /tmp | /tmp/hostfile0 | copy to /tmp
1 | /tmp/ | /tmp/hostfile1 | copy to /tmp/
2 | /tmp/. | /tmp/hostfile2 | copy to /tmp/.
-0 | /tmp/hostfile2 | /tmp/hostfile2 | overwrite previous copy
0 | /tmp/anotherbase.txt | /tmp/anotherbase.txt | copy to /tmp, new name
0 | . | /srv/hostfile0 | copy to workdir (rel path), new name
1 | ./ | /srv/hostfile1 | copy to workdir (rel path), new name
@@ -175,11 +173,12 @@ load helpers
random-1-$(random_string 15)
random-2-$(random_string 20)
)
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/containerfile"
- run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/dotfile."
- run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /srv/containerfile1"
- run_podman exec cpcontainer sh -c "mkdir /srv/subdir; echo ${randomcontent[2]} > /srv/subdir/containerfile2"
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /tmp/containerfile;
+ echo ${randomcontent[0]} > /tmp/dotfile.;
+ echo ${randomcontent[1]} > /srv/containerfile1;
+ echo ${randomcontent[2]} > /srv/subdir/containerfile2;
+ sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -226,6 +225,98 @@ load helpers
}
+@test "podman cp file from container to container" {
+ # Create 3 files with random content in the container.
+ local -a randomcontent=(
+ random-0-$(random_string 10)
+ random-1-$(random_string 15)
+ random-2-$(random_string 20)
+ )
+
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /tmp/containerfile;
+ echo ${randomcontent[0]} > /tmp/dotfile.;
+ echo ${randomcontent[1]} > /srv/containerfile1;
+ echo ${randomcontent[2]} > /srv/subdir/containerfile2;
+ sleep infinity"
+
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
+ # format is: <id> | <source arg to cp> | <destination arg to cp> | <full dest path in the destination container> | <test name>
+ tests="
+0 | /tmp/containerfile | | /containerfile | /
+0 | /tmp/dotfile. | | /dotfile. | /
+0 | /tmp/containerfile | / | /containerfile | /
+0 | /tmp/containerfile | /. | /containerfile | /.
+0 | /tmp/containerfile | /newfile | /newfile | /newfile
+1 | containerfile1 | / | /containerfile1 | copy from workdir (rel path) to /
+2 | subdir/containerfile2 | / | /containerfile2 | copy from workdir/subdir (rel path) to /
+"
+
+ # From RUNNING container
+ while read id src dest dest_fullname description; do
+ # dest may be "''" for empty table cells
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # From CREATED container
+ run_podman create --name cpcontainer --workdir=/srv $cpimage
+ while read id src dest dest_fullname description; do
+ # dest may be "''" for empty table cells
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname"
+ is "$output" "${randomcontent[$id]}" "$description (cp ctr:$src to /$dest)"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+ run_podman rm -f cpcontainer
+
+ run_podman rmi -f $cpimage
+}
+
+
@test "podman cp dir from host to container" {
srcdir=$PODMAN_TMPDIR
mkdir -p $srcdir/dir/sub
@@ -241,8 +332,7 @@ load helpers
mkdir -p $srcdir/dir.
cp -r $srcdir/dir/* $srcdir/dir.
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer mkdir /srv/subdir
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir; sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -309,12 +399,12 @@ load helpers
random-0-$(random_string 10)
random-1-$(random_string 15)
)
- run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c "mkdir /srv/subdir; echo ${randomcontent[0]} > /srv/subdir/containerfile0"
- run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /srv/subdir/containerfile1"
- # "." and "dir/." will copy the contents, so make sure that a dir ending
- # with dot is treated correctly.
- run_podman exec cpcontainer sh -c 'mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./'
+
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /srv/subdir/containerfile0; \
+ echo ${randomcontent[1]} > /srv/subdir/containerfile1; \
+ mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./; \
+ sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
@@ -377,6 +467,110 @@ load helpers
}
+@test "podman cp dir from container to container" {
+ # Create 2 files with random content in the container.
+ local -a randomcontent=(
+ random-0-$(random_string 10)
+ random-1-$(random_string 15)
+ )
+
+ run_podman run -d --name cpcontainer --workdir=/srv $IMAGE sh -c "mkdir /srv/subdir;
+ echo ${randomcontent[0]} > /srv/subdir/containerfile0; \
+ echo ${randomcontent[1]} > /srv/subdir/containerfile1; \
+ mkdir /tmp/subdir.; cp /srv/subdir/* /tmp/subdir./; \
+ sleep infinity"
+
+ # Commit the image for testing non-running containers
+ run_podman commit -q cpcontainer
+ cpimage="$output"
+
+ # format is: <source arg to cp> | <dest> | <full dest path> | <test name>
+ tests="
+/srv | | /srv/subdir | copy /srv
+/srv | /newdir | /newdir/subdir | copy /srv to /newdir
+/srv/ | | /srv/subdir | copy /srv/
+/srv/. | | /subdir | copy /srv/.
+/srv/. | /newdir | /newdir/subdir | copy /srv/. to /newdir
+/srv/subdir/. | | | copy /srv/subdir/.
+/tmp/subdir. | | /subdir. | copy /tmp/subdir.
+"
+
+ # From RUNNING container
+ while read src dest dest_fullname description; do
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ if [[ $dest_fullname == "''" ]];then
+ unset dest_fullname
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+ run_podman kill cpcontainer
+ run_podman rm -f cpcontainer
+
+ # From CREATED container
+ run_podman create --name cpcontainer --workdir=/srv $cpimage
+ while read src dest dest_fullname description; do
+ if [[ $src == "''" ]];then
+ unset src
+ fi
+ if [[ $dest == "''" ]];then
+ unset dest
+ fi
+ if [[ $dest_fullname == "''" ]];then
+ unset dest_fullname
+ fi
+
+ # To RUNNING container
+ run_podman run -d $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+
+ # To CREATED container
+ run_podman create $IMAGE sleep infinity
+ destcontainer="$output"
+ run_podman cp cpcontainer:$src $destcontainer:"/$dest"
+ run_podman start $destcontainer
+ run_podman exec $destcontainer cat "/$dest_fullname/containerfile0" "/$dest_fullname/containerfile1"
+ is "$output" "${randomcontent[0]}
+${randomcontent[1]}" "$description"
+ run_podman kill $destcontainer
+ run_podman rm -f $destcontainer
+ done < <(parse_table "$tests")
+
+ run_podman rm -f cpcontainer
+ run_podman rmi -f $cpimage
+}
+
+
@test "podman cp symlinked directory from container" {
destdir=$PODMAN_TMPDIR/cp-weird-symlink
mkdir -p $destdir
@@ -387,10 +581,10 @@ load helpers
random-1-$(random_string 15)
)
- run_podman run -d --name cpcontainer $IMAGE sleep infinity
- run_podman exec cpcontainer sh -c "echo ${randomcontent[0]} > /tmp/containerfile0"
- run_podman exec cpcontainer sh -c "echo ${randomcontent[1]} > /tmp/containerfile1"
- run_podman exec cpcontainer sh -c "mkdir /tmp/sub && cd /tmp/sub && ln -s .. weirdlink"
+ run_podman run -d --name cpcontainer $IMAGE sh -c "echo ${randomcontent[0]} > /tmp/containerfile0; \
+ echo ${randomcontent[1]} > /tmp/containerfile1; \
+ mkdir /tmp/sub && cd /tmp/sub && ln -s .. weirdlink; \
+ sleep infinity"
# Commit the image for testing non-running containers
run_podman commit -q cpcontainer
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 7b76c585f..26113e45c 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -749,16 +749,9 @@ RUN echo $random_string
EOF
run_podman 125 build -t build_test --pull-never $tmpdir
- # FIXME: this is just ridiculous. Even after #10030 and #10034, Ubuntu
- # remote *STILL* flakes this test! It fails with the correct exit status,
- # but the error output is 'Error: stream dropped, unexpected failure'
- # Let's just stop checking on podman-remote. As long as it exits 125,
- # we're happy.
- if ! is_remote; then
- is "$output" \
- ".*Error: error creating build container: quay.io/libpod/nosuchimage:nosuchtag: image not known" \
- "--pull-never fails with expected error message"
- fi
+ is "$output" \
+ ".*Error: error creating build container: quay.io/libpod/nosuchimage:nosuchtag: image not known" \
+ "--pull-never fails with expected error message"
}
@test "podman build --logfile test" {
diff --git a/test/system/255-auto-update.bats b/test/system/255-auto-update.bats
index 25eaba45b..6fb40f41e 100644
--- a/test/system/255-auto-update.bats
+++ b/test/system/255-auto-update.bats
@@ -261,7 +261,8 @@ EOF
systemctl enable --now podman-auto-update-$cname.timer
systemctl list-timers --all
- local expect='Finished Podman auto-update testing service'
+ # While systemd v245 and later uses 'Finished', older versions use 'Started' for oneshot services
+ local expect='(Finished|Started) Podman auto-update testing service'
local failed_start=failed
local count=0
while [ $count -lt 120 ]; do
diff --git a/transfer.md b/transfer.md
index c37592384..765094dc9 100644
--- a/transfer.md
+++ b/transfer.md
@@ -141,8 +141,8 @@ The following podman commands do not have a Docker equivalent:
* [`podman generate `](./docs/source/markdown/podman-generate.1.md)
* [`podman generate kube`](./docs/source/markdown/podman-generate-kube.1.md)
* [`podman generate systemd`](./docs/source/markdown/podman-generate-systemd.1.md)
-* [`podman healthcheck `](/docs/source/markdown/podmanh-healthcheck.1.md)
-* [`podman healthcheck run`](/docs/source/markdown/podmanh-healthcheck-run.1.md)
+* [`podman healthcheck `](./docs/source/markdown/podman-healthcheck.1.md)
+* [`podman healthcheck run`](./docs/source/markdown/podman-healthcheck-run.1.md)
* [`podman image diff`](./docs/source/markdown/podman-image-diff.1.md)
* [`podman image exists`](./docs/source/markdown/podman-image-exists.1.md)
* [`podman image mount`](./docs/source/markdown/podman-image-mount.1.md)
diff --git a/vendor/github.com/checkpoint-restore/go-criu/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/.gitignore
deleted file mode 100644
index f1c90e3d5..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-test/test
-test/piggie
-test/phaul
-image
-rpc/rpc.proto
diff --git a/vendor/github.com/checkpoint-restore/go-criu/.travis.yml b/vendor/github.com/checkpoint-restore/go-criu/.travis.yml
deleted file mode 100644
index 741dbf0a1..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/.travis.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-language: go
-sudo: required
-os:
- - linux
-go:
- - "1.8"
- - "1.9"
- - "1.10"
-env:
- # Run the tests with CRIU master and criu-dev
- - CRIU_BRANCH="master"
- - CRIU_BRANCH="criu-dev"
-install:
- - sudo apt-get update
- - sudo apt-get install -y libprotobuf-dev libprotobuf-c0-dev protobuf-c-compiler protobuf-compiler python-protobuf libnl-3-dev libnet-dev libcap-dev
- - go get github.com/checkpoint-restore/go-criu
- - git clone --single-branch -b ${CRIU_BRANCH} https://github.com/checkpoint-restore/criu.git
- - cd criu; make
- - sudo install -D -m 755 criu/criu /usr/sbin/
- - cd ..
-script:
- # This builds the code without running the tests.
- - make build phaul test/test test/phaul test/piggie
- # Run actual test as root as it uses CRIU.
- - sudo make test phaul-test
diff --git a/vendor/github.com/checkpoint-restore/go-criu/Makefile b/vendor/github.com/checkpoint-restore/go-criu/Makefile
deleted file mode 100644
index ee44ee448..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/Makefile
+++ /dev/null
@@ -1,60 +0,0 @@
-GO ?= go
-CC ?= gcc
-ifeq ($(GOPATH),)
-export GOPATH := $(shell $(GO) env GOPATH)
-endif
-FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH)))
-GOBIN := $(shell $(GO) env GOBIN)
-ifeq ($(GOBIN),)
- GOBIN := $(FIRST_GOPATH)/bin
-endif
-
-all: build test phaul phaul-test
-
-lint:
- @golint . test phaul
-build:
- @$(GO) build -v
-
-test/piggie: test/piggie.c
- @$(CC) $^ -o $@
-
-test/test: test/main.go
- @$(GO) build -v -o test/test test/main.go
-
-test: test/test test/piggie
- mkdir -p image
- test/piggie
- test/test dump `pidof piggie` image
- test/test restore image
- pkill -9 piggie || :
-
-phaul:
- @cd phaul; go build -v
-
-test/phaul: test/phaul-main.go
- @$(GO) build -v -o test/phaul test/phaul-main.go
-
-phaul-test: test/phaul test/piggie
- rm -rf image
- test/piggie
- test/phaul `pidof piggie`
- pkill -9 piggie || :
-
-clean:
- @rm -f test/test test/piggie test/phaul
- @rm -rf image
- @rm -f rpc/rpc.proto
-
-install.tools:
- if [ ! -x "$(GOBIN)/golint" ]; then \
- $(GO) get -u golang.org/x/lint/golint; \
- fi
-
-rpc/rpc.proto:
- curl -s https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/rpc.proto -o $@
-
-rpc/rpc.pb.go: rpc/rpc.proto
- protoc --go_out=. $^
-
-.PHONY: build test clean lint phaul
diff --git a/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go b/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go
deleted file mode 100644
index 230faace5..000000000
--- a/vendor/github.com/checkpoint-restore/go-criu/rpc/rpc.pb.go
+++ /dev/null
@@ -1,1211 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: rpc/rpc.proto
-
-/*
-Package rpc is a generated protocol buffer package.
-
-It is generated from these files:
- rpc/rpc.proto
-
-It has these top-level messages:
- CriuPageServerInfo
- CriuVethPair
- ExtMountMap
- JoinNamespace
- InheritFd
- CgroupRoot
- UnixSk
- CriuOpts
- CriuDumpResp
- CriuRestoreResp
- CriuNotify
- CriuFeatures
- CriuReq
- CriuResp
- CriuVersion
-*/
-package rpc
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type CriuCgMode int32
-
-const (
- CriuCgMode_IGNORE CriuCgMode = 0
- CriuCgMode_CG_NONE CriuCgMode = 1
- CriuCgMode_PROPS CriuCgMode = 2
- CriuCgMode_SOFT CriuCgMode = 3
- CriuCgMode_FULL CriuCgMode = 4
- CriuCgMode_STRICT CriuCgMode = 5
- CriuCgMode_DEFAULT CriuCgMode = 6
-)
-
-var CriuCgMode_name = map[int32]string{
- 0: "IGNORE",
- 1: "CG_NONE",
- 2: "PROPS",
- 3: "SOFT",
- 4: "FULL",
- 5: "STRICT",
- 6: "DEFAULT",
-}
-var CriuCgMode_value = map[string]int32{
- "IGNORE": 0,
- "CG_NONE": 1,
- "PROPS": 2,
- "SOFT": 3,
- "FULL": 4,
- "STRICT": 5,
- "DEFAULT": 6,
-}
-
-func (x CriuCgMode) Enum() *CriuCgMode {
- p := new(CriuCgMode)
- *p = x
- return p
-}
-func (x CriuCgMode) String() string {
- return proto.EnumName(CriuCgMode_name, int32(x))
-}
-func (x *CriuCgMode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CriuCgMode_value, data, "CriuCgMode")
- if err != nil {
- return err
- }
- *x = CriuCgMode(value)
- return nil
-}
-func (CriuCgMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-type CriuReqType int32
-
-const (
- CriuReqType_EMPTY CriuReqType = 0
- CriuReqType_DUMP CriuReqType = 1
- CriuReqType_RESTORE CriuReqType = 2
- CriuReqType_CHECK CriuReqType = 3
- CriuReqType_PRE_DUMP CriuReqType = 4
- CriuReqType_PAGE_SERVER CriuReqType = 5
- CriuReqType_NOTIFY CriuReqType = 6
- CriuReqType_CPUINFO_DUMP CriuReqType = 7
- CriuReqType_CPUINFO_CHECK CriuReqType = 8
- CriuReqType_FEATURE_CHECK CriuReqType = 9
- CriuReqType_VERSION CriuReqType = 10
- CriuReqType_WAIT_PID CriuReqType = 11
- CriuReqType_PAGE_SERVER_CHLD CriuReqType = 12
-)
-
-var CriuReqType_name = map[int32]string{
- 0: "EMPTY",
- 1: "DUMP",
- 2: "RESTORE",
- 3: "CHECK",
- 4: "PRE_DUMP",
- 5: "PAGE_SERVER",
- 6: "NOTIFY",
- 7: "CPUINFO_DUMP",
- 8: "CPUINFO_CHECK",
- 9: "FEATURE_CHECK",
- 10: "VERSION",
- 11: "WAIT_PID",
- 12: "PAGE_SERVER_CHLD",
-}
-var CriuReqType_value = map[string]int32{
- "EMPTY": 0,
- "DUMP": 1,
- "RESTORE": 2,
- "CHECK": 3,
- "PRE_DUMP": 4,
- "PAGE_SERVER": 5,
- "NOTIFY": 6,
- "CPUINFO_DUMP": 7,
- "CPUINFO_CHECK": 8,
- "FEATURE_CHECK": 9,
- "VERSION": 10,
- "WAIT_PID": 11,
- "PAGE_SERVER_CHLD": 12,
-}
-
-func (x CriuReqType) Enum() *CriuReqType {
- p := new(CriuReqType)
- *p = x
- return p
-}
-func (x CriuReqType) String() string {
- return proto.EnumName(CriuReqType_name, int32(x))
-}
-func (x *CriuReqType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CriuReqType_value, data, "CriuReqType")
- if err != nil {
- return err
- }
- *x = CriuReqType(value)
- return nil
-}
-func (CriuReqType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-type CriuPageServerInfo struct {
- Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"`
- Port *int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
- Pid *int32 `protobuf:"varint,3,opt,name=pid" json:"pid,omitempty"`
- Fd *int32 `protobuf:"varint,4,opt,name=fd" json:"fd,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuPageServerInfo) Reset() { *m = CriuPageServerInfo{} }
-func (m *CriuPageServerInfo) String() string { return proto.CompactTextString(m) }
-func (*CriuPageServerInfo) ProtoMessage() {}
-func (*CriuPageServerInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *CriuPageServerInfo) GetAddress() string {
- if m != nil && m.Address != nil {
- return *m.Address
- }
- return ""
-}
-
-func (m *CriuPageServerInfo) GetPort() int32 {
- if m != nil && m.Port != nil {
- return *m.Port
- }
- return 0
-}
-
-func (m *CriuPageServerInfo) GetPid() int32 {
- if m != nil && m.Pid != nil {
- return *m.Pid
- }
- return 0
-}
-
-func (m *CriuPageServerInfo) GetFd() int32 {
- if m != nil && m.Fd != nil {
- return *m.Fd
- }
- return 0
-}
-
-type CriuVethPair struct {
- IfIn *string `protobuf:"bytes,1,req,name=if_in,json=ifIn" json:"if_in,omitempty"`
- IfOut *string `protobuf:"bytes,2,req,name=if_out,json=ifOut" json:"if_out,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuVethPair) Reset() { *m = CriuVethPair{} }
-func (m *CriuVethPair) String() string { return proto.CompactTextString(m) }
-func (*CriuVethPair) ProtoMessage() {}
-func (*CriuVethPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *CriuVethPair) GetIfIn() string {
- if m != nil && m.IfIn != nil {
- return *m.IfIn
- }
- return ""
-}
-
-func (m *CriuVethPair) GetIfOut() string {
- if m != nil && m.IfOut != nil {
- return *m.IfOut
- }
- return ""
-}
-
-type ExtMountMap struct {
- Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
- Val *string `protobuf:"bytes,2,req,name=val" json:"val,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ExtMountMap) Reset() { *m = ExtMountMap{} }
-func (m *ExtMountMap) String() string { return proto.CompactTextString(m) }
-func (*ExtMountMap) ProtoMessage() {}
-func (*ExtMountMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *ExtMountMap) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *ExtMountMap) GetVal() string {
- if m != nil && m.Val != nil {
- return *m.Val
- }
- return ""
-}
-
-type JoinNamespace struct {
- Ns *string `protobuf:"bytes,1,req,name=ns" json:"ns,omitempty"`
- NsFile *string `protobuf:"bytes,2,req,name=ns_file,json=nsFile" json:"ns_file,omitempty"`
- ExtraOpt *string `protobuf:"bytes,3,opt,name=extra_opt,json=extraOpt" json:"extra_opt,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *JoinNamespace) Reset() { *m = JoinNamespace{} }
-func (m *JoinNamespace) String() string { return proto.CompactTextString(m) }
-func (*JoinNamespace) ProtoMessage() {}
-func (*JoinNamespace) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *JoinNamespace) GetNs() string {
- if m != nil && m.Ns != nil {
- return *m.Ns
- }
- return ""
-}
-
-func (m *JoinNamespace) GetNsFile() string {
- if m != nil && m.NsFile != nil {
- return *m.NsFile
- }
- return ""
-}
-
-func (m *JoinNamespace) GetExtraOpt() string {
- if m != nil && m.ExtraOpt != nil {
- return *m.ExtraOpt
- }
- return ""
-}
-
-type InheritFd struct {
- Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
- Fd *int32 `protobuf:"varint,2,req,name=fd" json:"fd,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *InheritFd) Reset() { *m = InheritFd{} }
-func (m *InheritFd) String() string { return proto.CompactTextString(m) }
-func (*InheritFd) ProtoMessage() {}
-func (*InheritFd) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *InheritFd) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *InheritFd) GetFd() int32 {
- if m != nil && m.Fd != nil {
- return *m.Fd
- }
- return 0
-}
-
-type CgroupRoot struct {
- Ctrl *string `protobuf:"bytes,1,opt,name=ctrl" json:"ctrl,omitempty"`
- Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CgroupRoot) Reset() { *m = CgroupRoot{} }
-func (m *CgroupRoot) String() string { return proto.CompactTextString(m) }
-func (*CgroupRoot) ProtoMessage() {}
-func (*CgroupRoot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *CgroupRoot) GetCtrl() string {
- if m != nil && m.Ctrl != nil {
- return *m.Ctrl
- }
- return ""
-}
-
-func (m *CgroupRoot) GetPath() string {
- if m != nil && m.Path != nil {
- return *m.Path
- }
- return ""
-}
-
-type UnixSk struct {
- Inode *uint32 `protobuf:"varint,1,req,name=inode" json:"inode,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UnixSk) Reset() { *m = UnixSk{} }
-func (m *UnixSk) String() string { return proto.CompactTextString(m) }
-func (*UnixSk) ProtoMessage() {}
-func (*UnixSk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *UnixSk) GetInode() uint32 {
- if m != nil && m.Inode != nil {
- return *m.Inode
- }
- return 0
-}
-
-type CriuOpts struct {
- ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd" json:"images_dir_fd,omitempty"`
- Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
- LeaveRunning *bool `protobuf:"varint,3,opt,name=leave_running,json=leaveRunning" json:"leave_running,omitempty"`
- ExtUnixSk *bool `protobuf:"varint,4,opt,name=ext_unix_sk,json=extUnixSk" json:"ext_unix_sk,omitempty"`
- TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established,json=tcpEstablished" json:"tcp_established,omitempty"`
- EvasiveDevices *bool `protobuf:"varint,6,opt,name=evasive_devices,json=evasiveDevices" json:"evasive_devices,omitempty"`
- ShellJob *bool `protobuf:"varint,7,opt,name=shell_job,json=shellJob" json:"shell_job,omitempty"`
- FileLocks *bool `protobuf:"varint,8,opt,name=file_locks,json=fileLocks" json:"file_locks,omitempty"`
- LogLevel *int32 `protobuf:"varint,9,opt,name=log_level,json=logLevel,def=2" json:"log_level,omitempty"`
- LogFile *string `protobuf:"bytes,10,opt,name=log_file,json=logFile" json:"log_file,omitempty"`
- Ps *CriuPageServerInfo `protobuf:"bytes,11,opt,name=ps" json:"ps,omitempty"`
- NotifyScripts *bool `protobuf:"varint,12,opt,name=notify_scripts,json=notifyScripts" json:"notify_scripts,omitempty"`
- Root *string `protobuf:"bytes,13,opt,name=root" json:"root,omitempty"`
- ParentImg *string `protobuf:"bytes,14,opt,name=parent_img,json=parentImg" json:"parent_img,omitempty"`
- TrackMem *bool `protobuf:"varint,15,opt,name=track_mem,json=trackMem" json:"track_mem,omitempty"`
- AutoDedup *bool `protobuf:"varint,16,opt,name=auto_dedup,json=autoDedup" json:"auto_dedup,omitempty"`
- WorkDirFd *int32 `protobuf:"varint,17,opt,name=work_dir_fd,json=workDirFd" json:"work_dir_fd,omitempty"`
- LinkRemap *bool `protobuf:"varint,18,opt,name=link_remap,json=linkRemap" json:"link_remap,omitempty"`
- Veths []*CriuVethPair `protobuf:"bytes,19,rep,name=veths" json:"veths,omitempty"`
- CpuCap *uint32 `protobuf:"varint,20,opt,name=cpu_cap,json=cpuCap,def=4294967295" json:"cpu_cap,omitempty"`
- ForceIrmap *bool `protobuf:"varint,21,opt,name=force_irmap,json=forceIrmap" json:"force_irmap,omitempty"`
- ExecCmd []string `protobuf:"bytes,22,rep,name=exec_cmd,json=execCmd" json:"exec_cmd,omitempty"`
- ExtMnt []*ExtMountMap `protobuf:"bytes,23,rep,name=ext_mnt,json=extMnt" json:"ext_mnt,omitempty"`
- ManageCgroups *bool `protobuf:"varint,24,opt,name=manage_cgroups,json=manageCgroups" json:"manage_cgroups,omitempty"`
- CgRoot []*CgroupRoot `protobuf:"bytes,25,rep,name=cg_root,json=cgRoot" json:"cg_root,omitempty"`
- RstSibling *bool `protobuf:"varint,26,opt,name=rst_sibling,json=rstSibling" json:"rst_sibling,omitempty"`
- InheritFd []*InheritFd `protobuf:"bytes,27,rep,name=inherit_fd,json=inheritFd" json:"inherit_fd,omitempty"`
- AutoExtMnt *bool `protobuf:"varint,28,opt,name=auto_ext_mnt,json=autoExtMnt" json:"auto_ext_mnt,omitempty"`
- ExtSharing *bool `protobuf:"varint,29,opt,name=ext_sharing,json=extSharing" json:"ext_sharing,omitempty"`
- ExtMasters *bool `protobuf:"varint,30,opt,name=ext_masters,json=extMasters" json:"ext_masters,omitempty"`
- SkipMnt []string `protobuf:"bytes,31,rep,name=skip_mnt,json=skipMnt" json:"skip_mnt,omitempty"`
- EnableFs []string `protobuf:"bytes,32,rep,name=enable_fs,json=enableFs" json:"enable_fs,omitempty"`
- UnixSkIno []*UnixSk `protobuf:"bytes,33,rep,name=unix_sk_ino,json=unixSkIno" json:"unix_sk_ino,omitempty"`
- ManageCgroupsMode *CriuCgMode `protobuf:"varint,34,opt,name=manage_cgroups_mode,json=manageCgroupsMode,enum=CriuCgMode" json:"manage_cgroups_mode,omitempty"`
- GhostLimit *uint32 `protobuf:"varint,35,opt,name=ghost_limit,json=ghostLimit,def=1048576" json:"ghost_limit,omitempty"`
- IrmapScanPaths []string `protobuf:"bytes,36,rep,name=irmap_scan_paths,json=irmapScanPaths" json:"irmap_scan_paths,omitempty"`
- External []string `protobuf:"bytes,37,rep,name=external" json:"external,omitempty"`
- EmptyNs *uint32 `protobuf:"varint,38,opt,name=empty_ns,json=emptyNs" json:"empty_ns,omitempty"`
- JoinNs []*JoinNamespace `protobuf:"bytes,39,rep,name=join_ns,json=joinNs" json:"join_ns,omitempty"`
- CgroupProps *string `protobuf:"bytes,41,opt,name=cgroup_props,json=cgroupProps" json:"cgroup_props,omitempty"`
- CgroupPropsFile *string `protobuf:"bytes,42,opt,name=cgroup_props_file,json=cgroupPropsFile" json:"cgroup_props_file,omitempty"`
- CgroupDumpController []string `protobuf:"bytes,43,rep,name=cgroup_dump_controller,json=cgroupDumpController" json:"cgroup_dump_controller,omitempty"`
- FreezeCgroup *string `protobuf:"bytes,44,opt,name=freeze_cgroup,json=freezeCgroup" json:"freeze_cgroup,omitempty"`
- Timeout *uint32 `protobuf:"varint,45,opt,name=timeout" json:"timeout,omitempty"`
- TcpSkipInFlight *bool `protobuf:"varint,46,opt,name=tcp_skip_in_flight,json=tcpSkipInFlight" json:"tcp_skip_in_flight,omitempty"`
- WeakSysctls *bool `protobuf:"varint,47,opt,name=weak_sysctls,json=weakSysctls" json:"weak_sysctls,omitempty"`
- LazyPages *bool `protobuf:"varint,48,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
- StatusFd *int32 `protobuf:"varint,49,opt,name=status_fd,json=statusFd" json:"status_fd,omitempty"`
- OrphanPtsMaster *bool `protobuf:"varint,50,opt,name=orphan_pts_master,json=orphanPtsMaster" json:"orphan_pts_master,omitempty"`
- ConfigFile *string `protobuf:"bytes,51,opt,name=config_file,json=configFile" json:"config_file,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuOpts) Reset() { *m = CriuOpts{} }
-func (m *CriuOpts) String() string { return proto.CompactTextString(m) }
-func (*CriuOpts) ProtoMessage() {}
-func (*CriuOpts) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-const Default_CriuOpts_LogLevel int32 = 2
-const Default_CriuOpts_CpuCap uint32 = 4294967295
-const Default_CriuOpts_GhostLimit uint32 = 1048576
-
-func (m *CriuOpts) GetImagesDirFd() int32 {
- if m != nil && m.ImagesDirFd != nil {
- return *m.ImagesDirFd
- }
- return 0
-}
-
-func (m *CriuOpts) GetPid() int32 {
- if m != nil && m.Pid != nil {
- return *m.Pid
- }
- return 0
-}
-
-func (m *CriuOpts) GetLeaveRunning() bool {
- if m != nil && m.LeaveRunning != nil {
- return *m.LeaveRunning
- }
- return false
-}
-
-func (m *CriuOpts) GetExtUnixSk() bool {
- if m != nil && m.ExtUnixSk != nil {
- return *m.ExtUnixSk
- }
- return false
-}
-
-func (m *CriuOpts) GetTcpEstablished() bool {
- if m != nil && m.TcpEstablished != nil {
- return *m.TcpEstablished
- }
- return false
-}
-
-func (m *CriuOpts) GetEvasiveDevices() bool {
- if m != nil && m.EvasiveDevices != nil {
- return *m.EvasiveDevices
- }
- return false
-}
-
-func (m *CriuOpts) GetShellJob() bool {
- if m != nil && m.ShellJob != nil {
- return *m.ShellJob
- }
- return false
-}
-
-func (m *CriuOpts) GetFileLocks() bool {
- if m != nil && m.FileLocks != nil {
- return *m.FileLocks
- }
- return false
-}
-
-func (m *CriuOpts) GetLogLevel() int32 {
- if m != nil && m.LogLevel != nil {
- return *m.LogLevel
- }
- return Default_CriuOpts_LogLevel
-}
-
-func (m *CriuOpts) GetLogFile() string {
- if m != nil && m.LogFile != nil {
- return *m.LogFile
- }
- return ""
-}
-
-func (m *CriuOpts) GetPs() *CriuPageServerInfo {
- if m != nil {
- return m.Ps
- }
- return nil
-}
-
-func (m *CriuOpts) GetNotifyScripts() bool {
- if m != nil && m.NotifyScripts != nil {
- return *m.NotifyScripts
- }
- return false
-}
-
-func (m *CriuOpts) GetRoot() string {
- if m != nil && m.Root != nil {
- return *m.Root
- }
- return ""
-}
-
-func (m *CriuOpts) GetParentImg() string {
- if m != nil && m.ParentImg != nil {
- return *m.ParentImg
- }
- return ""
-}
-
-func (m *CriuOpts) GetTrackMem() bool {
- if m != nil && m.TrackMem != nil {
- return *m.TrackMem
- }
- return false
-}
-
-func (m *CriuOpts) GetAutoDedup() bool {
- if m != nil && m.AutoDedup != nil {
- return *m.AutoDedup
- }
- return false
-}
-
-func (m *CriuOpts) GetWorkDirFd() int32 {
- if m != nil && m.WorkDirFd != nil {
- return *m.WorkDirFd
- }
- return 0
-}
-
-func (m *CriuOpts) GetLinkRemap() bool {
- if m != nil && m.LinkRemap != nil {
- return *m.LinkRemap
- }
- return false
-}
-
-func (m *CriuOpts) GetVeths() []*CriuVethPair {
- if m != nil {
- return m.Veths
- }
- return nil
-}
-
-func (m *CriuOpts) GetCpuCap() uint32 {
- if m != nil && m.CpuCap != nil {
- return *m.CpuCap
- }
- return Default_CriuOpts_CpuCap
-}
-
-func (m *CriuOpts) GetForceIrmap() bool {
- if m != nil && m.ForceIrmap != nil {
- return *m.ForceIrmap
- }
- return false
-}
-
-func (m *CriuOpts) GetExecCmd() []string {
- if m != nil {
- return m.ExecCmd
- }
- return nil
-}
-
-func (m *CriuOpts) GetExtMnt() []*ExtMountMap {
- if m != nil {
- return m.ExtMnt
- }
- return nil
-}
-
-func (m *CriuOpts) GetManageCgroups() bool {
- if m != nil && m.ManageCgroups != nil {
- return *m.ManageCgroups
- }
- return false
-}
-
-func (m *CriuOpts) GetCgRoot() []*CgroupRoot {
- if m != nil {
- return m.CgRoot
- }
- return nil
-}
-
-func (m *CriuOpts) GetRstSibling() bool {
- if m != nil && m.RstSibling != nil {
- return *m.RstSibling
- }
- return false
-}
-
-func (m *CriuOpts) GetInheritFd() []*InheritFd {
- if m != nil {
- return m.InheritFd
- }
- return nil
-}
-
-func (m *CriuOpts) GetAutoExtMnt() bool {
- if m != nil && m.AutoExtMnt != nil {
- return *m.AutoExtMnt
- }
- return false
-}
-
-func (m *CriuOpts) GetExtSharing() bool {
- if m != nil && m.ExtSharing != nil {
- return *m.ExtSharing
- }
- return false
-}
-
-func (m *CriuOpts) GetExtMasters() bool {
- if m != nil && m.ExtMasters != nil {
- return *m.ExtMasters
- }
- return false
-}
-
-func (m *CriuOpts) GetSkipMnt() []string {
- if m != nil {
- return m.SkipMnt
- }
- return nil
-}
-
-func (m *CriuOpts) GetEnableFs() []string {
- if m != nil {
- return m.EnableFs
- }
- return nil
-}
-
-func (m *CriuOpts) GetUnixSkIno() []*UnixSk {
- if m != nil {
- return m.UnixSkIno
- }
- return nil
-}
-
-func (m *CriuOpts) GetManageCgroupsMode() CriuCgMode {
- if m != nil && m.ManageCgroupsMode != nil {
- return *m.ManageCgroupsMode
- }
- return CriuCgMode_IGNORE
-}
-
-func (m *CriuOpts) GetGhostLimit() uint32 {
- if m != nil && m.GhostLimit != nil {
- return *m.GhostLimit
- }
- return Default_CriuOpts_GhostLimit
-}
-
-func (m *CriuOpts) GetIrmapScanPaths() []string {
- if m != nil {
- return m.IrmapScanPaths
- }
- return nil
-}
-
-func (m *CriuOpts) GetExternal() []string {
- if m != nil {
- return m.External
- }
- return nil
-}
-
-func (m *CriuOpts) GetEmptyNs() uint32 {
- if m != nil && m.EmptyNs != nil {
- return *m.EmptyNs
- }
- return 0
-}
-
-func (m *CriuOpts) GetJoinNs() []*JoinNamespace {
- if m != nil {
- return m.JoinNs
- }
- return nil
-}
-
-func (m *CriuOpts) GetCgroupProps() string {
- if m != nil && m.CgroupProps != nil {
- return *m.CgroupProps
- }
- return ""
-}
-
-func (m *CriuOpts) GetCgroupPropsFile() string {
- if m != nil && m.CgroupPropsFile != nil {
- return *m.CgroupPropsFile
- }
- return ""
-}
-
-func (m *CriuOpts) GetCgroupDumpController() []string {
- if m != nil {
- return m.CgroupDumpController
- }
- return nil
-}
-
-func (m *CriuOpts) GetFreezeCgroup() string {
- if m != nil && m.FreezeCgroup != nil {
- return *m.FreezeCgroup
- }
- return ""
-}
-
-func (m *CriuOpts) GetTimeout() uint32 {
- if m != nil && m.Timeout != nil {
- return *m.Timeout
- }
- return 0
-}
-
-func (m *CriuOpts) GetTcpSkipInFlight() bool {
- if m != nil && m.TcpSkipInFlight != nil {
- return *m.TcpSkipInFlight
- }
- return false
-}
-
-func (m *CriuOpts) GetWeakSysctls() bool {
- if m != nil && m.WeakSysctls != nil {
- return *m.WeakSysctls
- }
- return false
-}
-
-func (m *CriuOpts) GetLazyPages() bool {
- if m != nil && m.LazyPages != nil {
- return *m.LazyPages
- }
- return false
-}
-
-func (m *CriuOpts) GetStatusFd() int32 {
- if m != nil && m.StatusFd != nil {
- return *m.StatusFd
- }
- return 0
-}
-
-func (m *CriuOpts) GetOrphanPtsMaster() bool {
- if m != nil && m.OrphanPtsMaster != nil {
- return *m.OrphanPtsMaster
- }
- return false
-}
-
-func (m *CriuOpts) GetConfigFile() string {
- if m != nil && m.ConfigFile != nil {
- return *m.ConfigFile
- }
- return ""
-}
-
-type CriuDumpResp struct {
- Restored *bool `protobuf:"varint,1,opt,name=restored" json:"restored,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuDumpResp) Reset() { *m = CriuDumpResp{} }
-func (m *CriuDumpResp) String() string { return proto.CompactTextString(m) }
-func (*CriuDumpResp) ProtoMessage() {}
-func (*CriuDumpResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *CriuDumpResp) GetRestored() bool {
- if m != nil && m.Restored != nil {
- return *m.Restored
- }
- return false
-}
-
-type CriuRestoreResp struct {
- Pid *int32 `protobuf:"varint,1,req,name=pid" json:"pid,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuRestoreResp) Reset() { *m = CriuRestoreResp{} }
-func (m *CriuRestoreResp) String() string { return proto.CompactTextString(m) }
-func (*CriuRestoreResp) ProtoMessage() {}
-func (*CriuRestoreResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *CriuRestoreResp) GetPid() int32 {
- if m != nil && m.Pid != nil {
- return *m.Pid
- }
- return 0
-}
-
-type CriuNotify struct {
- Script *string `protobuf:"bytes,1,opt,name=script" json:"script,omitempty"`
- Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuNotify) Reset() { *m = CriuNotify{} }
-func (m *CriuNotify) String() string { return proto.CompactTextString(m) }
-func (*CriuNotify) ProtoMessage() {}
-func (*CriuNotify) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
-
-func (m *CriuNotify) GetScript() string {
- if m != nil && m.Script != nil {
- return *m.Script
- }
- return ""
-}
-
-func (m *CriuNotify) GetPid() int32 {
- if m != nil && m.Pid != nil {
- return *m.Pid
- }
- return 0
-}
-
-//
-// List of features which can queried via
-// CRIU_REQ_TYPE__FEATURE_CHECK
-type CriuFeatures struct {
- MemTrack *bool `protobuf:"varint,1,opt,name=mem_track,json=memTrack" json:"mem_track,omitempty"`
- LazyPages *bool `protobuf:"varint,2,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuFeatures) Reset() { *m = CriuFeatures{} }
-func (m *CriuFeatures) String() string { return proto.CompactTextString(m) }
-func (*CriuFeatures) ProtoMessage() {}
-func (*CriuFeatures) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
-
-func (m *CriuFeatures) GetMemTrack() bool {
- if m != nil && m.MemTrack != nil {
- return *m.MemTrack
- }
- return false
-}
-
-func (m *CriuFeatures) GetLazyPages() bool {
- if m != nil && m.LazyPages != nil {
- return *m.LazyPages
- }
- return false
-}
-
-type CriuReq struct {
- Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
- Opts *CriuOpts `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"`
- NotifySuccess *bool `protobuf:"varint,3,opt,name=notify_success,json=notifySuccess" json:"notify_success,omitempty"`
- //
- // When set service won't close the connection but
- // will wait for more req-s to appear. Works not
- // for all request types.
- KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open,json=keepOpen" json:"keep_open,omitempty"`
- //
- // 'features' can be used to query which features
- // are supported by the installed criu/kernel
- // via RPC.
- Features *CriuFeatures `protobuf:"bytes,5,opt,name=features" json:"features,omitempty"`
- // 'pid' is used for WAIT_PID
- Pid *uint32 `protobuf:"varint,6,opt,name=pid" json:"pid,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuReq) Reset() { *m = CriuReq{} }
-func (m *CriuReq) String() string { return proto.CompactTextString(m) }
-func (*CriuReq) ProtoMessage() {}
-func (*CriuReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *CriuReq) GetType() CriuReqType {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return CriuReqType_EMPTY
-}
-
-func (m *CriuReq) GetOpts() *CriuOpts {
- if m != nil {
- return m.Opts
- }
- return nil
-}
-
-func (m *CriuReq) GetNotifySuccess() bool {
- if m != nil && m.NotifySuccess != nil {
- return *m.NotifySuccess
- }
- return false
-}
-
-func (m *CriuReq) GetKeepOpen() bool {
- if m != nil && m.KeepOpen != nil {
- return *m.KeepOpen
- }
- return false
-}
-
-func (m *CriuReq) GetFeatures() *CriuFeatures {
- if m != nil {
- return m.Features
- }
- return nil
-}
-
-func (m *CriuReq) GetPid() uint32 {
- if m != nil && m.Pid != nil {
- return *m.Pid
- }
- return 0
-}
-
-type CriuResp struct {
- Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
- Success *bool `protobuf:"varint,2,req,name=success" json:"success,omitempty"`
- Dump *CriuDumpResp `protobuf:"bytes,3,opt,name=dump" json:"dump,omitempty"`
- Restore *CriuRestoreResp `protobuf:"bytes,4,opt,name=restore" json:"restore,omitempty"`
- Notify *CriuNotify `protobuf:"bytes,5,opt,name=notify" json:"notify,omitempty"`
- Ps *CriuPageServerInfo `protobuf:"bytes,6,opt,name=ps" json:"ps,omitempty"`
- CrErrno *int32 `protobuf:"varint,7,opt,name=cr_errno,json=crErrno" json:"cr_errno,omitempty"`
- Features *CriuFeatures `protobuf:"bytes,8,opt,name=features" json:"features,omitempty"`
- CrErrmsg *string `protobuf:"bytes,9,opt,name=cr_errmsg,json=crErrmsg" json:"cr_errmsg,omitempty"`
- Version *CriuVersion `protobuf:"bytes,10,opt,name=version" json:"version,omitempty"`
- Status *int32 `protobuf:"varint,11,opt,name=status" json:"status,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuResp) Reset() { *m = CriuResp{} }
-func (m *CriuResp) String() string { return proto.CompactTextString(m) }
-func (*CriuResp) ProtoMessage() {}
-func (*CriuResp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-func (m *CriuResp) GetType() CriuReqType {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return CriuReqType_EMPTY
-}
-
-func (m *CriuResp) GetSuccess() bool {
- if m != nil && m.Success != nil {
- return *m.Success
- }
- return false
-}
-
-func (m *CriuResp) GetDump() *CriuDumpResp {
- if m != nil {
- return m.Dump
- }
- return nil
-}
-
-func (m *CriuResp) GetRestore() *CriuRestoreResp {
- if m != nil {
- return m.Restore
- }
- return nil
-}
-
-func (m *CriuResp) GetNotify() *CriuNotify {
- if m != nil {
- return m.Notify
- }
- return nil
-}
-
-func (m *CriuResp) GetPs() *CriuPageServerInfo {
- if m != nil {
- return m.Ps
- }
- return nil
-}
-
-func (m *CriuResp) GetCrErrno() int32 {
- if m != nil && m.CrErrno != nil {
- return *m.CrErrno
- }
- return 0
-}
-
-func (m *CriuResp) GetFeatures() *CriuFeatures {
- if m != nil {
- return m.Features
- }
- return nil
-}
-
-func (m *CriuResp) GetCrErrmsg() string {
- if m != nil && m.CrErrmsg != nil {
- return *m.CrErrmsg
- }
- return ""
-}
-
-func (m *CriuResp) GetVersion() *CriuVersion {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-func (m *CriuResp) GetStatus() int32 {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return 0
-}
-
-// Answer for criu_req_type.VERSION requests
-type CriuVersion struct {
- Major *int32 `protobuf:"varint,1,req,name=major" json:"major,omitempty"`
- Minor *int32 `protobuf:"varint,2,req,name=minor" json:"minor,omitempty"`
- Gitid *string `protobuf:"bytes,3,opt,name=gitid" json:"gitid,omitempty"`
- Sublevel *int32 `protobuf:"varint,4,opt,name=sublevel" json:"sublevel,omitempty"`
- Extra *int32 `protobuf:"varint,5,opt,name=extra" json:"extra,omitempty"`
- Name *string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CriuVersion) Reset() { *m = CriuVersion{} }
-func (m *CriuVersion) String() string { return proto.CompactTextString(m) }
-func (*CriuVersion) ProtoMessage() {}
-func (*CriuVersion) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
-
-func (m *CriuVersion) GetMajor() int32 {
- if m != nil && m.Major != nil {
- return *m.Major
- }
- return 0
-}
-
-func (m *CriuVersion) GetMinor() int32 {
- if m != nil && m.Minor != nil {
- return *m.Minor
- }
- return 0
-}
-
-func (m *CriuVersion) GetGitid() string {
- if m != nil && m.Gitid != nil {
- return *m.Gitid
- }
- return ""
-}
-
-func (m *CriuVersion) GetSublevel() int32 {
- if m != nil && m.Sublevel != nil {
- return *m.Sublevel
- }
- return 0
-}
-
-func (m *CriuVersion) GetExtra() int32 {
- if m != nil && m.Extra != nil {
- return *m.Extra
- }
- return 0
-}
-
-func (m *CriuVersion) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*CriuPageServerInfo)(nil), "criu_page_server_info")
- proto.RegisterType((*CriuVethPair)(nil), "criu_veth_pair")
- proto.RegisterType((*ExtMountMap)(nil), "ext_mount_map")
- proto.RegisterType((*JoinNamespace)(nil), "join_namespace")
- proto.RegisterType((*InheritFd)(nil), "inherit_fd")
- proto.RegisterType((*CgroupRoot)(nil), "cgroup_root")
- proto.RegisterType((*UnixSk)(nil), "unix_sk")
- proto.RegisterType((*CriuOpts)(nil), "criu_opts")
- proto.RegisterType((*CriuDumpResp)(nil), "criu_dump_resp")
- proto.RegisterType((*CriuRestoreResp)(nil), "criu_restore_resp")
- proto.RegisterType((*CriuNotify)(nil), "criu_notify")
- proto.RegisterType((*CriuFeatures)(nil), "criu_features")
- proto.RegisterType((*CriuReq)(nil), "criu_req")
- proto.RegisterType((*CriuResp)(nil), "criu_resp")
- proto.RegisterType((*CriuVersion)(nil), "criu_version")
- proto.RegisterEnum("CriuCgMode", CriuCgMode_name, CriuCgMode_value)
- proto.RegisterEnum("CriuReqType", CriuReqType_name, CriuReqType_value)
-}
-
-func init() { proto.RegisterFile("rpc/rpc.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 1835 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xeb, 0x72, 0x5b, 0xb7,
- 0x11, 0x0e, 0x29, 0xf1, 0x06, 0x5e, 0x7c, 0x0c, 0x5f, 0x02, 0xc7, 0xb5, 0xad, 0xd0, 0x51, 0xa2,
- 0x2a, 0x2e, 0x93, 0x30, 0x76, 0x5c, 0x67, 0xda, 0x1f, 0x1e, 0x8a, 0x74, 0xd8, 0x48, 0x22, 0x07,
- 0xa4, 0xdc, 0xc9, 0x2f, 0xcc, 0xd1, 0x39, 0x20, 0x05, 0xf3, 0xdc, 0x0a, 0x80, 0x8a, 0xe4, 0x97,
- 0xe8, 0xbf, 0x3e, 0x57, 0xde, 0xa4, 0xaf, 0xd0, 0xd9, 0x05, 0x28, 0x4b, 0x49, 0x66, 0xd2, 0x7f,
- 0xd8, 0x0f, 0xbb, 0xc0, 0xde, 0x77, 0x49, 0x5b, 0x17, 0xd1, 0x57, 0xba, 0x88, 0x7a, 0x85, 0xce,
- 0x6d, 0xde, 0x5d, 0x92, 0x7b, 0x91, 0x56, 0x6b, 0x51, 0x84, 0x4b, 0x29, 0x8c, 0xd4, 0xe7, 0x52,
- 0x0b, 0x95, 0x2d, 0x72, 0xca, 0x48, 0x2d, 0x8c, 0x63, 0x2d, 0x8d, 0x61, 0xa5, 0x9d, 0xd2, 0x5e,
- 0x83, 0x6f, 0x48, 0x4a, 0xc9, 0x76, 0x91, 0x6b, 0xcb, 0xca, 0x3b, 0xa5, 0xbd, 0x0a, 0xc7, 0x33,
- 0x0d, 0xc8, 0x56, 0xa1, 0x62, 0xb6, 0x85, 0x10, 0x1c, 0x69, 0x87, 0x94, 0x17, 0x31, 0xdb, 0x46,
- 0xa0, 0xbc, 0x88, 0xbb, 0x7f, 0x23, 0x1d, 0xfc, 0xe8, 0x5c, 0xda, 0x33, 0x51, 0x84, 0x4a, 0xd3,
- 0x3b, 0xa4, 0xa2, 0x16, 0x42, 0x65, 0xac, 0xb4, 0x53, 0xde, 0x6b, 0xf0, 0x6d, 0xb5, 0x18, 0x67,
- 0xf4, 0x1e, 0xa9, 0xaa, 0x85, 0xc8, 0xd7, 0xf0, 0x3c, 0xa0, 0x15, 0xb5, 0x98, 0xac, 0x6d, 0xf7,
- 0x5b, 0xd2, 0x96, 0x17, 0x56, 0xa4, 0xf9, 0x3a, 0xb3, 0x22, 0x0d, 0x0b, 0xf8, 0x70, 0x25, 0x2f,
- 0xbd, 0x28, 0x1c, 0x01, 0x39, 0x0f, 0x13, 0x2f, 0x06, 0xc7, 0xee, 0x5b, 0xd2, 0x79, 0x97, 0xab,
- 0x4c, 0x64, 0x61, 0x2a, 0x4d, 0x11, 0x46, 0x12, 0x94, 0xca, 0x8c, 0x17, 0x2a, 0x67, 0x86, 0x7e,
- 0x4c, 0x6a, 0x99, 0x11, 0x0b, 0x95, 0x48, 0x2f, 0x57, 0xcd, 0xcc, 0x48, 0x25, 0x92, 0x3e, 0x24,
- 0x0d, 0x79, 0x61, 0x75, 0x28, 0xf2, 0xc2, 0xa2, 0x55, 0x0d, 0x5e, 0x47, 0x60, 0x52, 0xd8, 0x6e,
- 0x8f, 0x10, 0x95, 0x9d, 0x49, 0xad, 0xac, 0x58, 0xc4, 0xbf, 0xa3, 0x89, 0x33, 0x1d, 0x1e, 0x74,
- 0xa6, 0xbf, 0x20, 0xcd, 0x68, 0xa9, 0xf3, 0x75, 0x21, 0x74, 0x9e, 0x5b, 0xf0, 0x5f, 0x64, 0x75,
- 0xe2, 0xdd, 0x8a, 0x67, 0xf4, 0x69, 0x68, 0xcf, 0xbc, 0x16, 0x78, 0xee, 0x3e, 0x21, 0xb5, 0x75,
- 0xa6, 0x2e, 0x84, 0x59, 0xd1, 0xbb, 0xa4, 0xa2, 0xb2, 0x3c, 0x96, 0xf8, 0x4b, 0x9b, 0x3b, 0xa2,
- 0xfb, 0xdf, 0x36, 0x69, 0xa0, 0x4f, 0xf3, 0xc2, 0x1a, 0xda, 0x25, 0x6d, 0x95, 0x86, 0x4b, 0x69,
- 0x44, 0xac, 0xb4, 0x58, 0xc4, 0xc8, 0x5b, 0xe1, 0x4d, 0x07, 0x1e, 0x28, 0x3d, 0x8a, 0x37, 0x61,
- 0x2a, 0x7f, 0x08, 0xd3, 0x53, 0xd2, 0x4e, 0x64, 0x78, 0x2e, 0x85, 0x5e, 0x67, 0x99, 0xca, 0x96,
- 0x68, 0x6c, 0x9d, 0xb7, 0x10, 0xe4, 0x0e, 0xa3, 0x8f, 0x49, 0x13, 0xbc, 0xef, 0xb5, 0xc1, 0xa0,
- 0xd6, 0x39, 0x38, 0xe8, 0x24, 0x53, 0x17, 0xb3, 0x15, 0xfd, 0x82, 0xdc, 0xb2, 0x51, 0x21, 0xa4,
- 0xb1, 0xe1, 0x69, 0xa2, 0xcc, 0x99, 0x8c, 0x59, 0x05, 0x79, 0x3a, 0x36, 0x2a, 0x86, 0x1f, 0x50,
- 0x60, 0x94, 0xe7, 0xa1, 0x51, 0xe7, 0x52, 0xc4, 0xf2, 0x5c, 0x45, 0xd2, 0xb0, 0xaa, 0x63, 0xf4,
- 0xf0, 0x81, 0x43, 0xc1, 0xff, 0xe6, 0x4c, 0x26, 0x89, 0x78, 0x97, 0x9f, 0xb2, 0x1a, 0xb2, 0xd4,
- 0x11, 0xf8, 0x47, 0x7e, 0x4a, 0x1f, 0x11, 0x02, 0x21, 0x13, 0x49, 0x1e, 0xad, 0x0c, 0xab, 0x3b,
- 0x6d, 0x00, 0x39, 0x04, 0x80, 0x3e, 0x26, 0x8d, 0x24, 0x5f, 0x8a, 0x44, 0x9e, 0xcb, 0x84, 0x35,
- 0xc0, 0xd4, 0xef, 0x4b, 0x7d, 0x5e, 0x4f, 0xf2, 0xe5, 0x21, 0x40, 0xf4, 0x01, 0x81, 0xb3, 0x8b,
- 0x3a, 0x71, 0xa9, 0x9d, 0xe4, 0x4b, 0x0c, 0xfb, 0xe7, 0xa4, 0x5c, 0x18, 0xd6, 0xdc, 0x29, 0xed,
- 0x35, 0xfb, 0xf7, 0x7b, 0xbf, 0x5b, 0x18, 0xbc, 0x5c, 0x18, 0xba, 0x4b, 0x3a, 0x59, 0x6e, 0xd5,
- 0xe2, 0x52, 0x98, 0x48, 0xab, 0xc2, 0x1a, 0xd6, 0x42, 0x2d, 0xda, 0x0e, 0x9d, 0x39, 0x10, 0xa2,
- 0x0a, 0x11, 0x67, 0x6d, 0x17, 0x69, 0x8c, 0xfe, 0x23, 0x42, 0x8a, 0x50, 0xcb, 0xcc, 0x0a, 0x95,
- 0x2e, 0x59, 0x07, 0x6f, 0x1a, 0x0e, 0x19, 0xa7, 0x4b, 0x30, 0xdc, 0xea, 0x30, 0x5a, 0x89, 0x54,
- 0xa6, 0xec, 0x96, 0x33, 0x1c, 0x81, 0x23, 0x99, 0x82, 0x6c, 0xb8, 0xb6, 0xb9, 0x88, 0x65, 0xbc,
- 0x2e, 0x58, 0xe0, 0x0c, 0x07, 0xe4, 0x00, 0x00, 0x08, 0xd3, 0xcf, 0xb9, 0x5e, 0x6d, 0xe2, 0x7f,
- 0x1b, 0xa3, 0xdc, 0x00, 0xc8, 0x45, 0xff, 0x11, 0x21, 0x89, 0xca, 0x56, 0x42, 0xcb, 0x34, 0x2c,
- 0x18, 0x75, 0xe2, 0x80, 0x70, 0x00, 0xe8, 0x2e, 0xa9, 0x40, 0x71, 0x1a, 0x76, 0x67, 0x67, 0x6b,
- 0xaf, 0xd9, 0xbf, 0xd5, 0xbb, 0x59, 0xaf, 0xdc, 0xdd, 0xd2, 0xa7, 0xa4, 0x16, 0x15, 0x6b, 0x11,
- 0x85, 0x05, 0xbb, 0xbb, 0x53, 0xda, 0x6b, 0x7f, 0x4f, 0x9e, 0xf7, 0x5f, 0x3d, 0x7f, 0xf5, 0xdd,
- 0xcb, 0xfe, 0xab, 0x17, 0xbc, 0x1a, 0x15, 0xeb, 0x41, 0x58, 0xd0, 0x27, 0xa4, 0xb9, 0xc8, 0x75,
- 0x24, 0x85, 0xd2, 0xf0, 0xd7, 0x3d, 0xfc, 0x8b, 0x20, 0x34, 0x06, 0x04, 0x82, 0x20, 0x2f, 0x64,
- 0x24, 0xa2, 0x34, 0x66, 0xf7, 0x77, 0xb6, 0x20, 0x08, 0x40, 0x0f, 0x52, 0x48, 0x92, 0x1a, 0xd6,
- 0x7a, 0x66, 0xd9, 0xc7, 0xa8, 0x49, 0xa7, 0x77, 0xa3, 0xf6, 0x79, 0x55, 0x5e, 0xd8, 0xa3, 0xcc,
- 0x42, 0x14, 0xd2, 0x30, 0x83, 0xf8, 0xb8, 0xf2, 0x32, 0x8c, 0xb9, 0x28, 0x38, 0x74, 0xe0, 0x40,
- 0xba, 0x4b, 0x6a, 0xd1, 0x12, 0x4b, 0x8f, 0x3d, 0xc0, 0xf7, 0x5a, 0xbd, 0x6b, 0xe5, 0xc8, 0xab,
- 0xd1, 0x92, 0x43, 0x60, 0x9e, 0x90, 0xa6, 0x36, 0x56, 0x18, 0x75, 0x9a, 0x40, 0x1d, 0x7c, 0xe2,
- 0x54, 0xd6, 0xc6, 0xce, 0x1c, 0x42, 0xf7, 0xaf, 0x97, 0x3d, 0x7b, 0x88, 0x4f, 0x35, 0x7b, 0x1f,
- 0x20, 0xde, 0xf0, 0xe7, 0x51, 0x4c, 0x77, 0x48, 0x0b, 0x23, 0xb5, 0x31, 0xe4, 0x4f, 0xee, 0x35,
- 0xc0, 0x86, 0x4e, 0xf9, 0x27, 0xae, 0xa6, 0xcc, 0x59, 0xa8, 0xe1, 0xbb, 0x47, 0x8e, 0x41, 0x5e,
- 0xd8, 0x99, 0x43, 0x36, 0x0c, 0x69, 0x68, 0xac, 0xd4, 0x86, 0x3d, 0xbe, 0x62, 0x38, 0x72, 0x08,
- 0xb8, 0xd0, 0xac, 0x54, 0x81, 0xef, 0x3f, 0x71, 0x2e, 0x04, 0x1a, 0x1e, 0x87, 0xf6, 0x95, 0x85,
- 0xa7, 0x89, 0x14, 0x0b, 0xc3, 0x76, 0xf0, 0xae, 0xee, 0x80, 0x91, 0xa1, 0x7b, 0xa4, 0xe9, 0x2b,
- 0x59, 0xa8, 0x2c, 0x67, 0x9f, 0xa2, 0x21, 0xf5, 0x9e, 0xc7, 0x78, 0x63, 0x8d, 0x45, 0x3d, 0xce,
- 0x72, 0xfa, 0x77, 0x72, 0xe7, 0xa6, 0x83, 0x45, 0x0a, 0x4d, 0xa8, 0xbb, 0x53, 0xda, 0xeb, 0xf4,
- 0xdb, 0x2e, 0x3f, 0xa2, 0x25, 0x82, 0xfc, 0xf6, 0x0d, 0xa7, 0x1f, 0xe5, 0xb1, 0x84, 0x8f, 0x96,
- 0x67, 0xb9, 0xb1, 0x22, 0x51, 0xa9, 0xb2, 0xec, 0x29, 0x66, 0x4b, 0xed, 0x9b, 0xaf, 0x9f, 0xff,
- 0xf5, 0xc5, 0xcb, 0xef, 0x38, 0xc1, 0xbb, 0x43, 0xb8, 0xa2, 0x7b, 0x24, 0xc0, 0x44, 0x11, 0x26,
- 0x0a, 0x33, 0x01, 0xdd, 0xcf, 0xb0, 0xcf, 0x50, 0xed, 0x0e, 0xe2, 0xb3, 0x28, 0xcc, 0xa6, 0x80,
- 0xd2, 0x4f, 0x20, 0x6f, 0xac, 0xd4, 0x59, 0x98, 0xb0, 0x5d, 0x6f, 0x98, 0xa7, 0x31, 0xa7, 0xd2,
- 0xc2, 0x5e, 0x8a, 0xcc, 0xb0, 0xcf, 0xe1, 0x33, 0x5e, 0x43, 0xfa, 0x18, 0x6c, 0xae, 0xb9, 0x51,
- 0x60, 0xd8, 0x17, 0x3e, 0xbb, 0x6f, 0x8e, 0x06, 0x5e, 0x05, 0xfa, 0xd8, 0xd0, 0x4f, 0x49, 0xcb,
- 0x67, 0x47, 0xa1, 0xf3, 0xc2, 0xb0, 0x3f, 0x63, 0x85, 0xfa, 0x06, 0x3e, 0x05, 0x88, 0xee, 0x93,
- 0xdb, 0xd7, 0x59, 0x5c, 0x27, 0xd9, 0x47, 0xbe, 0x5b, 0xd7, 0xf8, 0xb0, 0xa3, 0x3c, 0x27, 0xf7,
- 0x3d, 0x6f, 0xbc, 0x4e, 0x0b, 0x11, 0xe5, 0x99, 0xd5, 0x79, 0x92, 0x48, 0xcd, 0xbe, 0x44, 0xed,
- 0xef, 0xba, 0xdb, 0x83, 0x75, 0x5a, 0x0c, 0xae, 0xee, 0xa0, 0x2b, 0x2f, 0xb4, 0x94, 0xef, 0x37,
- 0x8e, 0x67, 0xcf, 0xf0, 0xf5, 0x96, 0x03, 0x9d, 0x8f, 0x61, 0x42, 0x5b, 0x95, 0x4a, 0x98, 0x95,
- 0x7f, 0x71, 0xd6, 0x7a, 0x92, 0x7e, 0x49, 0x28, 0xf4, 0x63, 0xcc, 0x0e, 0x95, 0x89, 0x45, 0xa2,
- 0x96, 0x67, 0x96, 0xf5, 0x30, 0x83, 0xa0, 0x53, 0xcf, 0x56, 0xaa, 0x18, 0x67, 0x23, 0x84, 0xc1,
- 0xe0, 0x9f, 0x65, 0xb8, 0x12, 0xe6, 0xd2, 0x44, 0x36, 0x31, 0xec, 0x2b, 0x64, 0x6b, 0x02, 0x36,
- 0x73, 0x10, 0x36, 0x8e, 0xf0, 0xfd, 0x25, 0xf6, 0x42, 0xc3, 0xbe, 0xf6, 0x8d, 0x23, 0x7c, 0x7f,
- 0x39, 0x05, 0x00, 0x9b, 0xb5, 0x0d, 0xed, 0xda, 0x40, 0x5d, 0x7c, 0x83, 0x5d, 0xa7, 0xee, 0x80,
- 0x51, 0x0c, 0xce, 0xca, 0x75, 0x71, 0x06, 0x61, 0xb5, 0xc6, 0x67, 0x33, 0xeb, 0x3b, 0x55, 0xdc,
- 0xc5, 0xd4, 0x1a, 0x97, 0xd2, 0x90, 0xf2, 0x51, 0x9e, 0x2d, 0x94, 0x6f, 0xce, 0xdf, 0xa2, 0xd1,
- 0xc4, 0x41, 0xe0, 0xcd, 0xee, 0x33, 0xbf, 0x44, 0xa0, 0x2f, 0xb5, 0x34, 0x05, 0xe4, 0x83, 0x96,
- 0xc6, 0xe6, 0x5a, 0xc6, 0x38, 0x50, 0xeb, 0xfc, 0x8a, 0xee, 0xee, 0x92, 0xdb, 0xc8, 0xed, 0x01,
- 0x27, 0xe0, 0x47, 0xa0, 0x1b, 0x8e, 0x70, 0xec, 0xbe, 0x24, 0x4d, 0x64, 0x73, 0xbd, 0x9b, 0xde,
- 0x27, 0x55, 0xd7, 0xd4, 0xfd, 0x80, 0xf6, 0xd4, 0x6f, 0x67, 0x67, 0xf7, 0x47, 0xd2, 0x46, 0xc1,
- 0x85, 0x0c, 0xed, 0x5a, 0x3b, 0x47, 0xa4, 0x32, 0x15, 0xd8, 0xaf, 0x37, 0xda, 0xa4, 0x32, 0x9d,
- 0x03, 0xfd, 0x2b, 0x27, 0x96, 0x7f, 0xe5, 0xc4, 0xee, 0x2f, 0x25, 0x52, 0xf7, 0xda, 0xfe, 0x8b,
- 0x76, 0xc9, 0xb6, 0xbd, 0x2c, 0xdc, 0xb8, 0xef, 0xf4, 0x3b, 0xbd, 0xcd, 0x85, 0x00, 0x94, 0xe3,
- 0x1d, 0x7d, 0x4c, 0xb6, 0x61, 0xee, 0xe3, 0x4b, 0xcd, 0x3e, 0xe9, 0x5d, 0x6d, 0x02, 0x1c, 0xf1,
- 0xeb, 0x33, 0x6a, 0x1d, 0x45, 0xb0, 0xc7, 0x6d, 0xdd, 0x98, 0x51, 0x0e, 0x04, 0x9d, 0x57, 0x52,
- 0x16, 0x22, 0x2f, 0x64, 0xe6, 0x27, 0x7b, 0x1d, 0x80, 0x49, 0x21, 0x33, 0xba, 0x4f, 0xea, 0x1b,
- 0xe3, 0x70, 0xa2, 0x37, 0x37, 0xba, 0x6c, 0x50, 0x7e, 0x75, 0xbf, 0xf1, 0x4f, 0x15, 0x53, 0x11,
- 0xfd, 0xf3, 0xef, 0x2d, 0xbf, 0x9f, 0xa0, 0xe3, 0xff, 0x1f, 0x9b, 0x18, 0xa9, 0x6d, 0x94, 0x85,
- 0x4d, 0xa8, 0xce, 0x37, 0x24, 0x7d, 0x4a, 0xb6, 0x21, 0xe8, 0x68, 0xc3, 0xd5, 0x6c, 0xba, 0x4a,
- 0x03, 0x8e, 0x97, 0xf4, 0x19, 0xa9, 0xf9, 0x58, 0xa3, 0x25, 0xcd, 0x3e, 0xed, 0xfd, 0x26, 0x01,
- 0xf8, 0x86, 0x85, 0x7e, 0x46, 0xaa, 0xce, 0x15, 0xde, 0xb4, 0x56, 0xef, 0x5a, 0x1a, 0x70, 0x7f,
- 0xe7, 0x57, 0x82, 0xea, 0x1f, 0xae, 0x04, 0x0f, 0x20, 0x7c, 0x42, 0x6a, 0x9d, 0xe5, 0xb8, 0xb0,
- 0x54, 0x78, 0x2d, 0xd2, 0x43, 0x20, 0x6f, 0x78, 0xb1, 0xfe, 0x07, 0x5e, 0x7c, 0x08, 0x2e, 0x83,
- 0x67, 0x52, 0xb3, 0xc4, 0xe5, 0xa5, 0xc1, 0xeb, 0xf8, 0x4e, 0x6a, 0x96, 0x30, 0x19, 0xcf, 0xa5,
- 0x36, 0x2a, 0xcf, 0x70, 0x71, 0x69, 0x6e, 0x7a, 0xb0, 0x07, 0xf9, 0xe6, 0x16, 0x73, 0x18, 0x0b,
- 0x10, 0x77, 0x99, 0x0a, 0xf7, 0x54, 0xf7, 0x3f, 0x25, 0xd2, 0xba, 0x2e, 0x01, 0x8b, 0x65, 0x1a,
- 0xbe, 0xcb, 0xb5, 0xaf, 0x07, 0x47, 0x20, 0xaa, 0xb2, 0x5c, 0xfb, 0x1d, 0xd6, 0x11, 0x80, 0x2e,
- 0x95, 0xf5, 0x5b, 0x7e, 0x83, 0x3b, 0x02, 0x0a, 0xd0, 0xac, 0x4f, 0xdd, 0xb2, 0xb5, 0xed, 0x6b,
- 0xdf, 0xd3, 0x20, 0x81, 0x4b, 0x33, 0x3a, 0xb8, 0xc2, 0x1d, 0x01, 0x5b, 0x11, 0xb4, 0x5d, 0xf4,
- 0x69, 0x83, 0xe3, 0x79, 0x5f, 0x78, 0xbd, 0xfc, 0x34, 0xa1, 0x84, 0x54, 0xc7, 0x6f, 0x8e, 0x27,
- 0x7c, 0x18, 0x7c, 0x44, 0x9b, 0xa4, 0x36, 0x78, 0x23, 0x8e, 0x27, 0xc7, 0xc3, 0xa0, 0x44, 0x1b,
- 0xa4, 0x32, 0xe5, 0x93, 0xe9, 0x2c, 0x28, 0xd3, 0x3a, 0xd9, 0x9e, 0x4d, 0x46, 0xf3, 0x60, 0x0b,
- 0x4e, 0xa3, 0x93, 0xc3, 0xc3, 0x60, 0x1b, 0xe4, 0x66, 0x73, 0x3e, 0x1e, 0xcc, 0x83, 0x0a, 0xc8,
- 0x1d, 0x0c, 0x47, 0xaf, 0x4f, 0x0e, 0xe7, 0x41, 0x75, 0xff, 0x97, 0x92, 0x2f, 0xd6, 0x4d, 0xc6,
- 0xc1, 0x4b, 0xc3, 0xa3, 0xe9, 0xfc, 0xa7, 0xe0, 0x23, 0x90, 0x3f, 0x38, 0x39, 0x9a, 0x06, 0x25,
- 0x90, 0xe1, 0xc3, 0xd9, 0x1c, 0x3e, 0x2e, 0x03, 0xc7, 0xe0, 0x87, 0xe1, 0xe0, 0xc7, 0x60, 0x8b,
- 0xb6, 0x48, 0x7d, 0xca, 0x87, 0x02, 0xb9, 0xb6, 0xe9, 0x2d, 0xd2, 0x9c, 0xbe, 0x7e, 0x33, 0x14,
- 0xb3, 0x21, 0x7f, 0x3b, 0xe4, 0x41, 0x05, 0xbe, 0x3d, 0x9e, 0xcc, 0xc7, 0xa3, 0x9f, 0x82, 0x2a,
- 0x0d, 0x48, 0x6b, 0x30, 0x3d, 0x19, 0x1f, 0x8f, 0x26, 0x8e, 0xbd, 0x46, 0x6f, 0x93, 0xf6, 0x06,
- 0x71, 0xef, 0xd5, 0x01, 0x1a, 0x0d, 0x5f, 0xcf, 0x4f, 0xf8, 0xd0, 0x43, 0x0d, 0xf8, 0xfa, 0xed,
- 0x90, 0xcf, 0xc6, 0x93, 0xe3, 0x80, 0xc0, 0x7f, 0xff, 0x7c, 0x3d, 0x9e, 0x8b, 0xe9, 0xf8, 0x20,
- 0x68, 0xd2, 0xbb, 0x24, 0xb8, 0xf6, 0x9f, 0x18, 0xfc, 0x70, 0x78, 0x10, 0xb4, 0xfe, 0x17, 0x00,
- 0x00, 0xff, 0xff, 0xf8, 0x9f, 0x0e, 0x7d, 0xca, 0x0d, 0x00, 0x00,
-}
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore
new file mode 100644
index 000000000..6c7385fa2
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore
@@ -0,0 +1,5 @@
+test/test
+test/piggie/piggie
+test/phaul
+image
+stats/stats.proto
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/.golangci.yml b/vendor/github.com/checkpoint-restore/go-criu/v5/.golangci.yml
new file mode 100644
index 000000000..fbbac4b41
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/.golangci.yml
@@ -0,0 +1,12 @@
+run:
+ skip_dirs:
+ - rpc
+ - stats
+
+linters:
+ disable-all: false
+ presets:
+ - bugs
+ - performance
+ - unused
+ - format
diff --git a/vendor/github.com/checkpoint-restore/go-criu/LICENSE b/vendor/github.com/checkpoint-restore/go-criu/v5/LICENSE
index 8dada3eda..8dada3eda 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/LICENSE
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/LICENSE
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile b/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile
new file mode 100644
index 000000000..a5c5f5542
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile
@@ -0,0 +1,62 @@
+GO ?= go
+CC ?= gcc
+
+all: build test phaul-test
+
+lint:
+ golangci-lint run ./...
+
+build:
+ $(GO) build -v ./...
+
+TEST_BINARIES := test/test test/piggie/piggie test/phaul/phaul
+test-bin: $(TEST_BINARIES)
+
+test/piggie/piggie: test/piggie/piggie.c
+ $(CC) $^ -o $@
+
+test/test: test/*.go
+ $(GO) build -v -o $@ $^
+
+test: $(TEST_BINARIES)
+ mkdir -p image
+ PID=$$(test/piggie/piggie) && { \
+ test/test dump $$PID image && \
+ test/test restore image; \
+ pkill -9 piggie; \
+ }
+ rm -rf image
+
+test/phaul/phaul: test/phaul/*.go
+ $(GO) build -v -o $@ $^
+
+phaul-test: $(TEST_BINARIES)
+ rm -rf image
+ PID=$$(test/piggie/piggie) && { \
+ test/phaul/phaul $$PID; \
+ pkill -9 piggie; \
+ }
+
+clean:
+ @rm -f $(TEST_BINARIES)
+ @rm -rf image
+ @rm -f rpc/rpc.proto stats/stats.proto
+
+rpc/rpc.proto:
+ curl -sSL https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/rpc.proto -o $@
+
+stats/stats.proto:
+ curl -sSL https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/stats.proto -o $@
+
+rpc/rpc.pb.go: rpc/rpc.proto
+ protoc --go_out=. --go_opt=Mrpc/rpc.proto=rpc/ $^
+
+stats/stats.pb.go: stats/stats.proto
+ protoc --go_out=. $^
+
+vendor:
+ GO111MODULE=on $(GO) mod tidy
+ GO111MODULE=on $(GO) mod vendor
+ GO111MODULE=on $(GO) mod verify
+
+.PHONY: build test phaul-test test-bin clean lint vendor
diff --git a/vendor/github.com/checkpoint-restore/go-criu/README.md b/vendor/github.com/checkpoint-restore/go-criu/v5/README.md
index 539627324..390da3e98 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/README.md
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/README.md
@@ -1,8 +1,10 @@
-[![master](https://travis-ci.org/checkpoint-restore/go-criu.svg?branch=master)](https://travis-ci.org/checkpoint-restore/go-criu)
+[![test](https://github.com/checkpoint-restore/go-criu/workflows/ci/badge.svg?branch=master)](https://github.com/checkpoint-restore/go-criu/actions?query=workflow%3Aci)
+[![verify](https://github.com/checkpoint-restore/go-criu/workflows/verify/badge.svg?branch=master)](https://github.com/checkpoint-restore/go-criu/actions?query=workflow%3Averify)
+[![Go Reference](https://pkg.go.dev/badge/github.com/checkpoint-restore/go-criu.svg)](https://pkg.go.dev/github.com/checkpoint-restore/go-criu)
-## go-criu -- Go bindings for [CRIU](https://criu.org/)
+## go-criu -- Go bindings for CRIU
-This repository provides Go bindings for CRIU. The code is based on the Go based PHaul
+This repository provides Go bindings for [CRIU](https://criu.org/). The code is based on the Go-based PHaul
implementation from the CRIU repository. For easier inclusion into other Go projects the
CRIU Go bindings have been moved to this repository.
@@ -10,24 +12,46 @@ The Go bindings provide an easy way to use the CRIU RPC calls from Go without th
to set up all the infrastructure to make the actual RPC connection to CRIU.
The following example would print the version of CRIU:
-```
+```go
+import (
+ "log"
+
+ "github.com/checkpoint/restore/go-criu/v5"
+)
+
+func main() {
c := criu.MakeCriu()
version, err := c.GetCriuVersion()
- fmt.Println(version)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(version)
+}
```
+
or to just check if at least a certain CRIU version is installed:
-```
+
+```go
c := criu.MakeCriu()
result, err := c.IsCriuAtLeast(31100)
```
## Releases
-go-criu will carry the same version number as CRIU. This implies that each
-go-criu release will pull in the necessary changes from CRIU before making a
-release.
+The first go-criu release was 3.11 based on CRIU 3.11. The initial plan
+was to follow CRIU so that go-criu would carry the same version number as
+CRIU.
+
+As go-criu is imported in other projects and as Go modules are expected
+to follow Semantic Versioning, go-criu will also follow Semantic Versioning
+starting with the 4.0.0 release.
-The first go-criu release was 3.11 based on CRIU 3.11.
+The following table shows the relation between go-criu and CRIU versions:
+
+| Major version | Latest release | CRIU version |
+| ------------- | -------------- | ------------ |
+| v5            | 5.0.0          | 3.15         |
+| v4            | 4.1.0          | 3.14         |
## How to contribute
@@ -61,6 +85,11 @@ by adding a "Signed-off-by" line containing the contributor's name and e-mail
to every commit message. Your signature certifies that you wrote the patch or
otherwise have the right to pass it on as an open-source patch.
-### License
+### License and copyright
+
+Unless mentioned otherwise in a specific file's header, all code in
+this project is released under the Apache 2.0 license.
-The license of go-criu is the Apache 2.0 license.
+The author of a change remains the copyright holder of their code
+(no copyright assignment). The list of authors and contributors can be
+retrieved from the git commit history and in some cases, the file headers.
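For convenience, the two README snippets above can be assembled into one complete program, which also exercises the new SetCriuPath helper added in main.go below. This is only an illustrative sketch: the /usr/local/sbin/criu path is an assumption, and IsCriuAtLeast is assumed to return (bool, error), as the snippet's `result, err :=` form suggests.

```go
package main

import (
	"log"

	criu "github.com/checkpoint-restore/go-criu/v5"
)

func main() {
	c := criu.MakeCriu()

	// Optional: point at a CRIU binary outside $PATH (path is an assumption).
	c.SetCriuPath("/usr/local/sbin/criu")

	version, err := c.GetCriuVersion()
	if err != nil {
		log.Fatalln(err)
	}
	// The version is encoded as major*10000 + minor*100 + sublevel,
	// mirroring GetCriuVersion in main.go below.
	log.Println("CRIU version code:", version)

	ok, err := c.IsCriuAtLeast(31100) // 3.11.0
	if err != nil {
		log.Fatalln(err)
	}
	if !ok {
		log.Fatalln("CRIU >= 3.11 is required")
	}
}
```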
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/go.mod b/vendor/github.com/checkpoint-restore/go-criu/v5/go.mod
new file mode 100644
index 000000000..69595701f
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/go.mod
@@ -0,0 +1,9 @@
+module github.com/checkpoint-restore/go-criu/v5
+
+go 1.13
+
+require (
+ github.com/golang/protobuf v1.5.2
+ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
+ google.golang.org/protobuf v1.26.0
+)
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/go.sum b/vendor/github.com/checkpoint-restore/go-criu/v5/go.sum
new file mode 100644
index 000000000..7e17df214
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/go.sum
@@ -0,0 +1,12 @@
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
diff --git a/vendor/github.com/checkpoint-restore/go-criu/main.go b/vendor/github.com/checkpoint-restore/go-criu/v5/main.go
index cf94c376e..78811c309 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/main.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/main.go
@@ -8,19 +8,28 @@ import (
"strconv"
"syscall"
- "github.com/checkpoint-restore/go-criu/rpc"
- "github.com/golang/protobuf/proto"
+ "github.com/checkpoint-restore/go-criu/v5/rpc"
+ "google.golang.org/protobuf/proto"
)
// Criu struct
type Criu struct {
- swrkCmd *exec.Cmd
- swrkSk *os.File
+ swrkCmd *exec.Cmd
+ swrkSk *os.File
+ swrkPath string
}
// MakeCriu returns the Criu object required for most operations
func MakeCriu() *Criu {
- return &Criu{}
+ return &Criu{
+ swrkPath: "criu",
+ }
+}
+
+// SetCriuPath allows setting the path to the CRIU binary
+// if it is in a non standard location
+func (c *Criu) SetCriuPath(path string) {
+ c.swrkPath = path
}
// Prepare sets up everything for the RPC communication to CRIU
@@ -36,7 +45,8 @@ func (c *Criu) Prepare() error {
defer srv.Close()
args := []string{"swrk", strconv.Itoa(fds[1])}
- cmd := exec.Command("criu", args...)
+ // #nosec G204
+ cmd := exec.Command(c.swrkPath, args...)
err = cmd.Start()
if err != nil {
@@ -55,7 +65,7 @@ func (c *Criu) Cleanup() {
if c.swrkCmd != nil {
c.swrkSk.Close()
c.swrkSk = nil
- c.swrkCmd.Wait()
+ _ = c.swrkCmd.Wait()
c.swrkCmd = nil
}
}
@@ -178,28 +188,28 @@ func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy N
}
// Dump dumps a process
-func (c *Criu) Dump(opts rpc.CriuOpts, nfy Notify) error {
- return c.doSwrk(rpc.CriuReqType_DUMP, &opts, nfy)
+func (c *Criu) Dump(opts *rpc.CriuOpts, nfy Notify) error {
+ return c.doSwrk(rpc.CriuReqType_DUMP, opts, nfy)
}
// Restore restores a process
-func (c *Criu) Restore(opts rpc.CriuOpts, nfy Notify) error {
- return c.doSwrk(rpc.CriuReqType_RESTORE, &opts, nfy)
+func (c *Criu) Restore(opts *rpc.CriuOpts, nfy Notify) error {
+ return c.doSwrk(rpc.CriuReqType_RESTORE, opts, nfy)
}
// PreDump does a pre-dump
-func (c *Criu) PreDump(opts rpc.CriuOpts, nfy Notify) error {
- return c.doSwrk(rpc.CriuReqType_PRE_DUMP, &opts, nfy)
+func (c *Criu) PreDump(opts *rpc.CriuOpts, nfy Notify) error {
+ return c.doSwrk(rpc.CriuReqType_PRE_DUMP, opts, nfy)
}
// StartPageServer starts the page server
-func (c *Criu) StartPageServer(opts rpc.CriuOpts) error {
- return c.doSwrk(rpc.CriuReqType_PAGE_SERVER, &opts, nil)
+func (c *Criu) StartPageServer(opts *rpc.CriuOpts) error {
+ return c.doSwrk(rpc.CriuReqType_PAGE_SERVER, opts, nil)
}
// StartPageServerChld starts the page server and returns PID and port
-func (c *Criu) StartPageServerChld(opts rpc.CriuOpts) (int, int, error) {
- resp, err := c.doSwrkWithResp(rpc.CriuReqType_PAGE_SERVER_CHLD, &opts, nil)
+func (c *Criu) StartPageServerChld(opts *rpc.CriuOpts) (int, int, error) {
+ resp, err := c.doSwrkWithResp(rpc.CriuReqType_PAGE_SERVER_CHLD, opts, nil)
if err != nil {
return 0, 0, err
}
@@ -219,8 +229,8 @@ func (c *Criu) GetCriuVersion() (int, error) {
return 0, fmt.Errorf("Unexpected CRIU RPC response")
}
- version := int(*resp.GetVersion().Major) * 10000
- version += int(*resp.GetVersion().Minor) * 100
+ version := int(*resp.GetVersion().MajorNumber) * 10000
+ version += int(*resp.GetVersion().MinorNumber) * 100
if resp.GetVersion().Sublevel != nil {
version += int(*resp.GetVersion().Sublevel)
}
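The signature changes above mean callers now build an *rpc.CriuOpts and pass it by pointer. A minimal dump sketch against the v5 API follows; the image directory, target PID, and log file name are illustrative assumptions, and NoNotify is the no-op notifier from notify.go.

```go
package main

import (
	"log"
	"os"

	criu "github.com/checkpoint-restore/go-criu/v5"
	"github.com/checkpoint-restore/go-criu/v5/rpc"
	"google.golang.org/protobuf/proto"
)

func main() {
	c := criu.MakeCriu()

	// Directory that receives the checkpoint images (path is an assumption).
	img, err := os.Open("/tmp/criu-images")
	if err != nil {
		log.Fatalln(err)
	}
	defer img.Close()

	opts := &rpc.CriuOpts{
		ImagesDirFd:  proto.Int32(int32(img.Fd())),
		Pid:          proto.Int32(12345), // hypothetical PID to checkpoint
		LogLevel:     proto.Int32(4),
		LogFile:      proto.String("dump.log"),
		LeaveRunning: proto.Bool(true),
		ShellJob:     proto.Bool(true),
	}

	// v5: opts is passed as a pointer instead of by value.
	if err := c.Dump(opts, criu.NoNotify{}); err != nil {
		log.Fatalln(err)
	}
}
```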
diff --git a/vendor/github.com/checkpoint-restore/go-criu/notify.go b/vendor/github.com/checkpoint-restore/go-criu/v5/notify.go
index 1c8547b43..a177f2bb5 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/notify.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/notify.go
@@ -1,6 +1,6 @@
package criu
-//Notify interface
+// Notify interface
type Notify interface {
PreDump() error
PostDump() error
@@ -14,8 +14,7 @@ type Notify interface {
}
// NoNotify struct
-type NoNotify struct {
-}
+type NoNotify struct{}
// PreDump NoNotify
func (c NoNotify) PreDump() error {
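NoNotify above is a no-op implementation of the Notify interface, so a custom notifier can embed it and override only the hooks it needs. A minimal sketch (the type name and log message are illustrative):

```go
package notifyhooks

import (
	"log"

	criu "github.com/checkpoint-restore/go-criu/v5"
)

// loggingNotify embeds NoNotify so every Notify hook gets a default no-op;
// only PreDump is overridden here.
type loggingNotify struct {
	criu.NoNotify
}

// PreDump is called before the dump when notify callbacks are enabled.
func (loggingNotify) PreDump() error {
	log.Println("about to dump")
	return nil
}

// Compile-time check that the embedding still satisfies criu.Notify.
var _ criu.Notify = loggingNotify{}
```

A value of this type can then be passed as the nfy argument to Dump or Restore in place of NoNotify.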
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go b/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go
new file mode 100644
index 000000000..9f22f1539
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go
@@ -0,0 +1,2237 @@
+// SPDX-License-Identifier: MIT
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.14.0
+// source: rpc/rpc.proto
+
+package rpc
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CriuCgMode int32
+
+const (
+ CriuCgMode_IGNORE CriuCgMode = 0
+ CriuCgMode_CG_NONE CriuCgMode = 1
+ CriuCgMode_PROPS CriuCgMode = 2
+ CriuCgMode_SOFT CriuCgMode = 3
+ CriuCgMode_FULL CriuCgMode = 4
+ CriuCgMode_STRICT CriuCgMode = 5
+ CriuCgMode_DEFAULT CriuCgMode = 6
+)
+
+// Enum value maps for CriuCgMode.
+var (
+ CriuCgMode_name = map[int32]string{
+ 0: "IGNORE",
+ 1: "CG_NONE",
+ 2: "PROPS",
+ 3: "SOFT",
+ 4: "FULL",
+ 5: "STRICT",
+ 6: "DEFAULT",
+ }
+ CriuCgMode_value = map[string]int32{
+ "IGNORE": 0,
+ "CG_NONE": 1,
+ "PROPS": 2,
+ "SOFT": 3,
+ "FULL": 4,
+ "STRICT": 5,
+ "DEFAULT": 6,
+ }
+)
+
+func (x CriuCgMode) Enum() *CriuCgMode {
+ p := new(CriuCgMode)
+ *p = x
+ return p
+}
+
+func (x CriuCgMode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CriuCgMode) Descriptor() protoreflect.EnumDescriptor {
+ return file_rpc_rpc_proto_enumTypes[0].Descriptor()
+}
+
+func (CriuCgMode) Type() protoreflect.EnumType {
+ return &file_rpc_rpc_proto_enumTypes[0]
+}
+
+func (x CriuCgMode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *CriuCgMode) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = CriuCgMode(num)
+ return nil
+}
+
+// Deprecated: Use CriuCgMode.Descriptor instead.
+func (CriuCgMode) EnumDescriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{0}
+}
+
+type CriuPreDumpMode int32
+
+const (
+ CriuPreDumpMode_SPLICE CriuPreDumpMode = 1
+ CriuPreDumpMode_VM_READ CriuPreDumpMode = 2
+)
+
+// Enum value maps for CriuPreDumpMode.
+var (
+ CriuPreDumpMode_name = map[int32]string{
+ 1: "SPLICE",
+ 2: "VM_READ",
+ }
+ CriuPreDumpMode_value = map[string]int32{
+ "SPLICE": 1,
+ "VM_READ": 2,
+ }
+)
+
+func (x CriuPreDumpMode) Enum() *CriuPreDumpMode {
+ p := new(CriuPreDumpMode)
+ *p = x
+ return p
+}
+
+func (x CriuPreDumpMode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CriuPreDumpMode) Descriptor() protoreflect.EnumDescriptor {
+ return file_rpc_rpc_proto_enumTypes[1].Descriptor()
+}
+
+func (CriuPreDumpMode) Type() protoreflect.EnumType {
+ return &file_rpc_rpc_proto_enumTypes[1]
+}
+
+func (x CriuPreDumpMode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *CriuPreDumpMode) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = CriuPreDumpMode(num)
+ return nil
+}
+
+// Deprecated: Use CriuPreDumpMode.Descriptor instead.
+func (CriuPreDumpMode) EnumDescriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{1}
+}
+
+type CriuReqType int32
+
+const (
+ CriuReqType_EMPTY CriuReqType = 0
+ CriuReqType_DUMP CriuReqType = 1
+ CriuReqType_RESTORE CriuReqType = 2
+ CriuReqType_CHECK CriuReqType = 3
+ CriuReqType_PRE_DUMP CriuReqType = 4
+ CriuReqType_PAGE_SERVER CriuReqType = 5
+ CriuReqType_NOTIFY CriuReqType = 6
+ CriuReqType_CPUINFO_DUMP CriuReqType = 7
+ CriuReqType_CPUINFO_CHECK CriuReqType = 8
+ CriuReqType_FEATURE_CHECK CriuReqType = 9
+ CriuReqType_VERSION CriuReqType = 10
+ CriuReqType_WAIT_PID CriuReqType = 11
+ CriuReqType_PAGE_SERVER_CHLD CriuReqType = 12
+)
+
+// Enum value maps for CriuReqType.
+var (
+ CriuReqType_name = map[int32]string{
+ 0: "EMPTY",
+ 1: "DUMP",
+ 2: "RESTORE",
+ 3: "CHECK",
+ 4: "PRE_DUMP",
+ 5: "PAGE_SERVER",
+ 6: "NOTIFY",
+ 7: "CPUINFO_DUMP",
+ 8: "CPUINFO_CHECK",
+ 9: "FEATURE_CHECK",
+ 10: "VERSION",
+ 11: "WAIT_PID",
+ 12: "PAGE_SERVER_CHLD",
+ }
+ CriuReqType_value = map[string]int32{
+ "EMPTY": 0,
+ "DUMP": 1,
+ "RESTORE": 2,
+ "CHECK": 3,
+ "PRE_DUMP": 4,
+ "PAGE_SERVER": 5,
+ "NOTIFY": 6,
+ "CPUINFO_DUMP": 7,
+ "CPUINFO_CHECK": 8,
+ "FEATURE_CHECK": 9,
+ "VERSION": 10,
+ "WAIT_PID": 11,
+ "PAGE_SERVER_CHLD": 12,
+ }
+)
+
+func (x CriuReqType) Enum() *CriuReqType {
+ p := new(CriuReqType)
+ *p = x
+ return p
+}
+
+func (x CriuReqType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CriuReqType) Descriptor() protoreflect.EnumDescriptor {
+ return file_rpc_rpc_proto_enumTypes[2].Descriptor()
+}
+
+func (CriuReqType) Type() protoreflect.EnumType {
+ return &file_rpc_rpc_proto_enumTypes[2]
+}
+
+func (x CriuReqType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *CriuReqType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = CriuReqType(num)
+ return nil
+}
+
+// Deprecated: Use CriuReqType.Descriptor instead.
+func (CriuReqType) EnumDescriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{2}
+}
+
+type CriuPageServerInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
+ Pid *int32 `protobuf:"varint,3,opt,name=pid" json:"pid,omitempty"`
+ Fd *int32 `protobuf:"varint,4,opt,name=fd" json:"fd,omitempty"`
+}
+
+func (x *CriuPageServerInfo) Reset() {
+ *x = CriuPageServerInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuPageServerInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuPageServerInfo) ProtoMessage() {}
+
+func (x *CriuPageServerInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuPageServerInfo.ProtoReflect.Descriptor instead.
+func (*CriuPageServerInfo) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CriuPageServerInfo) GetAddress() string {
+ if x != nil && x.Address != nil {
+ return *x.Address
+ }
+ return ""
+}
+
+func (x *CriuPageServerInfo) GetPort() int32 {
+ if x != nil && x.Port != nil {
+ return *x.Port
+ }
+ return 0
+}
+
+func (x *CriuPageServerInfo) GetPid() int32 {
+ if x != nil && x.Pid != nil {
+ return *x.Pid
+ }
+ return 0
+}
+
+func (x *CriuPageServerInfo) GetFd() int32 {
+ if x != nil && x.Fd != nil {
+ return *x.Fd
+ }
+ return 0
+}
+
+type CriuVethPair struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IfIn *string `protobuf:"bytes,1,req,name=if_in,json=ifIn" json:"if_in,omitempty"`
+ IfOut *string `protobuf:"bytes,2,req,name=if_out,json=ifOut" json:"if_out,omitempty"`
+}
+
+func (x *CriuVethPair) Reset() {
+ *x = CriuVethPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuVethPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuVethPair) ProtoMessage() {}
+
+func (x *CriuVethPair) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuVethPair.ProtoReflect.Descriptor instead.
+func (*CriuVethPair) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CriuVethPair) GetIfIn() string {
+ if x != nil && x.IfIn != nil {
+ return *x.IfIn
+ }
+ return ""
+}
+
+func (x *CriuVethPair) GetIfOut() string {
+ if x != nil && x.IfOut != nil {
+ return *x.IfOut
+ }
+ return ""
+}
+
+type ExtMountMap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Val *string `protobuf:"bytes,2,req,name=val" json:"val,omitempty"`
+}
+
+func (x *ExtMountMap) Reset() {
+ *x = ExtMountMap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtMountMap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtMountMap) ProtoMessage() {}
+
+func (x *ExtMountMap) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtMountMap.ProtoReflect.Descriptor instead.
+func (*ExtMountMap) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExtMountMap) GetKey() string {
+ if x != nil && x.Key != nil {
+ return *x.Key
+ }
+ return ""
+}
+
+func (x *ExtMountMap) GetVal() string {
+ if x != nil && x.Val != nil {
+ return *x.Val
+ }
+ return ""
+}
+
+type JoinNamespace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ns *string `protobuf:"bytes,1,req,name=ns" json:"ns,omitempty"`
+ NsFile *string `protobuf:"bytes,2,req,name=ns_file,json=nsFile" json:"ns_file,omitempty"`
+ ExtraOpt *string `protobuf:"bytes,3,opt,name=extra_opt,json=extraOpt" json:"extra_opt,omitempty"`
+}
+
+func (x *JoinNamespace) Reset() {
+ *x = JoinNamespace{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *JoinNamespace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JoinNamespace) ProtoMessage() {}
+
+func (x *JoinNamespace) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use JoinNamespace.ProtoReflect.Descriptor instead.
+func (*JoinNamespace) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *JoinNamespace) GetNs() string {
+ if x != nil && x.Ns != nil {
+ return *x.Ns
+ }
+ return ""
+}
+
+func (x *JoinNamespace) GetNsFile() string {
+ if x != nil && x.NsFile != nil {
+ return *x.NsFile
+ }
+ return ""
+}
+
+func (x *JoinNamespace) GetExtraOpt() string {
+ if x != nil && x.ExtraOpt != nil {
+ return *x.ExtraOpt
+ }
+ return ""
+}
+
+type InheritFd struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Fd *int32 `protobuf:"varint,2,req,name=fd" json:"fd,omitempty"`
+}
+
+func (x *InheritFd) Reset() {
+ *x = InheritFd{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InheritFd) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InheritFd) ProtoMessage() {}
+
+func (x *InheritFd) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InheritFd.ProtoReflect.Descriptor instead.
+func (*InheritFd) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *InheritFd) GetKey() string {
+ if x != nil && x.Key != nil {
+ return *x.Key
+ }
+ return ""
+}
+
+func (x *InheritFd) GetFd() int32 {
+ if x != nil && x.Fd != nil {
+ return *x.Fd
+ }
+ return 0
+}
+
+type CgroupRoot struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ctrl *string `protobuf:"bytes,1,opt,name=ctrl" json:"ctrl,omitempty"`
+ Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"`
+}
+
+func (x *CgroupRoot) Reset() {
+ *x = CgroupRoot{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CgroupRoot) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CgroupRoot) ProtoMessage() {}
+
+func (x *CgroupRoot) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CgroupRoot.ProtoReflect.Descriptor instead.
+func (*CgroupRoot) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *CgroupRoot) GetCtrl() string {
+ if x != nil && x.Ctrl != nil {
+ return *x.Ctrl
+ }
+ return ""
+}
+
+func (x *CgroupRoot) GetPath() string {
+ if x != nil && x.Path != nil {
+ return *x.Path
+ }
+ return ""
+}
+
+type UnixSk struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Inode *uint32 `protobuf:"varint,1,req,name=inode" json:"inode,omitempty"`
+}
+
+func (x *UnixSk) Reset() {
+ *x = UnixSk{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnixSk) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnixSk) ProtoMessage() {}
+
+func (x *UnixSk) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnixSk.ProtoReflect.Descriptor instead.
+func (*UnixSk) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *UnixSk) GetInode() uint32 {
+ if x != nil && x.Inode != nil {
+ return *x.Inode
+ }
+ return 0
+}
+
+type CriuOpts struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd" json:"images_dir_fd,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"` // if not set on dump, will dump requesting process
+ LeaveRunning *bool `protobuf:"varint,3,opt,name=leave_running,json=leaveRunning" json:"leave_running,omitempty"`
+ ExtUnixSk *bool `protobuf:"varint,4,opt,name=ext_unix_sk,json=extUnixSk" json:"ext_unix_sk,omitempty"`
+ TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established,json=tcpEstablished" json:"tcp_established,omitempty"`
+ EvasiveDevices *bool `protobuf:"varint,6,opt,name=evasive_devices,json=evasiveDevices" json:"evasive_devices,omitempty"`
+ ShellJob *bool `protobuf:"varint,7,opt,name=shell_job,json=shellJob" json:"shell_job,omitempty"`
+ FileLocks *bool `protobuf:"varint,8,opt,name=file_locks,json=fileLocks" json:"file_locks,omitempty"`
+ LogLevel *int32 `protobuf:"varint,9,opt,name=log_level,json=logLevel,def=2" json:"log_level,omitempty"`
+ LogFile *string `protobuf:"bytes,10,opt,name=log_file,json=logFile" json:"log_file,omitempty"` // No subdirs are allowed. Consider using work-dir
+ Ps *CriuPageServerInfo `protobuf:"bytes,11,opt,name=ps" json:"ps,omitempty"`
+ NotifyScripts *bool `protobuf:"varint,12,opt,name=notify_scripts,json=notifyScripts" json:"notify_scripts,omitempty"`
+ Root *string `protobuf:"bytes,13,opt,name=root" json:"root,omitempty"`
+ ParentImg *string `protobuf:"bytes,14,opt,name=parent_img,json=parentImg" json:"parent_img,omitempty"`
+ TrackMem *bool `protobuf:"varint,15,opt,name=track_mem,json=trackMem" json:"track_mem,omitempty"`
+ AutoDedup *bool `protobuf:"varint,16,opt,name=auto_dedup,json=autoDedup" json:"auto_dedup,omitempty"`
+ WorkDirFd *int32 `protobuf:"varint,17,opt,name=work_dir_fd,json=workDirFd" json:"work_dir_fd,omitempty"`
+ LinkRemap *bool `protobuf:"varint,18,opt,name=link_remap,json=linkRemap" json:"link_remap,omitempty"`
+ Veths []*CriuVethPair `protobuf:"bytes,19,rep,name=veths" json:"veths,omitempty"` // DEPRECATED, use external instead
+ CpuCap *uint32 `protobuf:"varint,20,opt,name=cpu_cap,json=cpuCap,def=4294967295" json:"cpu_cap,omitempty"`
+ ForceIrmap *bool `protobuf:"varint,21,opt,name=force_irmap,json=forceIrmap" json:"force_irmap,omitempty"`
+ ExecCmd []string `protobuf:"bytes,22,rep,name=exec_cmd,json=execCmd" json:"exec_cmd,omitempty"`
+ ExtMnt []*ExtMountMap `protobuf:"bytes,23,rep,name=ext_mnt,json=extMnt" json:"ext_mnt,omitempty"` // DEPRECATED, use external instead
+ ManageCgroups *bool `protobuf:"varint,24,opt,name=manage_cgroups,json=manageCgroups" json:"manage_cgroups,omitempty"` // backward compatibility
+ CgRoot []*CgroupRoot `protobuf:"bytes,25,rep,name=cg_root,json=cgRoot" json:"cg_root,omitempty"`
+ RstSibling *bool `protobuf:"varint,26,opt,name=rst_sibling,json=rstSibling" json:"rst_sibling,omitempty"` // swrk only
+ InheritFd []*InheritFd `protobuf:"bytes,27,rep,name=inherit_fd,json=inheritFd" json:"inherit_fd,omitempty"` // swrk only
+ AutoExtMnt *bool `protobuf:"varint,28,opt,name=auto_ext_mnt,json=autoExtMnt" json:"auto_ext_mnt,omitempty"`
+ ExtSharing *bool `protobuf:"varint,29,opt,name=ext_sharing,json=extSharing" json:"ext_sharing,omitempty"`
+ ExtMasters *bool `protobuf:"varint,30,opt,name=ext_masters,json=extMasters" json:"ext_masters,omitempty"`
+ SkipMnt []string `protobuf:"bytes,31,rep,name=skip_mnt,json=skipMnt" json:"skip_mnt,omitempty"`
+ EnableFs []string `protobuf:"bytes,32,rep,name=enable_fs,json=enableFs" json:"enable_fs,omitempty"`
+ UnixSkIno []*UnixSk `protobuf:"bytes,33,rep,name=unix_sk_ino,json=unixSkIno" json:"unix_sk_ino,omitempty"` // DEPRECATED, use external instead
+ ManageCgroupsMode *CriuCgMode `protobuf:"varint,34,opt,name=manage_cgroups_mode,json=manageCgroupsMode,enum=CriuCgMode" json:"manage_cgroups_mode,omitempty"`
+ GhostLimit *uint32 `protobuf:"varint,35,opt,name=ghost_limit,json=ghostLimit,def=1048576" json:"ghost_limit,omitempty"`
+ IrmapScanPaths []string `protobuf:"bytes,36,rep,name=irmap_scan_paths,json=irmapScanPaths" json:"irmap_scan_paths,omitempty"`
+ External []string `protobuf:"bytes,37,rep,name=external" json:"external,omitempty"`
+ EmptyNs *uint32 `protobuf:"varint,38,opt,name=empty_ns,json=emptyNs" json:"empty_ns,omitempty"`
+ JoinNs []*JoinNamespace `protobuf:"bytes,39,rep,name=join_ns,json=joinNs" json:"join_ns,omitempty"`
+ CgroupProps *string `protobuf:"bytes,41,opt,name=cgroup_props,json=cgroupProps" json:"cgroup_props,omitempty"`
+ CgroupPropsFile *string `protobuf:"bytes,42,opt,name=cgroup_props_file,json=cgroupPropsFile" json:"cgroup_props_file,omitempty"`
+ CgroupDumpController []string `protobuf:"bytes,43,rep,name=cgroup_dump_controller,json=cgroupDumpController" json:"cgroup_dump_controller,omitempty"`
+ FreezeCgroup *string `protobuf:"bytes,44,opt,name=freeze_cgroup,json=freezeCgroup" json:"freeze_cgroup,omitempty"`
+ Timeout *uint32 `protobuf:"varint,45,opt,name=timeout" json:"timeout,omitempty"`
+ TcpSkipInFlight *bool `protobuf:"varint,46,opt,name=tcp_skip_in_flight,json=tcpSkipInFlight" json:"tcp_skip_in_flight,omitempty"`
+ WeakSysctls *bool `protobuf:"varint,47,opt,name=weak_sysctls,json=weakSysctls" json:"weak_sysctls,omitempty"`
+ LazyPages *bool `protobuf:"varint,48,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
+ StatusFd *int32 `protobuf:"varint,49,opt,name=status_fd,json=statusFd" json:"status_fd,omitempty"`
+ OrphanPtsMaster *bool `protobuf:"varint,50,opt,name=orphan_pts_master,json=orphanPtsMaster" json:"orphan_pts_master,omitempty"`
+ ConfigFile *string `protobuf:"bytes,51,opt,name=config_file,json=configFile" json:"config_file,omitempty"`
+ TcpClose *bool `protobuf:"varint,52,opt,name=tcp_close,json=tcpClose" json:"tcp_close,omitempty"`
+ LsmProfile *string `protobuf:"bytes,53,opt,name=lsm_profile,json=lsmProfile" json:"lsm_profile,omitempty"`
+ TlsCacert *string `protobuf:"bytes,54,opt,name=tls_cacert,json=tlsCacert" json:"tls_cacert,omitempty"`
+ TlsCacrl *string `protobuf:"bytes,55,opt,name=tls_cacrl,json=tlsCacrl" json:"tls_cacrl,omitempty"`
+ TlsCert *string `protobuf:"bytes,56,opt,name=tls_cert,json=tlsCert" json:"tls_cert,omitempty"`
+ TlsKey *string `protobuf:"bytes,57,opt,name=tls_key,json=tlsKey" json:"tls_key,omitempty"`
+ Tls *bool `protobuf:"varint,58,opt,name=tls" json:"tls,omitempty"`
+ TlsNoCnVerify *bool `protobuf:"varint,59,opt,name=tls_no_cn_verify,json=tlsNoCnVerify" json:"tls_no_cn_verify,omitempty"`
+ CgroupYard *string `protobuf:"bytes,60,opt,name=cgroup_yard,json=cgroupYard" json:"cgroup_yard,omitempty"`
+ PreDumpMode *CriuPreDumpMode `protobuf:"varint,61,opt,name=pre_dump_mode,json=preDumpMode,enum=CriuPreDumpMode,def=1" json:"pre_dump_mode,omitempty"`
+ PidfdStoreSk *int32 `protobuf:"varint,62,opt,name=pidfd_store_sk,json=pidfdStoreSk" json:"pidfd_store_sk,omitempty"`
+ LsmMountContext *string `protobuf:"bytes,63,opt,name=lsm_mount_context,json=lsmMountContext" json:"lsm_mount_context,omitempty"` // optional bool check_mounts = 128;
+}
+
+// Default values for CriuOpts fields.
+const (
+ Default_CriuOpts_LogLevel = int32(2)
+ Default_CriuOpts_CpuCap = uint32(4294967295)
+ Default_CriuOpts_GhostLimit = uint32(1048576)
+ Default_CriuOpts_PreDumpMode = CriuPreDumpMode_SPLICE
+)
+
+func (x *CriuOpts) Reset() {
+ *x = CriuOpts{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuOpts) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuOpts) ProtoMessage() {}
+
+func (x *CriuOpts) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuOpts.ProtoReflect.Descriptor instead.
+func (*CriuOpts) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *CriuOpts) GetImagesDirFd() int32 {
+ if x != nil && x.ImagesDirFd != nil {
+ return *x.ImagesDirFd
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetPid() int32 {
+ if x != nil && x.Pid != nil {
+ return *x.Pid
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetLeaveRunning() bool {
+ if x != nil && x.LeaveRunning != nil {
+ return *x.LeaveRunning
+ }
+ return false
+}
+
+func (x *CriuOpts) GetExtUnixSk() bool {
+ if x != nil && x.ExtUnixSk != nil {
+ return *x.ExtUnixSk
+ }
+ return false
+}
+
+func (x *CriuOpts) GetTcpEstablished() bool {
+ if x != nil && x.TcpEstablished != nil {
+ return *x.TcpEstablished
+ }
+ return false
+}
+
+func (x *CriuOpts) GetEvasiveDevices() bool {
+ if x != nil && x.EvasiveDevices != nil {
+ return *x.EvasiveDevices
+ }
+ return false
+}
+
+func (x *CriuOpts) GetShellJob() bool {
+ if x != nil && x.ShellJob != nil {
+ return *x.ShellJob
+ }
+ return false
+}
+
+func (x *CriuOpts) GetFileLocks() bool {
+ if x != nil && x.FileLocks != nil {
+ return *x.FileLocks
+ }
+ return false
+}
+
+func (x *CriuOpts) GetLogLevel() int32 {
+ if x != nil && x.LogLevel != nil {
+ return *x.LogLevel
+ }
+ return Default_CriuOpts_LogLevel
+}
+
+func (x *CriuOpts) GetLogFile() string {
+ if x != nil && x.LogFile != nil {
+ return *x.LogFile
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetPs() *CriuPageServerInfo {
+ if x != nil {
+ return x.Ps
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetNotifyScripts() bool {
+ if x != nil && x.NotifyScripts != nil {
+ return *x.NotifyScripts
+ }
+ return false
+}
+
+func (x *CriuOpts) GetRoot() string {
+ if x != nil && x.Root != nil {
+ return *x.Root
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetParentImg() string {
+ if x != nil && x.ParentImg != nil {
+ return *x.ParentImg
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTrackMem() bool {
+ if x != nil && x.TrackMem != nil {
+ return *x.TrackMem
+ }
+ return false
+}
+
+func (x *CriuOpts) GetAutoDedup() bool {
+ if x != nil && x.AutoDedup != nil {
+ return *x.AutoDedup
+ }
+ return false
+}
+
+func (x *CriuOpts) GetWorkDirFd() int32 {
+ if x != nil && x.WorkDirFd != nil {
+ return *x.WorkDirFd
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetLinkRemap() bool {
+ if x != nil && x.LinkRemap != nil {
+ return *x.LinkRemap
+ }
+ return false
+}
+
+func (x *CriuOpts) GetVeths() []*CriuVethPair {
+ if x != nil {
+ return x.Veths
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetCpuCap() uint32 {
+ if x != nil && x.CpuCap != nil {
+ return *x.CpuCap
+ }
+ return Default_CriuOpts_CpuCap
+}
+
+func (x *CriuOpts) GetForceIrmap() bool {
+ if x != nil && x.ForceIrmap != nil {
+ return *x.ForceIrmap
+ }
+ return false
+}
+
+func (x *CriuOpts) GetExecCmd() []string {
+ if x != nil {
+ return x.ExecCmd
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetExtMnt() []*ExtMountMap {
+ if x != nil {
+ return x.ExtMnt
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetManageCgroups() bool {
+ if x != nil && x.ManageCgroups != nil {
+ return *x.ManageCgroups
+ }
+ return false
+}
+
+func (x *CriuOpts) GetCgRoot() []*CgroupRoot {
+ if x != nil {
+ return x.CgRoot
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetRstSibling() bool {
+ if x != nil && x.RstSibling != nil {
+ return *x.RstSibling
+ }
+ return false
+}
+
+func (x *CriuOpts) GetInheritFd() []*InheritFd {
+ if x != nil {
+ return x.InheritFd
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetAutoExtMnt() bool {
+ if x != nil && x.AutoExtMnt != nil {
+ return *x.AutoExtMnt
+ }
+ return false
+}
+
+func (x *CriuOpts) GetExtSharing() bool {
+ if x != nil && x.ExtSharing != nil {
+ return *x.ExtSharing
+ }
+ return false
+}
+
+func (x *CriuOpts) GetExtMasters() bool {
+ if x != nil && x.ExtMasters != nil {
+ return *x.ExtMasters
+ }
+ return false
+}
+
+func (x *CriuOpts) GetSkipMnt() []string {
+ if x != nil {
+ return x.SkipMnt
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetEnableFs() []string {
+ if x != nil {
+ return x.EnableFs
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetUnixSkIno() []*UnixSk {
+ if x != nil {
+ return x.UnixSkIno
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetManageCgroupsMode() CriuCgMode {
+ if x != nil && x.ManageCgroupsMode != nil {
+ return *x.ManageCgroupsMode
+ }
+ return CriuCgMode_IGNORE
+}
+
+func (x *CriuOpts) GetGhostLimit() uint32 {
+ if x != nil && x.GhostLimit != nil {
+ return *x.GhostLimit
+ }
+ return Default_CriuOpts_GhostLimit
+}
+
+func (x *CriuOpts) GetIrmapScanPaths() []string {
+ if x != nil {
+ return x.IrmapScanPaths
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetExternal() []string {
+ if x != nil {
+ return x.External
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetEmptyNs() uint32 {
+ if x != nil && x.EmptyNs != nil {
+ return *x.EmptyNs
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetJoinNs() []*JoinNamespace {
+ if x != nil {
+ return x.JoinNs
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetCgroupProps() string {
+ if x != nil && x.CgroupProps != nil {
+ return *x.CgroupProps
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetCgroupPropsFile() string {
+ if x != nil && x.CgroupPropsFile != nil {
+ return *x.CgroupPropsFile
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetCgroupDumpController() []string {
+ if x != nil {
+ return x.CgroupDumpController
+ }
+ return nil
+}
+
+func (x *CriuOpts) GetFreezeCgroup() string {
+ if x != nil && x.FreezeCgroup != nil {
+ return *x.FreezeCgroup
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTimeout() uint32 {
+ if x != nil && x.Timeout != nil {
+ return *x.Timeout
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetTcpSkipInFlight() bool {
+ if x != nil && x.TcpSkipInFlight != nil {
+ return *x.TcpSkipInFlight
+ }
+ return false
+}
+
+func (x *CriuOpts) GetWeakSysctls() bool {
+ if x != nil && x.WeakSysctls != nil {
+ return *x.WeakSysctls
+ }
+ return false
+}
+
+func (x *CriuOpts) GetLazyPages() bool {
+ if x != nil && x.LazyPages != nil {
+ return *x.LazyPages
+ }
+ return false
+}
+
+func (x *CriuOpts) GetStatusFd() int32 {
+ if x != nil && x.StatusFd != nil {
+ return *x.StatusFd
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetOrphanPtsMaster() bool {
+ if x != nil && x.OrphanPtsMaster != nil {
+ return *x.OrphanPtsMaster
+ }
+ return false
+}
+
+func (x *CriuOpts) GetConfigFile() string {
+ if x != nil && x.ConfigFile != nil {
+ return *x.ConfigFile
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTcpClose() bool {
+ if x != nil && x.TcpClose != nil {
+ return *x.TcpClose
+ }
+ return false
+}
+
+func (x *CriuOpts) GetLsmProfile() string {
+ if x != nil && x.LsmProfile != nil {
+ return *x.LsmProfile
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTlsCacert() string {
+ if x != nil && x.TlsCacert != nil {
+ return *x.TlsCacert
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTlsCacrl() string {
+ if x != nil && x.TlsCacrl != nil {
+ return *x.TlsCacrl
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTlsCert() string {
+ if x != nil && x.TlsCert != nil {
+ return *x.TlsCert
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTlsKey() string {
+ if x != nil && x.TlsKey != nil {
+ return *x.TlsKey
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetTls() bool {
+ if x != nil && x.Tls != nil {
+ return *x.Tls
+ }
+ return false
+}
+
+func (x *CriuOpts) GetTlsNoCnVerify() bool {
+ if x != nil && x.TlsNoCnVerify != nil {
+ return *x.TlsNoCnVerify
+ }
+ return false
+}
+
+func (x *CriuOpts) GetCgroupYard() string {
+ if x != nil && x.CgroupYard != nil {
+ return *x.CgroupYard
+ }
+ return ""
+}
+
+func (x *CriuOpts) GetPreDumpMode() CriuPreDumpMode {
+ if x != nil && x.PreDumpMode != nil {
+ return *x.PreDumpMode
+ }
+ return Default_CriuOpts_PreDumpMode
+}
+
+func (x *CriuOpts) GetPidfdStoreSk() int32 {
+ if x != nil && x.PidfdStoreSk != nil {
+ return *x.PidfdStoreSk
+ }
+ return 0
+}
+
+func (x *CriuOpts) GetLsmMountContext() string {
+ if x != nil && x.LsmMountContext != nil {
+ return *x.LsmMountContext
+ }
+ return ""
+}
+
+type CriuDumpResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Restored *bool `protobuf:"varint,1,opt,name=restored" json:"restored,omitempty"`
+}
+
+func (x *CriuDumpResp) Reset() {
+ *x = CriuDumpResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuDumpResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuDumpResp) ProtoMessage() {}
+
+func (x *CriuDumpResp) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuDumpResp.ProtoReflect.Descriptor instead.
+func (*CriuDumpResp) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *CriuDumpResp) GetRestored() bool {
+ if x != nil && x.Restored != nil {
+ return *x.Restored
+ }
+ return false
+}
+
+type CriuRestoreResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Pid *int32 `protobuf:"varint,1,req,name=pid" json:"pid,omitempty"`
+}
+
+func (x *CriuRestoreResp) Reset() {
+ *x = CriuRestoreResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuRestoreResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuRestoreResp) ProtoMessage() {}
+
+func (x *CriuRestoreResp) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuRestoreResp.ProtoReflect.Descriptor instead.
+func (*CriuRestoreResp) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *CriuRestoreResp) GetPid() int32 {
+ if x != nil && x.Pid != nil {
+ return *x.Pid
+ }
+ return 0
+}
+
+type CriuNotify struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Script *string `protobuf:"bytes,1,opt,name=script" json:"script,omitempty"`
+ Pid *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"`
+}
+
+func (x *CriuNotify) Reset() {
+ *x = CriuNotify{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuNotify) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuNotify) ProtoMessage() {}
+
+func (x *CriuNotify) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuNotify.ProtoReflect.Descriptor instead.
+func (*CriuNotify) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *CriuNotify) GetScript() string {
+ if x != nil && x.Script != nil {
+ return *x.Script
+ }
+ return ""
+}
+
+func (x *CriuNotify) GetPid() int32 {
+ if x != nil && x.Pid != nil {
+ return *x.Pid
+ }
+ return 0
+}
+
+//
+// List of features which can be queried via
+// CRIU_REQ_TYPE__FEATURE_CHECK
+type CriuFeatures struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MemTrack *bool `protobuf:"varint,1,opt,name=mem_track,json=memTrack" json:"mem_track,omitempty"`
+ LazyPages *bool `protobuf:"varint,2,opt,name=lazy_pages,json=lazyPages" json:"lazy_pages,omitempty"`
+ PidfdStore *bool `protobuf:"varint,3,opt,name=pidfd_store,json=pidfdStore" json:"pidfd_store,omitempty"`
+}
+
+func (x *CriuFeatures) Reset() {
+ *x = CriuFeatures{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuFeatures) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuFeatures) ProtoMessage() {}
+
+func (x *CriuFeatures) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuFeatures.ProtoReflect.Descriptor instead.
+func (*CriuFeatures) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *CriuFeatures) GetMemTrack() bool {
+ if x != nil && x.MemTrack != nil {
+ return *x.MemTrack
+ }
+ return false
+}
+
+func (x *CriuFeatures) GetLazyPages() bool {
+ if x != nil && x.LazyPages != nil {
+ return *x.LazyPages
+ }
+ return false
+}
+
+func (x *CriuFeatures) GetPidfdStore() bool {
+ if x != nil && x.PidfdStore != nil {
+ return *x.PidfdStore
+ }
+ return false
+}
+
+type CriuReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Opts *CriuOpts `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"`
+ NotifySuccess *bool `protobuf:"varint,3,opt,name=notify_success,json=notifySuccess" json:"notify_success,omitempty"`
+ //
+ // When set, the service won't close the connection but
+ // will wait for more req-s to appear. Does not work
+ // for all request types.
+ KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open,json=keepOpen" json:"keep_open,omitempty"`
+ //
+ // 'features' can be used to query which features
+ // are supported by the installed criu/kernel
+ // via RPC.
+ Features *CriuFeatures `protobuf:"bytes,5,opt,name=features" json:"features,omitempty"`
+ // 'pid' is used for WAIT_PID
+ Pid *uint32 `protobuf:"varint,6,opt,name=pid" json:"pid,omitempty"`
+}
+
+func (x *CriuReq) Reset() {
+ *x = CriuReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuReq) ProtoMessage() {}
+
+func (x *CriuReq) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuReq.ProtoReflect.Descriptor instead.
+func (*CriuReq) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *CriuReq) GetType() CriuReqType {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (x *CriuReq) GetOpts() *CriuOpts {
+ if x != nil {
+ return x.Opts
+ }
+ return nil
+}
+
+func (x *CriuReq) GetNotifySuccess() bool {
+ if x != nil && x.NotifySuccess != nil {
+ return *x.NotifySuccess
+ }
+ return false
+}
+
+func (x *CriuReq) GetKeepOpen() bool {
+ if x != nil && x.KeepOpen != nil {
+ return *x.KeepOpen
+ }
+ return false
+}
+
+func (x *CriuReq) GetFeatures() *CriuFeatures {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
+func (x *CriuReq) GetPid() uint32 {
+ if x != nil && x.Pid != nil {
+ return *x.Pid
+ }
+ return 0
+}
+
+type CriuResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
+ Success *bool `protobuf:"varint,2,req,name=success" json:"success,omitempty"`
+ Dump *CriuDumpResp `protobuf:"bytes,3,opt,name=dump" json:"dump,omitempty"`
+ Restore *CriuRestoreResp `protobuf:"bytes,4,opt,name=restore" json:"restore,omitempty"`
+ Notify *CriuNotify `protobuf:"bytes,5,opt,name=notify" json:"notify,omitempty"`
+ Ps *CriuPageServerInfo `protobuf:"bytes,6,opt,name=ps" json:"ps,omitempty"`
+ CrErrno *int32 `protobuf:"varint,7,opt,name=cr_errno,json=crErrno" json:"cr_errno,omitempty"`
+ Features *CriuFeatures `protobuf:"bytes,8,opt,name=features" json:"features,omitempty"`
+ CrErrmsg *string `protobuf:"bytes,9,opt,name=cr_errmsg,json=crErrmsg" json:"cr_errmsg,omitempty"`
+ Version *CriuVersion `protobuf:"bytes,10,opt,name=version" json:"version,omitempty"`
+ Status *int32 `protobuf:"varint,11,opt,name=status" json:"status,omitempty"`
+}
+
+func (x *CriuResp) Reset() {
+ *x = CriuResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuResp) ProtoMessage() {}
+
+func (x *CriuResp) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuResp.ProtoReflect.Descriptor instead.
+func (*CriuResp) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *CriuResp) GetType() CriuReqType {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return CriuReqType_EMPTY
+}
+
+func (x *CriuResp) GetSuccess() bool {
+ if x != nil && x.Success != nil {
+ return *x.Success
+ }
+ return false
+}
+
+func (x *CriuResp) GetDump() *CriuDumpResp {
+ if x != nil {
+ return x.Dump
+ }
+ return nil
+}
+
+func (x *CriuResp) GetRestore() *CriuRestoreResp {
+ if x != nil {
+ return x.Restore
+ }
+ return nil
+}
+
+func (x *CriuResp) GetNotify() *CriuNotify {
+ if x != nil {
+ return x.Notify
+ }
+ return nil
+}
+
+func (x *CriuResp) GetPs() *CriuPageServerInfo {
+ if x != nil {
+ return x.Ps
+ }
+ return nil
+}
+
+func (x *CriuResp) GetCrErrno() int32 {
+ if x != nil && x.CrErrno != nil {
+ return *x.CrErrno
+ }
+ return 0
+}
+
+func (x *CriuResp) GetFeatures() *CriuFeatures {
+ if x != nil {
+ return x.Features
+ }
+ return nil
+}
+
+func (x *CriuResp) GetCrErrmsg() string {
+ if x != nil && x.CrErrmsg != nil {
+ return *x.CrErrmsg
+ }
+ return ""
+}
+
+func (x *CriuResp) GetVersion() *CriuVersion {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (x *CriuResp) GetStatus() int32 {
+ if x != nil && x.Status != nil {
+ return *x.Status
+ }
+ return 0
+}
+
+// Answer for criu_req_type.VERSION requests
+type CriuVersion struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MajorNumber *int32 `protobuf:"varint,1,req,name=major_number,json=majorNumber" json:"major_number,omitempty"`
+ MinorNumber *int32 `protobuf:"varint,2,req,name=minor_number,json=minorNumber" json:"minor_number,omitempty"`
+ Gitid *string `protobuf:"bytes,3,opt,name=gitid" json:"gitid,omitempty"`
+ Sublevel *int32 `protobuf:"varint,4,opt,name=sublevel" json:"sublevel,omitempty"`
+ Extra *int32 `protobuf:"varint,5,opt,name=extra" json:"extra,omitempty"`
+ Name *string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
+}
+
+func (x *CriuVersion) Reset() {
+ *x = CriuVersion{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_rpc_rpc_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CriuVersion) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CriuVersion) ProtoMessage() {}
+
+func (x *CriuVersion) ProtoReflect() protoreflect.Message {
+ mi := &file_rpc_rpc_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CriuVersion.ProtoReflect.Descriptor instead.
+func (*CriuVersion) Descriptor() ([]byte, []int) {
+ return file_rpc_rpc_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *CriuVersion) GetMajorNumber() int32 {
+ if x != nil && x.MajorNumber != nil {
+ return *x.MajorNumber
+ }
+ return 0
+}
+
+func (x *CriuVersion) GetMinorNumber() int32 {
+ if x != nil && x.MinorNumber != nil {
+ return *x.MinorNumber
+ }
+ return 0
+}
+
+func (x *CriuVersion) GetGitid() string {
+ if x != nil && x.Gitid != nil {
+ return *x.Gitid
+ }
+ return ""
+}
+
+func (x *CriuVersion) GetSublevel() int32 {
+ if x != nil && x.Sublevel != nil {
+ return *x.Sublevel
+ }
+ return 0
+}
+
+func (x *CriuVersion) GetExtra() int32 {
+ if x != nil && x.Extra != nil {
+ return *x.Extra
+ }
+ return 0
+}
+
+func (x *CriuVersion) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+var File_rpc_rpc_proto protoreflect.FileDescriptor
+
+var file_rpc_rpc_proto_rawDesc = []byte{
+ 0x0a, 0x0d, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x67, 0x0a, 0x15, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x66, 0x64, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x66, 0x64, 0x22, 0x3c, 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75,
+ 0x5f, 0x76, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x12, 0x13, 0x0a, 0x05, 0x69, 0x66,
+ 0x5f, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x69, 0x66, 0x49, 0x6e, 0x12,
+ 0x15, 0x0a, 0x06, 0x69, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52,
+ 0x05, 0x69, 0x66, 0x4f, 0x75, 0x74, 0x22, 0x33, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x02, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x61, 0x6c,
+ 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x03, 0x76, 0x61, 0x6c, 0x22, 0x56, 0x0a, 0x0e, 0x6a,
+ 0x6f, 0x69, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a,
+ 0x02, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x02, 0x6e, 0x73, 0x12, 0x17, 0x0a,
+ 0x07, 0x6e, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x06,
+ 0x6e, 0x73, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f,
+ 0x6f, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x72, 0x61,
+ 0x4f, 0x70, 0x74, 0x22, 0x2e, 0x0a, 0x0a, 0x69, 0x6e, 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66,
+ 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x66, 0x64, 0x18, 0x02, 0x20, 0x02, 0x28, 0x05, 0x52,
+ 0x02, 0x66, 0x64, 0x22, 0x35, 0x0a, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x6f,
+ 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x74, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x63, 0x74, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02,
+ 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x1f, 0x0a, 0x07, 0x75, 0x6e,
+ 0x69, 0x78, 0x5f, 0x73, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01,
+ 0x20, 0x02, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x8c, 0x11, 0x0a, 0x09,
+ 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6f, 0x70, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x66, 0x64, 0x18, 0x01, 0x20, 0x02, 0x28, 0x05,
+ 0x52, 0x0b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x44, 0x69, 0x72, 0x46, 0x64, 0x12, 0x10, 0x0a,
+ 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12,
+ 0x23, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78,
+ 0x5f, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x78, 0x74, 0x55, 0x6e,
+ 0x69, 0x78, 0x53, 0x6b, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x63, 0x70, 0x5f, 0x65, 0x73, 0x74, 0x61,
+ 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74,
+ 0x63, 0x70, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x27, 0x0a,
+ 0x0f, 0x65, 0x76, 0x61, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x76, 0x61, 0x73, 0x69, 0x76, 0x65, 0x44,
+ 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x65, 0x6c, 0x6c, 0x5f,
+ 0x6a, 0x6f, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x68, 0x65, 0x6c, 0x6c,
+ 0x4a, 0x6f, 0x62, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x6f, 0x63,
+ 0x6b, 0x73, 0x12, 0x1e, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
+ 0x09, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x32, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x26, 0x0a,
+ 0x02, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x72, 0x69, 0x75,
+ 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x52, 0x02, 0x70, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e,
+ 0x6f, 0x74, 0x69, 0x66, 0x79, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04,
+ 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, 0x67, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x67, 0x12,
+ 0x1b, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x6d, 0x18, 0x0f, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x4d, 0x65, 0x6d, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x64, 0x75, 0x70, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x44, 0x65, 0x64, 0x75, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x77,
+ 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x66, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x72, 0x46, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6c,
+ 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x70, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x70, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x65,
+ 0x74, 0x68, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x72, 0x69, 0x75,
+ 0x5f, 0x76, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x52, 0x05, 0x76, 0x65, 0x74, 0x68,
+ 0x73, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x70, 0x75, 0x5f, 0x63, 0x61, 0x70, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x0d, 0x3a, 0x0a, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36, 0x37, 0x32, 0x39, 0x35, 0x52, 0x06,
+ 0x63, 0x70, 0x75, 0x43, 0x61, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f,
+ 0x69, 0x72, 0x6d, 0x61, 0x70, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x66, 0x6f, 0x72,
+ 0x63, 0x65, 0x49, 0x72, 0x6d, 0x61, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x63, 0x5f,
+ 0x63, 0x6d, 0x64, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x65, 0x63, 0x43,
+ 0x6d, 0x64, 0x12, 0x27, 0x0a, 0x07, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x17, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
+ 0x6d, 0x61, 0x70, 0x52, 0x06, 0x65, 0x78, 0x74, 0x4d, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x18, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75,
+ 0x70, 0x73, 0x12, 0x25, 0x0a, 0x07, 0x63, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x19, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x6f, 0x6f,
+ 0x74, 0x52, 0x06, 0x63, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x73, 0x74,
+ 0x5f, 0x73, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
+ 0x72, 0x73, 0x74, 0x53, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, 0x0a, 0x69, 0x6e,
+ 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66, 0x64, 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b,
+ 0x2e, 0x69, 0x6e, 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66, 0x64, 0x52, 0x09, 0x69, 0x6e, 0x68,
+ 0x65, 0x72, 0x69, 0x74, 0x46, 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x65,
+ 0x78, 0x74, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75,
+ 0x74, 0x6f, 0x45, 0x78, 0x74, 0x4d, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x5f,
+ 0x73, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x65,
+ 0x78, 0x74, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74,
+ 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
+ 0x65, 0x78, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x6b,
+ 0x69, 0x70, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x6b,
+ 0x69, 0x70, 0x4d, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f,
+ 0x66, 0x73, 0x18, 0x20, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
+ 0x46, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6b, 0x5f, 0x69, 0x6e,
+ 0x6f, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73,
+ 0x6b, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6b, 0x49, 0x6e, 0x6f, 0x12, 0x3d, 0x0a, 0x13,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x6d,
+ 0x6f, 0x64, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x72, 0x69, 0x75,
+ 0x5f, 0x63, 0x67, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x11, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x67,
+ 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0d,
+ 0x3a, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x0a, 0x67, 0x68, 0x6f, 0x73, 0x74,
+ 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x5f, 0x73,
+ 0x63, 0x61, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x24, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0e, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x53, 0x63, 0x61, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12,
+ 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x25, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x73, 0x12, 0x28, 0x0a, 0x07, 0x6a, 0x6f, 0x69, 0x6e, 0x5f, 0x6e,
+ 0x73, 0x18, 0x27, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x06, 0x6a, 0x6f, 0x69, 0x6e, 0x4e, 0x73,
+ 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x73,
+ 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72,
+ 0x6f, 0x70, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x70, 0x72,
+ 0x6f, 0x70, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x46, 0x69, 0x6c, 0x65, 0x12,
+ 0x34, 0x0a, 0x16, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x2b, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x14, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x44, 0x75, 0x6d, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x65, 0x65, 0x7a, 0x65, 0x5f,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72,
+ 0x65, 0x65, 0x7a, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69,
+ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x63, 0x70, 0x5f, 0x73, 0x6b, 0x69, 0x70,
+ 0x5f, 0x69, 0x6e, 0x5f, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0f, 0x74, 0x63, 0x70, 0x53, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x46, 0x6c, 0x69, 0x67, 0x68,
+ 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x73, 0x79, 0x73, 0x63, 0x74, 0x6c,
+ 0x73, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x77, 0x65, 0x61, 0x6b, 0x53, 0x79, 0x73,
+ 0x63, 0x74, 0x6c, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x7a, 0x79, 0x5f, 0x70, 0x61, 0x67,
+ 0x65, 0x73, 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6c, 0x61, 0x7a, 0x79, 0x50, 0x61,
+ 0x67, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x66, 0x64,
+ 0x18, 0x31, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x64,
+ 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x5f, 0x70, 0x74, 0x73, 0x5f, 0x6d,
+ 0x61, 0x73, 0x74, 0x65, 0x72, 0x18, 0x32, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, 0x72, 0x70,
+ 0x68, 0x61, 0x6e, 0x50, 0x74, 0x73, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x33, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1b, 0x0a,
+ 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x34, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x08, 0x74, 0x63, 0x70, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x73,
+ 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x6c, 0x73, 0x6d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74,
+ 0x6c, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x65, 0x72, 0x74, 0x18, 0x36, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x74, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x65, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6c,
+ 0x73, 0x5f, 0x63, 0x61, 0x63, 0x72, 0x6c, 0x18, 0x37, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+ 0x6c, 0x73, 0x43, 0x61, 0x63, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6c, 0x73, 0x5f, 0x63,
+ 0x65, 0x72, 0x74, 0x18, 0x38, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x6c, 0x73, 0x43, 0x65,
+ 0x72, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6c, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x39, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x74,
+ 0x6c, 0x73, 0x18, 0x3a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x12, 0x27, 0x0a,
+ 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x63, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x79, 0x18, 0x3b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x6c, 0x73, 0x4e, 0x6f, 0x43, 0x6e,
+ 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x5f, 0x79, 0x61, 0x72, 0x64, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x67, 0x72,
+ 0x6f, 0x75, 0x70, 0x59, 0x61, 0x72, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x64,
+ 0x75, 0x6d, 0x70, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x3d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13,
+ 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x6f, 0x64, 0x65, 0x3a, 0x06, 0x53, 0x50, 0x4c, 0x49, 0x43, 0x45, 0x52, 0x0b, 0x70, 0x72, 0x65,
+ 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x69, 0x64, 0x66,
+ 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6b, 0x18, 0x3e, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x0c, 0x70, 0x69, 0x64, 0x66, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6b, 0x12, 0x2a,
+ 0x0a, 0x11, 0x6c, 0x73, 0x6d, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x18, 0x3f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x73, 0x6d, 0x4d, 0x6f,
+ 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x2c, 0x0a, 0x0e, 0x63, 0x72,
+ 0x69, 0x75, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x63, 0x72, 0x69, 0x75,
+ 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x12, 0x10, 0x0a,
+ 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x02, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22,
+ 0x37, 0x0a, 0x0b, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x16,
+ 0x0a, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x6c, 0x0a, 0x0d, 0x63, 0x72, 0x69, 0x75,
+ 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x65, 0x6d,
+ 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x65,
+ 0x6d, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x7a, 0x79, 0x5f, 0x70,
+ 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6c, 0x61, 0x7a, 0x79,
+ 0x50, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x64, 0x66, 0x64, 0x5f, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x70, 0x69, 0x64, 0x66,
+ 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x08, 0x63, 0x72, 0x69, 0x75, 0x5f,
+ 0x72, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28,
+ 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6f, 0x70, 0x74,
+ 0x73, 0x52, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x69, 0x66,
+ 0x79, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1b,
+ 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x8f, 0x03, 0x0a, 0x09, 0x63, 0x72,
+ 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73,
+ 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x64, 0x75, 0x6d, 0x70, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f,
+ 0x72, 0x65, 0x73, 0x70, 0x52, 0x04, 0x64, 0x75, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x72,
+ 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x52,
+ 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f,
+ 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x06, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x26,
+ 0x0a, 0x02, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x72, 0x69,
+ 0x75, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e,
+ 0x66, 0x6f, 0x52, 0x02, 0x70, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x72, 0x5f, 0x65, 0x72, 0x72,
+ 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x63, 0x72, 0x45, 0x72, 0x72, 0x6e,
+ 0x6f, 0x12, 0x2a, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x73, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x1b, 0x0a,
+ 0x09, 0x63, 0x72, 0x5f, 0x65, 0x72, 0x72, 0x6d, 0x73, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x63, 0x72, 0x45, 0x72, 0x72, 0x6d, 0x73, 0x67, 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x63, 0x72,
+ 0x69, 0x75, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0b, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb0, 0x01, 0x0a, 0x0c,
+ 0x63, 0x72, 0x69, 0x75, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x02,
+ 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x02, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x69, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x67, 0x69, 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x6c,
+ 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6c,
+ 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x2a, 0x5f,
+ 0x0a, 0x0c, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x63, 0x67, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x0a,
+ 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x47,
+ 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x52, 0x4f, 0x50, 0x53,
+ 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+ 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54,
+ 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x06, 0x2a,
+ 0x2d, 0x0a, 0x12, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70,
+ 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x4c, 0x49, 0x43, 0x45, 0x10,
+ 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x4d, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x02, 0x2a, 0xd0,
+ 0x01, 0x0a, 0x0d, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44,
+ 0x55, 0x4d, 0x50, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45,
+ 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0c, 0x0a,
+ 0x08, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x55, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x50,
+ 0x41, 0x47, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06,
+ 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x59, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x50, 0x55, 0x49,
+ 0x4e, 0x46, 0x4f, 0x5f, 0x44, 0x55, 0x4d, 0x50, 0x10, 0x07, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x50,
+ 0x55, 0x49, 0x4e, 0x46, 0x4f, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x11, 0x0a,
+ 0x0d, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x09,
+ 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x0a, 0x12, 0x0c, 0x0a,
+ 0x08, 0x57, 0x41, 0x49, 0x54, 0x5f, 0x50, 0x49, 0x44, 0x10, 0x0b, 0x12, 0x14, 0x0a, 0x10, 0x50,
+ 0x41, 0x47, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x43, 0x48, 0x4c, 0x44, 0x10,
+ 0x0c,
+}
+
+var (
+ file_rpc_rpc_proto_rawDescOnce sync.Once
+ file_rpc_rpc_proto_rawDescData = file_rpc_rpc_proto_rawDesc
+)
+
+func file_rpc_rpc_proto_rawDescGZIP() []byte {
+ file_rpc_rpc_proto_rawDescOnce.Do(func() {
+ file_rpc_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpc_rpc_proto_rawDescData)
+ })
+ return file_rpc_rpc_proto_rawDescData
+}
+
+var file_rpc_rpc_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_rpc_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_rpc_rpc_proto_goTypes = []interface{}{
+ (CriuCgMode)(0), // 0: criu_cg_mode
+ (CriuPreDumpMode)(0), // 1: criu_pre_dump_mode
+ (CriuReqType)(0), // 2: criu_req_type
+ (*CriuPageServerInfo)(nil), // 3: criu_page_server_info
+ (*CriuVethPair)(nil), // 4: criu_veth_pair
+ (*ExtMountMap)(nil), // 5: ext_mount_map
+ (*JoinNamespace)(nil), // 6: join_namespace
+ (*InheritFd)(nil), // 7: inherit_fd
+ (*CgroupRoot)(nil), // 8: cgroup_root
+ (*UnixSk)(nil), // 9: unix_sk
+ (*CriuOpts)(nil), // 10: criu_opts
+ (*CriuDumpResp)(nil), // 11: criu_dump_resp
+ (*CriuRestoreResp)(nil), // 12: criu_restore_resp
+ (*CriuNotify)(nil), // 13: criu_notify
+ (*CriuFeatures)(nil), // 14: criu_features
+ (*CriuReq)(nil), // 15: criu_req
+ (*CriuResp)(nil), // 16: criu_resp
+ (*CriuVersion)(nil), // 17: criu_version
+}
+var file_rpc_rpc_proto_depIdxs = []int32{
+ 3, // 0: criu_opts.ps:type_name -> criu_page_server_info
+ 4, // 1: criu_opts.veths:type_name -> criu_veth_pair
+ 5, // 2: criu_opts.ext_mnt:type_name -> ext_mount_map
+ 8, // 3: criu_opts.cg_root:type_name -> cgroup_root
+ 7, // 4: criu_opts.inherit_fd:type_name -> inherit_fd
+ 9, // 5: criu_opts.unix_sk_ino:type_name -> unix_sk
+ 0, // 6: criu_opts.manage_cgroups_mode:type_name -> criu_cg_mode
+ 6, // 7: criu_opts.join_ns:type_name -> join_namespace
+ 1, // 8: criu_opts.pre_dump_mode:type_name -> criu_pre_dump_mode
+ 2, // 9: criu_req.type:type_name -> criu_req_type
+ 10, // 10: criu_req.opts:type_name -> criu_opts
+ 14, // 11: criu_req.features:type_name -> criu_features
+ 2, // 12: criu_resp.type:type_name -> criu_req_type
+ 11, // 13: criu_resp.dump:type_name -> criu_dump_resp
+ 12, // 14: criu_resp.restore:type_name -> criu_restore_resp
+ 13, // 15: criu_resp.notify:type_name -> criu_notify
+ 3, // 16: criu_resp.ps:type_name -> criu_page_server_info
+ 14, // 17: criu_resp.features:type_name -> criu_features
+ 17, // 18: criu_resp.version:type_name -> criu_version
+ 19, // [19:19] is the sub-list for method output_type
+ 19, // [19:19] is the sub-list for method input_type
+ 19, // [19:19] is the sub-list for extension type_name
+ 19, // [19:19] is the sub-list for extension extendee
+ 0, // [0:19] is the sub-list for field type_name
+}
+
+func init() { file_rpc_rpc_proto_init() }
+func file_rpc_rpc_proto_init() {
+ if File_rpc_rpc_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_rpc_rpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuPageServerInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuVethPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtMountMap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*JoinNamespace); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*InheritFd); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CgroupRoot); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnixSk); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuOpts); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuDumpResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuRestoreResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuNotify); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuFeatures); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_rpc_rpc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CriuVersion); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_rpc_rpc_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_rpc_rpc_proto_goTypes,
+ DependencyIndexes: file_rpc_rpc_proto_depIdxs,
+ EnumInfos: file_rpc_rpc_proto_enumTypes,
+ MessageInfos: file_rpc_rpc_proto_msgTypes,
+ }.Build()
+ File_rpc_rpc_proto = out.File
+ file_rpc_rpc_proto_rawDesc = nil
+ file_rpc_rpc_proto_goTypes = nil
+ file_rpc_rpc_proto_depIdxs = nil
+}
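
Aside (illustrative sketch, not part of the patch): the generated bindings above follow the usual protoc-gen-go proto2 pattern — every field is a pointer so presence can be tracked, and each message gets nil-safe getters. A consumer of the vendored github.com/checkpoint-restore/go-criu/v5/rpc package might build a DUMP request roughly as follows; the file-descriptor value is a placeholder assumption, not something taken from this patch.

package main

import (
	"fmt"

	"github.com/checkpoint-restore/go-criu/v5/rpc"
)

func main() {
	// proto2 optional/required fields are pointers, so take addresses of locals.
	reqType := rpc.CriuReqType_DUMP
	imagesDirFd := int32(3) // placeholder: an already-open directory fd for checkpoint images
	leaveRunning := true

	req := &rpc.CriuReq{
		Type: &reqType,
		Opts: &rpc.CriuOpts{
			ImagesDirFd:  &imagesDirFd,
			LeaveRunning: &leaveRunning,
		},
	}

	// The generated getters shown above are nil-safe and dereference the pointers.
	fmt.Println(req.GetType(), req.GetOpts().GetLeaveRunning())
}
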
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.proto b/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.proto
new file mode 100644
index 000000000..61e1b24f4
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.proto
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: MIT
+
+syntax = "proto2";
+
+message criu_page_server_info {
+ optional string address = 1;
+ optional int32 port = 2;
+ optional int32 pid = 3;
+ optional int32 fd = 4;
+}
+
+message criu_veth_pair {
+ required string if_in = 1;
+ required string if_out = 2;
+};
+
+message ext_mount_map {
+ required string key = 1;
+ required string val = 2;
+};
+
+message join_namespace {
+ required string ns = 1;
+ required string ns_file = 2;
+ optional string extra_opt = 3;
+}
+
+message inherit_fd {
+ required string key = 1;
+ required int32 fd = 2;
+};
+
+message cgroup_root {
+ optional string ctrl = 1;
+ required string path = 2;
+};
+
+message unix_sk {
+ required uint32 inode = 1;
+};
+
+enum criu_cg_mode {
+ IGNORE = 0;
+ CG_NONE = 1;
+ PROPS = 2;
+ SOFT = 3;
+ FULL = 4;
+ STRICT = 5;
+ DEFAULT = 6;
+};
+
+enum criu_pre_dump_mode {
+ SPLICE = 1;
+ VM_READ = 2;
+};
+
+message criu_opts {
+ required int32 images_dir_fd = 1;
+ optional int32 pid = 2; /* if not set on dump, will dump requesting process */
+
+ optional bool leave_running = 3;
+ optional bool ext_unix_sk = 4;
+ optional bool tcp_established = 5;
+ optional bool evasive_devices = 6;
+ optional bool shell_job = 7;
+ optional bool file_locks = 8;
+ optional int32 log_level = 9 [default = 2];
+ optional string log_file = 10; /* No subdirs are allowed. Consider using work-dir */
+
+ optional criu_page_server_info ps = 11;
+
+ optional bool notify_scripts = 12;
+
+ optional string root = 13;
+ optional string parent_img = 14;
+ optional bool track_mem = 15;
+ optional bool auto_dedup = 16;
+
+ optional int32 work_dir_fd = 17;
+ optional bool link_remap = 18;
+ repeated criu_veth_pair veths = 19; /* DEPRECATED, use external instead */
+
+ optional uint32 cpu_cap = 20 [default = 0xffffffff];
+ optional bool force_irmap = 21;
+ repeated string exec_cmd = 22;
+
+ repeated ext_mount_map ext_mnt = 23; /* DEPRECATED, use external instead */
+ optional bool manage_cgroups = 24; /* backward compatibility */
+ repeated cgroup_root cg_root = 25;
+
+ optional bool rst_sibling = 26; /* swrk only */
+ repeated inherit_fd inherit_fd = 27; /* swrk only */
+
+ optional bool auto_ext_mnt = 28;
+ optional bool ext_sharing = 29;
+ optional bool ext_masters = 30;
+
+ repeated string skip_mnt = 31;
+ repeated string enable_fs = 32;
+
+ repeated unix_sk unix_sk_ino = 33; /* DEPRECATED, use external instead */
+
+ optional criu_cg_mode manage_cgroups_mode = 34;
+ optional uint32 ghost_limit = 35 [default = 0x100000];
+ repeated string irmap_scan_paths = 36;
+ repeated string external = 37;
+ optional uint32 empty_ns = 38;
+ repeated join_namespace join_ns = 39;
+
+ optional string cgroup_props = 41;
+ optional string cgroup_props_file = 42;
+ repeated string cgroup_dump_controller = 43;
+
+ optional string freeze_cgroup = 44;
+ optional uint32 timeout = 45;
+ optional bool tcp_skip_in_flight = 46;
+ optional bool weak_sysctls = 47;
+ optional bool lazy_pages = 48;
+ optional int32 status_fd = 49;
+ optional bool orphan_pts_master = 50;
+ optional string config_file = 51;
+ optional bool tcp_close = 52;
+ optional string lsm_profile = 53;
+ optional string tls_cacert = 54;
+ optional string tls_cacrl = 55;
+ optional string tls_cert = 56;
+ optional string tls_key = 57;
+ optional bool tls = 58;
+ optional bool tls_no_cn_verify = 59;
+ optional string cgroup_yard = 60;
+ optional criu_pre_dump_mode pre_dump_mode = 61 [default = SPLICE];
+ optional int32 pidfd_store_sk = 62;
+ optional string lsm_mount_context = 63;
+/* optional bool check_mounts = 128; */
+}
+
+message criu_dump_resp {
+ optional bool restored = 1;
+}
+
+message criu_restore_resp {
+ required int32 pid = 1;
+}
+
+message criu_notify {
+ optional string script = 1;
+ optional int32 pid = 2;
+}
+
+enum criu_req_type {
+ EMPTY = 0;
+ DUMP = 1;
+ RESTORE = 2;
+ CHECK = 3;
+ PRE_DUMP = 4;
+ PAGE_SERVER = 5;
+
+ NOTIFY = 6;
+
+ CPUINFO_DUMP = 7;
+ CPUINFO_CHECK = 8;
+
+ FEATURE_CHECK = 9;
+
+ VERSION = 10;
+
+ WAIT_PID = 11;
+ PAGE_SERVER_CHLD = 12;
+}
+
+/*
+ * List of features which can be queried via
+ * CRIU_REQ_TYPE__FEATURE_CHECK
+ */
+message criu_features {
+ optional bool mem_track = 1;
+ optional bool lazy_pages = 2;
+ optional bool pidfd_store = 3;
+}
+
+/*
+ * Request -- each request type corresponds to the arguments
+ * that must be present for that type
+ */
+
+message criu_req {
+ required criu_req_type type = 1;
+
+ optional criu_opts opts = 2;
+ optional bool notify_success = 3;
+
+ /*
+ * When set, the service won't close the connection but
+ * will wait for more requests to appear. This does not
+ * work for all request types.
+ */
+ optional bool keep_open = 4;
+ /*
+ * 'features' can be used to query which features
+ * are supported by the installed criu/kernel
+ * via RPC.
+ */
+ optional criu_features features = 5;
+
+ /* 'pid' is used for WAIT_PID */
+ optional uint32 pid = 6;
+}
+
+/*
+ * Response -- it states whether the request was served,
+ * along with additional request-specific information
+ */
+
+message criu_resp {
+ required criu_req_type type = 1;
+ required bool success = 2;
+
+ optional criu_dump_resp dump = 3;
+ optional criu_restore_resp restore = 4;
+ optional criu_notify notify = 5;
+ optional criu_page_server_info ps = 6;
+
+ optional int32 cr_errno = 7;
+ optional criu_features features = 8;
+ optional string cr_errmsg = 9;
+ optional criu_version version = 10;
+
+ optional int32 status = 11;
+}
+
+/* Answer for criu_req_type.VERSION requests */
+message criu_version {
+ required int32 major_number = 1;
+ required int32 minor_number = 2;
+ optional string gitid = 3;
+ optional int32 sublevel = 4;
+ optional int32 extra = 5;
+ optional string name = 6;
+}
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
index 34cc0d45d..a44f098ad 100644
--- a/vendor/github.com/containers/common/libimage/copier.go
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -342,7 +342,7 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
}
}
- var copiedManifest []byte
+ var returnManifest []byte
f := func() error {
opts := c.imageCopyOptions
if sourceInsecure != nil {
@@ -354,11 +354,13 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
opts.DestinationCtx.DockerInsecureSkipTLSVerify = value
}
- var err error
- copiedManifest, err = copy.Image(ctx, c.policyContext, destination, source, &opts)
+ copiedManifest, err := copy.Image(ctx, c.policyContext, destination, source, &opts)
+ if err == nil {
+ returnManifest = copiedManifest
+ }
return err
}
- return copiedManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
+ return returnManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
}
// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
@@ -369,7 +371,7 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere
// If set, the insecure return value indicates whether the registry is set to
// be insecure.
//
-// NOTE: this functionality is required by Buildah.
+// NOTE: this functionality is required by Buildah for OpenShift.
func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err error) {
registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
if !ok || registrySources == "" {
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index 5b060a185..c47e63339 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -836,9 +836,9 @@ func (i *Image) Manifest(ctx context.Context) (rawManifest []byte, mimeType stri
return src.GetManifest(ctx, nil)
}
-// getImageDigest creates an image object and uses the hex value of the digest as the image ID
-// for parsing the store reference
-func getImageDigest(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
+// getImageID creates an image object and uses the hex value of the config
+// blob's digest (if it has one) as the image ID for parsing the store reference
+func getImageID(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
newImg, err := src.NewImage(ctx, sys)
if err != nil {
return "", err
@@ -852,5 +852,5 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sys *types.Sy
if err = imageDigest.Validate(); err != nil {
return "", errors.Wrapf(err, "error getting config info")
}
- return "@" + imageDigest.Hex(), nil
+ return "@" + imageDigest.Encoded(), nil
}
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
index 9926aaec7..bcfb4e129 100644
--- a/vendor/github.com/containers/common/libimage/import.go
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -86,7 +86,7 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption
return "", err
}
- id, err := getImageDigest(ctx, srcRef, r.systemContextCopy())
+ id, err := getImageID(ctx, srcRef, r.systemContextCopy())
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index 875c2948d..81b5343c0 100644
--- a/vendor/github.com/containers/common/libimage/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
+ "github.com/containers/storage/pkg/lockfile"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -395,3 +396,20 @@ func (l *list) Remove(instanceDigest digest.Digest) error {
}
return err
}
+
+// LockerForImage returns a Locker for a given image record. It's recommended
+// that processes which use LoadFromImage() to load a list from an image, and
+// then use that list's SaveToImage() method to save a modified version of the
+// list back to that image record, hold this lock to avoid accidentally wiping
+// out changes that another process is also attempting to make.
+func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) {
+ img, err := store.Image(image)
+ if err != nil {
+ return nil, errors.Wrapf(err, "locating image %q for locating lock", image)
+ }
+ d := digest.NewDigestFromEncoded(digest.Canonical, img.ID)
+ if err := d.Validate(); err != nil {
+ return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image)
+ }
+ return store.GetDigestLock(d)
+}
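
Usage note (a sketch under assumptions, not part of the patch): the new LockerForImage helper is meant to bracket the whole load-modify-save cycle its comment describes. The sketch below assumes a storage.Store and an image ID are already available; the LoadFromImage()/SaveToImage() calls are left as comments because their signatures do not appear in this hunk.

package example

import (
	"github.com/containers/common/libimage/manifests"
	"github.com/containers/storage"
)

// updateListLocked shows how LockerForImage is intended to be used: take the
// lock around the whole load-modify-save cycle so that another process cannot
// wipe out the update in between.
func updateListLocked(store storage.Store, imageID string, modify func() error) error {
	locker, err := manifests.LockerForImage(store, imageID)
	if err != nil {
		return err
	}
	locker.Lock()         // serialize with other writers of this image record
	defer locker.Unlock() // hold until the modified list has been saved

	// With the lock held: load the list (LoadFromImage), apply the change, and
	// save it back (SaveToImage) before returning.
	return modify()
}
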
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 1a6ad1ce2..97347178a 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -12,6 +12,7 @@ import (
dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
dockerDaemonTransport "github.com/containers/image/v5/docker/daemon"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
ociArchiveTransport "github.com/containers/image/v5/oci/archive"
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/pkg/shortnames"
@@ -19,6 +20,7 @@ import (
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -192,19 +194,19 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference,
imageName = storageName
case ociArchiveTransport.Transport.Name():
- manifest, err := ociArchiveTransport.LoadManifestDescriptor(ref)
+ manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptor(ref)
if err != nil {
return nil, err
}
- // if index.json has no reference name, compute the image digest instead
- if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
- storageName, err = getImageDigest(ctx, ref, nil)
+ // if index.json has no reference name, compute the image ID instead
+ if manifestDescriptor.Annotations == nil || manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] == "" {
+ storageName, err = getImageID(ctx, ref, nil)
if err != nil {
return nil, err
}
imageName = "sha256:" + storageName[1:]
} else {
- storageName = manifest.Annotations["org.opencontainers.image.ref.name"]
+ storageName = manifestDescriptor.Annotations["org.opencontainers.image.ref.name"]
named, err := NormalizeName(storageName)
if err != nil {
return nil, err
@@ -248,7 +250,7 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte
var imageNames []string
if len(destNames) == 0 {
- destName, err := getImageDigest(ctx, readerRef, &r.systemContext)
+ destName, err := getImageID(ctx, readerRef, &r.systemContext)
if err != nil {
return nil, nil, err
}
@@ -316,8 +318,8 @@ func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, read
}
// copyFromRegistry pulls the specified, possibly unqualified, name from a
-// registry. On successful pull it returns the used fully-qualified name that
-// can later be used to look up the image in the local containers storage.
+// registry. On successful pull it returns the ID of the image in local
+// storage.
//
// If options.All is set, all tags from the specified registry will be pulled.
func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference, inputName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) {
@@ -337,7 +339,7 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
return nil, err
}
- pulledTags := []string{}
+ pulledIDs := []string{}
for _, tag := range tags {
select { // Let's be gentle with Podman remote.
case <-ctx.Done():
@@ -353,15 +355,54 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference
if err != nil {
return nil, err
}
- pulledTags = append(pulledTags, pulled...)
+ pulledIDs = append(pulledIDs, pulled...)
}
- return pulledTags, nil
+ return pulledIDs, nil
+}
+
+// imagesIDsForManifest parses the manifest of the copied image and then looks
+// up the IDs of the matching image. There's a small slice of time between
+// when we copy the image into local storage and when we go to look for it
+// using the name that we gave it when we copied it, during which the name we
+// wanted to assign to the image could have been moved, but the image's ID
+// will remain the same until it is deleted.
+func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemContext) ([]string, error) {
+ var imageDigest digest.Digest
+ manifestType := manifest.GuessMIMEType(manifestBytes)
+ if manifest.MIMETypeIsMultiImage(manifestType) {
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parsing manifest list")
+ }
+ d, err := list.ChooseInstance(sys)
+ if err != nil {
+ return nil, errors.Wrapf(err, "choosing instance from manifest list")
+ }
+ imageDigest = d
+ } else {
+ d, err := manifest.Digest(manifestBytes)
+ if err != nil {
+ return nil, errors.Wrapf(err, "digesting manifest")
+ }
+ imageDigest = d
+ }
+ var results []string
+ images, err := r.store.ImagesByDigest(imageDigest)
+ if err != nil {
+ return nil, errors.Wrapf(err, "listing images by manifest digest")
+ }
+ for _, image := range images {
+ results = append(results, image.ID)
+ }
+ if len(results) == 0 {
+ return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest")
+ }
+ return results, nil
}
// copySingleImageFromRegistry pulls the specified, possibly unqualified, name
-// from a registry. On successful pull it returns the used fully-qualified
-// name that can later be used to look up the image in the local containers
+// from a registry. On successful pull it returns the ID of the image in local
// storage.
func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { //nolint:gocyclo
// Sanity check.
@@ -375,7 +416,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
err error
)
- // Always check if there's a local image. If, we should use it's
+ // Always check if there's a local image. If so, we should use its
// resolved name for pulling. Assume we're doing a `pull foo`.
// If there's already a local image "localhost/foo", then we should
// attempt pulling that instead of doing the full short-name dance.
@@ -454,7 +495,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
}
}
- // If we found a local image, we should use it's locally resolved name
+ // If we found a local image, we should use its locally resolved name
// (see containers/buildah/issues/2904). An exception is if a custom
// platform is specified (e.g., `--arch=arm64`). In that case, we need
// to pessimistically pull the image since some images declare wrong
@@ -462,7 +503,8 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
// containers/podman/issues/10682).
//
// In other words: multi-arch support can only be as good as the images
- // in the wild.
+ // in the wild, so we shouldn't break things for our users by trying to
+ // insist that they make sense.
if localImage != nil && !customPlatform {
if imageName != resolvedImageName {
logrus.Debugf("Image %s resolved to local image %s which will be used for pulling", imageName, resolvedImageName)
@@ -541,7 +583,8 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
return nil, err
}
}
- if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+ var manifestBytes []byte
+ if manifestBytes, err = c.copy(ctx, srcRef, destRef); err != nil {
logrus.Debugf("Error pulling candidate %s: %v", candidateString, err)
pullErrors = append(pullErrors, err)
continue
@@ -554,6 +597,9 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
}
logrus.Debugf("Pulled candidate %s successfully", candidateString)
+ if ids, err := r.imagesIDsForManifest(manifestBytes, sys); err == nil {
+ return ids, nil
+ }
return []string{candidate.Value.String()}, nil
}
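
For readers unfamiliar with the containers/image manifest helpers used in this hunk, the following standalone sketch (not part of the patch) isolates the digest-resolution step that the new imagesIDsForManifest helper performs before it queries the store with ImagesByDigest; it uses only calls that already appear above.

package example

import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// manifestDigest reduces raw manifest bytes to the digest of the instance
// matching the given system context, whether the bytes are a manifest list or
// a single image manifest.
func manifestDigest(manifestBytes []byte, sys *types.SystemContext) (digest.Digest, error) {
	manifestType := manifest.GuessMIMEType(manifestBytes)
	if manifest.MIMETypeIsMultiImage(manifestType) {
		list, err := manifest.ListFromBlob(manifestBytes, manifestType)
		if err != nil {
			return "", errors.Wrapf(err, "parsing manifest list")
		}
		// Pick the instance for the current platform, as the pull code does.
		return list.ChooseInstance(sys)
	}
	return manifest.Digest(manifestBytes)
}
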
diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go
index 4d1b842e7..df29bc7da 100644
--- a/vendor/github.com/containers/common/libimage/search.go
+++ b/vendor/github.com/containers/common/libimage/search.go
@@ -185,6 +185,10 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri
sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
}
+ if options.Authfile != "" {
+ sys.AuthFilePath = options.Authfile
+ }
+
if options.ListTags {
results, err := searchRepositoryTags(ctx, sys, registry, term, options)
if err != nil {
diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go
index d2e75c1f7..093da0299 100644
--- a/vendor/github.com/containers/common/pkg/auth/auth.go
+++ b/vendor/github.com/containers/common/pkg/auth/auth.go
@@ -9,6 +9,7 @@ import (
"strings"
"github.com/containers/image/v5/docker"
+ "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
@@ -69,30 +70,50 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
var (
- server string
- err error
+ authConfig types.DockerAuthConfig
+ key, registry string
+ ref reference.Named
+ err error
)
- if len(args) > 1 {
- return errors.New("login accepts only one registry to login to")
- }
- if len(args) == 0 {
+ l := len(args)
+ switch l {
+ case 0:
if !opts.AcceptUnspecifiedRegistry {
return errors.New("please provide a registry to login to")
}
- if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
+ if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
- logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
- } else {
- server = getRegistryName(args[0])
+ registry = key
+ logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
+
+ case 1:
+ key, registry, ref, err = parseRegistryArgument(args[0], opts.AcceptRepositories)
+ if err != nil {
+ return err
+ }
+
+ default:
+ return errors.New("login accepts only one registry to login to")
+
}
- authConfig, err := config.GetCredentials(systemContext, server)
- if err != nil {
- return errors.Wrap(err, "reading auth file")
+
+ if ref != nil {
+ authConfig, err = config.GetCredentialsForRef(systemContext, ref)
+ if err != nil {
+ return errors.Wrap(err, "get credentials for repository")
+ }
+ } else {
+ // nolint: staticcheck
+ authConfig, err = config.GetCredentials(systemContext, registry)
+ if err != nil {
+ return errors.Wrap(err, "get credentials")
+ }
}
+
if opts.GetLoginSet {
if authConfig.Username == "" {
- return errors.Errorf("not logged into %s", server)
+ return errors.Errorf("not logged into %s", key)
}
fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
return nil
@@ -119,9 +140,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
// If no username and no password is specified, try to use existing ones.
if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" {
- fmt.Println("Authenticating with existing credentials...")
- if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, server); err == nil {
- fmt.Fprintln(opts.Stdout, "Existing credentials are valid. Already logged in to", server)
+ fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key)
+ if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil {
+ fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry)
return nil
}
fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password")
@@ -132,9 +153,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
return errors.Wrap(err, "getting username and password")
}
- if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
+ if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
// Write the new credentials to the authfile
- desc, err := config.SetCredentials(systemContext, server, username, password)
+ desc, err := config.SetCredentials(systemContext, key, username, password)
if err != nil {
return err
}
@@ -147,10 +168,45 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
return nil
}
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
- logrus.Debugf("error logging into %q: %v", server, unauthorized)
- return errors.Errorf("error logging into %q: invalid username/password", server)
+ logrus.Debugf("error logging into %q: %v", key, unauthorized)
+ return errors.Errorf("error logging into %q: invalid username/password", key)
+ }
+ return errors.Wrapf(err, "authenticating creds for %q", key)
+}
+
+// parseRegistryArgument verifies the provided arg depending on whether we
+// accept repositories or not.
+func parseRegistryArgument(arg string, acceptRepositories bool) (key, registry string, maybeRef reference.Named, err error) {
+ if !acceptRepositories {
+ registry = getRegistryName(arg)
+ key = registry
+ return key, registry, maybeRef, nil
+ }
+
+ key = trimScheme(arg)
+ if key != arg {
+ return key, registry, nil, errors.New("credentials key has http[s]:// prefix")
}
- return errors.Wrapf(err, "authenticating creds for %q", server)
+
+ registry = getRegistryName(key)
+ if registry == key {
+ // We cannot parse a reference from a registry, so we stop here
+ return key, registry, nil, nil
+ }
+
+ ref, parseErr := reference.ParseNamed(key)
+ if parseErr != nil {
+ return key, registry, nil, errors.Wrapf(parseErr, "parse reference from %q", key)
+ }
+
+ if !reference.IsNameOnly(ref) {
+ return key, registry, nil, errors.Errorf("reference %q contains tag or digest", ref.String())
+ }
+
+ maybeRef = ref
+ registry = reference.Domain(ref)
+
+ return key, registry, maybeRef, nil
}
// getRegistryName scrubs and parses the input to get the server name
@@ -158,13 +214,21 @@ func getRegistryName(server string) string {
// removes 'http://' or 'https://' from the front of the
// server/registry string if either is there. This will be mostly used
// for user input from 'Buildah login' and 'Buildah logout'.
- server = strings.TrimPrefix(strings.TrimPrefix(server, "https://"), "http://")
+ server = trimScheme(server)
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
return split[0]
}
+// trimScheme removes the HTTP(s) scheme from the provided repository.
+func trimScheme(repository string) string {
+ // removes 'http://' or 'https://' from the front of the
+ // server/registry string if either is there. This will be mostly used
+ // for user input from 'Buildah login' and 'Buildah logout'.
+ return strings.TrimPrefix(strings.TrimPrefix(repository, "https://"), "http://")
+}
+
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
@@ -209,8 +273,9 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
var (
- server string
- err error
+ key, registry string
+ ref reference.Named
+ err error
)
if len(args) > 1 {
return errors.New("logout accepts only one registry to logout from")
@@ -219,16 +284,20 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
if !opts.AcceptUnspecifiedRegistry {
return errors.New("please provide a registry to logout from")
}
- if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
+ if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
}
- logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", server)
+ registry = key
+ logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key)
}
if len(args) != 0 {
if opts.All {
return errors.New("--all takes no arguments")
}
- server = getRegistryName(args[0])
+ key, registry, ref, err = parseRegistryArgument(args[0], opts.AcceptRepositories)
+ if err != nil {
+ return err
+ }
}
if opts.All {
@@ -239,24 +308,34 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
return nil
}
- err = config.RemoveAuthentication(systemContext, server)
+ err = config.RemoveAuthentication(systemContext, key)
switch errors.Cause(err) {
case nil:
- fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", server)
+ fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key)
return nil
case config.ErrNotLoggedIn:
- authConfig, err := config.GetCredentials(systemContext, server)
- if err != nil {
- return errors.Wrap(err, "reading auth file")
+ var authConfig types.DockerAuthConfig
+ if ref != nil {
+ authConfig, err = config.GetCredentialsForRef(systemContext, ref)
+ if err != nil {
+ return errors.Wrap(err, "get credentials for repository")
+ }
+ } else {
+ // nolint: staticcheck
+ authConfig, err = config.GetCredentials(systemContext, registry)
+ if err != nil {
+ return errors.Wrap(err, "get credentials")
+ }
}
- authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, server)
+
+ authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
- fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", server)
+ fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
return nil
}
- return errors.Errorf("Not logged into %s\n", server)
+ return errors.Errorf("Not logged into %s\n", key)
default:
- return errors.Wrapf(err, "logging out of %q", server)
+ return errors.Wrapf(err, "logging out of %q", key)
}
}
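
To see how the new key/registry split behaves for repository-scoped logins, here is a minimal standalone sketch using only the standard library; the helper names mirror, but are not, the real containers/common functions, and the repository name is illustrative:

    package main

    import (
        "fmt"
        "strings"
    )

    // trimScheme mirrors the new helper: strip an http:// or https:// prefix
    // from user input before deriving the credentials key.
    func trimScheme(s string) string {
        return strings.TrimPrefix(strings.TrimPrefix(s, "https://"), "http://")
    }

    // registryOf mirrors getRegistryName: everything before the first slash.
    func registryOf(key string) string {
        return strings.Split(key, "/")[0]
    }

    func main() {
        // With AcceptRepositories, the full repository path becomes the
        // credentials key, while only the registry part is used for the
        // /v2/ authentication check.
        arg := "quay.io/myuser/myimage"
        key := trimScheme(arg)
        fmt.Println("key:     ", key)             // quay.io/myuser/myimage
        fmt.Println("registry:", registryOf(key)) // quay.io
    }
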
diff --git a/vendor/github.com/containers/common/pkg/auth/cli.go b/vendor/github.com/containers/common/pkg/auth/cli.go
index 5a7c1137c..7266bf48b 100644
--- a/vendor/github.com/containers/common/pkg/auth/cli.go
+++ b/vendor/github.com/containers/common/pkg/auth/cli.go
@@ -14,13 +14,14 @@ type LoginOptions struct {
// CLI flags managed by the FlagSet returned by GetLoginFlags
// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
- AuthFile string
- CertDir string
- Password string
- Username string
- StdinPassword bool
- GetLoginSet bool
- Verbose bool // set to true for verbose output
+ AuthFile string
+ CertDir string
+ Password string
+ Username string
+ StdinPassword bool
+ GetLoginSet bool
+ Verbose bool // set to true for verbose output
+ AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdin io.Reader // set to os.Stdin
Stdout io.Writer // set to os.Stdout
@@ -32,8 +33,9 @@ type LogoutOptions struct {
// CLI flags managed by the FlagSet returned by GetLogoutFlags
// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
- AuthFile string
- All bool
+ AuthFile string
+ All bool
+ AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdout io.Writer // set to os.Stdout
AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry
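
A hedged sketch of how a caller might opt into repository-scoped credentials via the new AcceptRepositories field; the username, password, and repository are illustrative, and error handling is minimal:

    package main

    import (
        "context"
        "log"
        "os"

        "github.com/containers/common/pkg/auth"
        "github.com/containers/image/v5/types"
    )

    func main() {
        opts := auth.LoginOptions{
            Username:           "myuser", // illustrative credentials
            Password:           "secret",
            Stdin:              os.Stdin,
            Stdout:             os.Stdout,
            AcceptRepositories: true, // allow a repository, not just a registry
        }
        // With AcceptRepositories set, "quay.io/myuser" is accepted as the
        // credentials key instead of being rejected as a non-registry argument.
        if err := auth.Login(context.Background(), &types.SystemContext{}, &opts, []string{"quay.io/myuser"}); err != nil {
            log.Fatal(err)
        }
    }
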
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 84876026d..008cfb642 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -637,9 +637,14 @@ func (c *Config) CheckCgroupsAndAdjustConfig() {
session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
hasSession := session != ""
- if hasSession && strings.HasPrefix(session, "unix:path=") {
- _, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
- hasSession = err == nil
+ if hasSession {
+ for _, part := range strings.Split(session, ",") {
+ if strings.HasPrefix(part, "unix:path=") {
+ _, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
+ hasSession = err == nil
+ break
+ }
+ }
}
if !hasSession && unshare.GetRootlessUID() != 0 {
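
The session-bus address can carry several comma-separated transports; a minimal standard-library sketch of the new parsing, with an illustrative address value:

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    func main() {
        // DBUS_SESSION_BUS_ADDRESS may list several transports separated by
        // commas; only the unix:path= entry names a socket that can be stat'ed.
        addr := "unix:path=/run/user/1000/bus,guid=0123456789abcdef"
        hasSession := addr != ""
        for _, part := range strings.Split(addr, ",") {
            if strings.HasPrefix(part, "unix:path=") {
                _, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
                hasSession = err == nil
                break
            }
        }
        fmt.Println("usable session bus:", hasSession)
    }
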
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
index 7c32dd660..8c1f0ec29 100644
--- a/vendor/github.com/containers/common/pkg/config/pull_policy.go
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -76,13 +76,13 @@ func (p PullPolicy) Validate() error {
// * "never" <-> PullPolicyNever
func ParsePullPolicy(s string) (PullPolicy, error) {
switch s {
- case "always":
+ case "always", "Always":
return PullPolicyAlways, nil
- case "missing", "ifnotpresent", "":
+ case "missing", "Missing", "ifnotpresent", "IfNotPresent", "":
return PullPolicyMissing, nil
- case "newer", "ifnewer":
+ case "newer", "Newer", "ifnewer", "IfNewer":
return PullPolicyNewer, nil
- case "never":
+ case "never", "Never":
return PullPolicyNever, nil
default:
return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
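
A small usage sketch showing that the Kubernetes-style spellings now parse alongside the lowercase ones, assuming the real containers/common config package:

    package main

    import (
        "fmt"

        "github.com/containers/common/pkg/config"
    )

    func main() {
        // Lowercase and Kubernetes-style spellings now map to the same policies.
        for _, s := range []string{"missing", "IfNotPresent", "newer", "Never"} {
            policy, err := config.ParsePullPolicy(s)
            fmt.Printf("%-12s -> %v (err=%v)\n", s, policy, err)
        }
    }
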
diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go
index 36712458a..07751f729 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/types.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/types.go
@@ -7,7 +7,7 @@ package seccomp
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
- DefaultErrnoRet *uint `json:"defaultErrnoRet"`
+ DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"`
// Architectures is kept to maintain backward compatibility with the old
// seccomp profile.
Architectures []Arch `json:"architectures,omitempty"`
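
The effect of the added omitempty tag, shown with a local stand-in struct rather than the real seccomp types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // profile is a local stand-in for the relevant Seccomp fields.
    type profile struct {
        DefaultAction   string `json:"defaultAction"`
        DefaultErrnoRet *uint  `json:"defaultErrnoRet,omitempty"`
    }

    func main() {
        out, _ := json.Marshal(profile{DefaultAction: "SCMP_ACT_ERRNO"})
        // A nil DefaultErrnoRet is now omitted instead of serializing as null.
        fmt.Println(string(out)) // {"defaultAction":"SCMP_ACT_ERRNO"}
    }
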
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 9bc479d23..b4ff8aa10 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
@@ -57,7 +58,7 @@ var compressionBufferSize = 1048576
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
// using the algorithm that the media type says it should be compressed with
-var expectedCompressionFormats = map[string]*compression.Algorithm{
+var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -117,13 +118,12 @@ type copier struct {
progress chan types.ProgressProperties
blobInfoCache internalblobinfocache.BlobInfoCache2
copyInParallel bool
- compressionFormat compression.Algorithm
+ compressionFormat compressiontypes.Algorithm
compressionLevel *int
ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig
maxParallelDownloads uint
downloadForeignLayers bool
- fetchPartialBlobs bool
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -207,9 +207,6 @@ type Options struct {
// Download layer contents with "nondistributable" media types ("foreign" layers) and translate the layer media type
// to not indicate "nondistributable".
DownloadForeignLayers bool
-
- // FetchPartialBlobs indicates whether to attempt to fetch the blob partially. Experimental.
- FetchPartialBlobs bool
}
// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
@@ -290,15 +287,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
ociEncryptConfig: options.OciEncryptConfig,
maxParallelDownloads: options.MaxParallelDownloads,
downloadForeignLayers: options.DownloadForeignLayers,
- fetchPartialBlobs: options.FetchPartialBlobs,
}
// Default to using gzip compression unless specified otherwise.
if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
- algo, err := compression.AlgorithmByName("gzip")
- if err != nil {
- return nil, err
- }
- c.compressionFormat = algo
+ c.compressionFormat = compression.Gzip
} else {
c.compressionFormat = *options.DestinationCtx.CompressionFormat
}
@@ -1286,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the destination has support for it.
imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
- if ic.c.fetchPartialBlobs && okSource && okDest && !diffIDIsNeeded {
+ if okSource && okDest && !diffIDIsNeeded {
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
progress := make(chan int64)
@@ -1320,7 +1312,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return info, cachedDiffID, nil
}
bar.Abort(true)
- logrus.Errorf("Failed to retrieve partial blob: %v", err)
+ logrus.Debugf("Failed to retrieve partial blob: %v", err)
}
// Fallback: copy the layer, computing the diffID if we need to do so
@@ -1364,7 +1356,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
- var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
+ var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
@@ -1375,7 +1367,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
- getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
+ getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
// reading from the pipe has failed, we don’t really care.
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
@@ -1394,7 +1386,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
}
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
-func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
result := diffIDResult{
digest: "",
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
@@ -1406,7 +1398,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
}
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
-func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
+func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
@@ -1439,7 +1431,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
+ getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
@@ -1733,7 +1725,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// doCompression reads all input from src and writes its compressed equivalent to dest.
-func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm, compressionLevel *int) error {
+func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
@@ -1751,7 +1743,7 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compression.Algorithm) {
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go
index 49957ac4e..e3280aa2b 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_dest.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go
@@ -21,13 +21,26 @@ const version = "Directory Transport Version: 1.1\n"
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")
type dirImageDestination struct {
- ref dirReference
- compress bool
+ ref dirReference
+ desiredLayerCompression types.LayerCompression
}
// newImageDestination returns an ImageDestination for writing to a directory.
-func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
- d := &dirImageDestination{ref: ref, compress: compress}
+func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
+ desiredLayerCompression := types.PreserveOriginal
+ if sys != nil {
+ if sys.DirForceCompress {
+ desiredLayerCompression = types.Compress
+
+ if sys.DirForceDecompress {
+ return nil, errors.Errorf("Cannot compress and decompress at the same time")
+ }
+ }
+ if sys.DirForceDecompress {
+ desiredLayerCompression = types.Decompress
+ }
+ }
+ d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
// If directory exists check if it is empty
// if not empty, check whether the contents match that of a container image directory and overwrite the contents
@@ -101,10 +114,7 @@ func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
}
func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
- if d.compress {
- return types.Compress
- }
- return types.PreserveOriginal
+ return d.desiredLayerCompression
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
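
A standalone sketch of the new compression selection for the dir transport; the types are local stand-ins, not the real ones, and the point is that the two force flags are mutually exclusive:

    package main

    import (
        "errors"
        "fmt"
    )

    // layerCompression is a local stand-in for types.LayerCompression.
    type layerCompression string

    const (
        preserveOriginal layerCompression = "PreserveOriginal"
        compress         layerCompression = "Compress"
        decompress       layerCompression = "Decompress"
    )

    // chooseDirCompression mirrors the new newImageDestination logic.
    func chooseDirCompression(forceCompress, forceDecompress bool) (layerCompression, error) {
        if forceCompress && forceDecompress {
            return preserveOriginal, errors.New("cannot compress and decompress at the same time")
        }
        switch {
        case forceCompress:
            return compress, nil
        case forceDecompress:
            return decompress, nil
        default:
            return preserveOriginal, nil
        }
    }

    func main() {
        c, err := chooseDirCompression(false, true)
        fmt.Println(c, err) // Decompress <nil>
    }
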
diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go
index adfec6ef3..e542d888c 100644
--- a/vendor/github.com/containers/image/v5/directory/directory_transport.go
+++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go
@@ -153,11 +153,7 @@ func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemCon
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
- compress := false
- if sys != nil {
- compress = sys.DirForceCompress
- }
- return newImageDestination(ref, compress)
+ return newImageDestination(sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 14c11dfd0..3fe9a11d0 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -304,7 +304,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
Password: password,
}
- resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
if err != nil {
return err
}
@@ -343,8 +343,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
- // lint:ignore SA1019 We can't use GetCredentialsForRef because we want to search the whole registry.
- auth, err := config.GetCredentials(sys, registry) // nolint:staticcheck // https://github.com/golangci/golangci-lint/issues/741
+ // We can't use GetCredentialsForRef here because we want to search the whole registry.
+ auth, err := config.GetCredentials(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "getting username and password")
}
@@ -380,7 +380,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint")
- resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
@@ -400,14 +400,15 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
searchRes := []SearchResult{}
path := "/v2/_catalog"
for len(searchRes) < limit {
- resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
+ err := httpResponseToError(resp, "")
+ logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
v2Res := &V2Results{}
@@ -533,11 +534,10 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// makeRequest should generally be preferred.
// Note that no exponential back off is performed when receiving an http 429 status code.
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
- req, err := http.NewRequest(method, url, stream)
+ req, err := http.NewRequestWithContext(ctx, method, url, stream)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
@@ -630,13 +630,11 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
return nil, errors.Errorf("missing realm in bearer auth challenge")
}
- authReq, err := http.NewRequest(http.MethodPost, realm, nil)
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
if err != nil {
return nil, err
}
- authReq = authReq.WithContext(ctx)
-
// Make the form data required against the oauth2 authentication
// More details here: https://docs.docker.com/registry/spec/auth/oauth/
params := authReq.URL.Query()
@@ -680,12 +678,11 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
return nil, errors.Errorf("missing realm in bearer auth challenge")
}
- authReq, err := http.NewRequest(http.MethodGet, realm, nil)
+ authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
if err != nil {
return nil, err
}
- authReq = authReq.WithContext(ctx)
params := authReq.URL.Query()
if c.auth.Username != "" {
params.Add("account", c.auth.Username)
@@ -739,7 +736,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return err
@@ -766,7 +763,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
- resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return false
@@ -800,7 +797,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
- res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
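
The repeated NewRequest/WithContext change, shown once as a minimal standard-library sketch; the registry URL is illustrative:

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        // NewRequestWithContext attaches the context at construction time,
        // replacing the older NewRequest + req.WithContext pair.
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://registry.example.com/v2/", nil)
        if err != nil {
            panic(err)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }
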
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 567a4bcf4..c84bb37d2 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -68,7 +68,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
tags := make([]string, 0)
for {
- res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
@@ -134,7 +134,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
- res, err := client.makeRequest(ctx, "HEAD", path, headers, nil, v2Auth, nil)
+ res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index 84694e157..360a7122e 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -147,7 +147,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
- res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
@@ -168,7 +168,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
// returns, so there isn’t a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
- res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err
@@ -194,7 +194,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
- res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+ res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
@@ -215,7 +215,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
- res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
if err != nil {
return false, -1, err
}
@@ -246,7 +246,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
}
mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath)
- res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+ res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
if err != nil {
return err
}
@@ -264,7 +264,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return errors.Wrap(err, "determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
- res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+ res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
@@ -424,7 +424,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
- res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
if err != nil {
return err
}
@@ -640,7 +640,7 @@ sigExists:
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
- res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
+ res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index c5a428ba0..5dc8e7b1f 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -192,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}
- res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
return nil, "", err
}
@@ -248,7 +248,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
// NOTE: we must not authenticate on additional URLs as those
// can be abused to leak credentials or tokens. Please
// refer to CVE-2020-15157 for more information.
- resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
+ resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err == nil {
if resp.StatusCode != http.StatusOK {
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -295,7 +295,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
if err != nil {
return nil, nil, err
}
@@ -364,7 +364,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
logrus.Debugf("Downloading %s", path)
- res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
+ res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, 0, err
}
@@ -454,11 +454,10 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
case "http", "https":
logrus.Debugf("GET %s", url)
- req, err := http.NewRequest("GET", url.String(), nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
if err != nil {
return nil, false, err
}
- req = req.WithContext(ctx)
res, err := s.c.client.Do(req)
if err != nil {
return nil, false, err
@@ -523,7 +522,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
- get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
+ get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
if err != nil {
return err
}
@@ -545,7 +544,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
// When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
- delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
+ delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
index 1dceaa669..b86e8b1ac 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
@@ -2,6 +2,7 @@ package blobinfocache
import (
"github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@@ -47,7 +48,7 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success.
-func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) {
+func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
switch compressorName {
case Uncompressed:
return types.Decompress, nil, nil
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 5930640ac..4692211c0 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -3,7 +3,7 @@ package manifest
import (
"fmt"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
@@ -44,7 +44,7 @@ func layerInfosToStrings(infos []LayerInfo) []string {
// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
// versions of “the same kind of content”.
-// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed;
+// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed;
// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
type compressionMIMETypeSet map[string]string
@@ -59,7 +59,7 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
// differ from it only in what type of compression has been applied, the returned error will not be
// a ManifestLayerCompressionIncompatibilityError.
-func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) {
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
index 584b5f09c..2711ca5eb 100644
--- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -214,14 +214,14 @@ func (m *Schema2) LayerInfos() []LayerInfo {
var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
- compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip,
- compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
{
- mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
- compression.Gzip.Name(): DockerV2Schema2LayerMediaType,
- compression.Zstd.Name(): mtsUnsupportedMIMEType,
+ mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
+ compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+ compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 24ce6d080..29a479c94 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -5,7 +5,7 @@ import (
"fmt"
"strings"
- "github.com/containers/image/v5/pkg/compression"
+ compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
@@ -96,14 +96,14 @@ func (m *OCI1) LayerInfos() []LayerInfo {
var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
- compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip,
- compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd,
},
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayer,
- compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip,
- compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
},
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
index 9925aeda7..55d3f637a 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -148,13 +148,13 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io
errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls {
- req, err := http.NewRequest("GET", url, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
continue
}
- resp, err := s.client.Do(req.WithContext(ctx))
+ resp, err := s.client.Do(req)
if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
continue
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go
index 889772fc0..6ea65bcf3 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift.go
@@ -79,11 +79,10 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
logrus.Debugf("Will send body: %s", requestBody)
requestBodyReader = bytes.NewReader(requestBody)
}
- req, err := http.NewRequest(method, url.String(), requestBodyReader)
+ req, err := http.NewRequestWithContext(ctx, method, url.String(), requestBodyReader)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
if len(c.bearerToken) != 0 {
req.Header.Set("Authorization", "Bearer "+c.bearerToken)
@@ -137,7 +136,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
- body, err := c.doRequest(ctx, "GET", path, nil)
+ body, err := c.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
}
@@ -273,7 +272,7 @@ func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error
// FIXME: validate components per validation.IsValidPathSegmentName?
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
- body, err := s.client.doRequest(ctx, "GET", path, nil)
+ body, err := s.client.doRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return err
}
@@ -496,7 +495,7 @@ sigExists:
if err != nil {
return err
}
- _, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body)
+ _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index 718b50c05..c28e81792 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/pkg/compression/internal"
"github.com/containers/image/v5/pkg/compression/types"
- "github.com/containers/storage/pkg/chunked"
+ "github.com/containers/storage/pkg/chunked/compressor"
"github.com/klauspost/pgzip"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -21,15 +21,20 @@ type Algorithm = types.Algorithm
var (
// Gzip compression.
- Gzip = internal.NewAlgorithm("gzip", "gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
+ Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
+ []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
// Bzip2 compression.
- Bzip2 = internal.NewAlgorithm("bzip2", "bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
+ Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
+ []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
// Xz compression.
- Xz = internal.NewAlgorithm("Xz", "xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
+ Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
+ []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
// Zstd compression.
- Zstd = internal.NewAlgorithm("zstd", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
+ Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
+ []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// Zstd:chunked compression.
- ZstdChunked = internal.NewAlgorithm("zstd:chunked", "zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, chunked.ZstdCompressor)
+ ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
+ nil, ZstdDecompressor, compressor.ZstdCompressor)
compressionAlgorithms = map[string]Algorithm{
Gzip.Name(): Gzip,
@@ -118,7 +123,8 @@ func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.R
var retAlgo Algorithm
var decompressor DecompressorFunc
for _, algo := range compressionAlgorithms {
- if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) {
+ prefix := internal.AlgorithmPrefix(algo)
+ if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
logrus.Debugf("Detected compression format %s", algo.Name())
retAlgo = algo
decompressor = internal.AlgorithmDecompressor(algo)
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
index 5df5370b0..fb37ca317 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -14,7 +14,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
type Algorithm struct {
name string
mime string
- prefix []byte
+ prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
decompressor DecompressorFunc
compressor CompressorFunc
}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
index f96eff2e3..43d03b601 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -11,3 +11,31 @@ type DecompressorFunc = internal.DecompressorFunc
// Algorithm is a compression algorithm provided and supported by pkg/compression.
// It can’t be supplied from the outside.
type Algorithm = internal.Algorithm
+
+const (
+ // GzipAlgorithmName is the name used by pkg/compression.Gzip.
+ // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ GzipAlgorithmName = "gzip"
+ // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+ // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ Bzip2AlgorithmName = "bzip2"
+ // XzAlgorithmName is the name used by pkg/compression.Xz.
+ // NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ XzAlgorithmName = "Xz"
+ // ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+ // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdAlgorithmName = "zstd"
+ // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+ // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked"
+)
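
A small sketch of resolving an algorithm by one of the new name constants, assuming the real containers/image packages; note that the /types package deliberately carries no implementations:

    package main

    import (
        "fmt"

        "github.com/containers/image/v5/pkg/compression"
        compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    )

    func main() {
        // The name constants live in the dependency-light /types package, while
        // the concrete algorithms still come from pkg/compression.
        algo, err := compression.AlgorithmByName(compressiontypes.ZstdAlgorithmName)
        if err != nil {
            panic(err)
        }
        fmt.Println(algo.Name() == compressiontypes.ZstdAlgorithmName) // true
    }
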
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 8436741f3..c82a9e1a0 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -236,9 +236,8 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned.
//
-// Deprecated: GetCredentialsForRef should be used in favor of this API
-// because it allows different credentials for different repositories on the
-// same registry.
+// GetCredentialsForRef should almost always be used in favor of this API to
+// allow different credentials for different repositories on the same registry.
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get())
}
@@ -665,14 +664,11 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat
// those entries even in non-legacyFormat ~/.docker/config.json.
// The docker.io registry still uses the /v1/ key with a special host name,
// so account for that as well.
- registry = normalizeAuthFileKey(registry, legacyFormat)
- normalizedAuths := map[string]dockerAuthConfig{}
+ registry = normalizeRegistry(registry)
for k, v := range auths.AuthConfigs {
- normalizedAuths[normalizeAuthFileKey(k, legacyFormat)] = v
- }
-
- if val, exists := normalizedAuths[registry]; exists {
- return decodeDockerAuth(val)
+ if normalizeAuthFileKey(k, legacyFormat) == registry {
+ return decodeDockerAuth(v)
+ }
}
return types.DockerAuthConfig{}, nil
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 48efa195b..1c4a1419f 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -636,6 +636,8 @@ type SystemContext struct {
// === dir.Transport overrides ===
// DirForceCompress compresses the image layers if set to true
DirForceCompress bool
+ // DirForceDecompress decompresses the image layers if set to true
+ DirForceDecompress bool
// CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 0a1971535..8936ec087 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 14
+ VersionMinor = 15
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 5d245052c..7aa332e41 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.32.6
+1.33.0
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 2fa54a207..ecfbae916 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -364,12 +364,12 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Try to enable project quota support over xfs.
if d.quotaCtl, err = quota.NewControl(home); err == nil {
projectQuotaSupported = true
- } else if opts.quota.Size > 0 {
- return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
+ return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err)
}
- } else if opts.quota.Size > 0 {
+ } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
- return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+ return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)
@@ -400,6 +400,13 @@ func parseOptions(options []string) (*overlayOptions, error) {
return nil, err
}
o.quota.Size = uint64(size)
+ case "inodes":
+ logrus.Debugf("overlay: inodes=%s", val)
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ o.quota.Inodes = uint64(inodes)
case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
@@ -613,6 +620,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
if unshare.IsRootless() {
flags = fmt.Sprintf("%s,userxattr", flags)
}
+ if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
+ logrus.Debugf("unable to create kernel-style whiteout: %v", err)
+ return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout")
+ }
if len(flags) < unix.Getpagesize() {
err := unix.Mount("overlay", mergedDir, "overlay", 0, flags)
@@ -784,6 +795,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
}
+ if _, ok := opts.StorageOpt["inodes"]; !ok {
+ if opts.StorageOpt == nil {
+ opts.StorageOpt = map[string]string{}
+ }
+ opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10)
+ }
+
return d.create(id, parent, opts)
}
@@ -794,6 +812,9 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if _, ok := opts.StorageOpt["size"]; ok {
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
+ if _, ok := opts.StorageOpt["inodes"]; ok {
+ return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers")
+ }
}
return d.create(id, parent, opts)
@@ -850,7 +871,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
if driver.options.quota.Size > 0 {
quota.Size = driver.options.quota.Size
}
-
+ if driver.options.quota.Inodes > 0 {
+ quota.Inodes = driver.options.quota.Inodes
+ }
}
// Set container disk quota limit
// If it is set to 0, we will track the disk usage, but not enforce a limit
@@ -922,6 +945,12 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
return err
}
driver.options.quota.Size = uint64(size)
+ case "inodes":
+ inodes, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ driver.options.quota.Inodes = uint64(inodes)
default:
return fmt.Errorf("Unknown option %s", key)
}
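The new supportsOverlay probe boils down to creating a kernel-style whiteout, i.e. a 0:0 character device, in the upper directory; being able to do so is now a hard requirement. A standalone sketch of that check (the path is a placeholder; unprivileged users typically get EPERM):

package main

import (
	"fmt"
	"path/filepath"
	"syscall"

	"golang.org/x/sys/unix"
)

func tryKernelWhiteout(upperDir string) error {
	// Overlayfs represents deleted files as 0:0 character devices ("whiteouts").
	path := filepath.Join(upperDir, "whiteout")
	if err := syscall.Mknod(path, syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil {
		return fmt.Errorf("unable to create kernel-style whiteout: %w", err)
	}
	return nil
}

func main() {
	if err := tryKernelWhiteout("/tmp"); err != nil {
		fmt.Println(err)
	}
}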
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index d805623b5..a435f6b82 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -38,8 +38,8 @@ struct fsxattr {
#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
-#ifndef XFS_PROJ_QUOTA
-#define XFS_PROJ_QUOTA 2
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
@@ -61,9 +61,10 @@ import (
"golang.org/x/sys/unix"
)
-// Quota limit params - currently we only control blocks hard limit
+// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
@@ -119,7 +120,8 @@ func NewControl(basePath string) (*Control, error) {
// a quota on the first available project id
//
quota := Quota{
- Size: 0,
+ Size: 0,
+ Inodes: 0,
}
if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
return nil, err
@@ -166,7 +168,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
//
// set the quota limit for the container's project id
//
- logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
+ logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return setProjectQuota(q.backingFsBlockDev, projectID, quota)
}
@@ -175,11 +177,18 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
- d.d_flags = C.XFS_PROJ_QUOTA
+ d.d_flags = C.FS_PROJ_QUOTA
- d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
- d.d_blk_hardlimit = C.__u64(quota.Size / 512)
- d.d_blk_softlimit = d.d_blk_hardlimit
+ if quota.Size > 0 {
+ d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
+ d.d_blk_hardlimit = C.__u64(quota.Size / 512)
+ d.d_blk_softlimit = d.d_blk_hardlimit
+ }
+ if quota.Inodes > 0 {
+ d.d_fieldmask = C.FS_DQ_IHARD | C.FS_DQ_ISOFT
+ d.d_ino_hardlimit = C.__u64(quota.Inodes)
+ d.d_ino_softlimit = d.d_ino_hardlimit
+ }
var cs = C.CString(backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
@@ -202,6 +211,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
+ quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}
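With the Quota struct growing an Inodes field, both limits travel through the same SetQuota call. A minimal usage sketch, assuming an XFS-backed store with project quotas enabled (paths and LAYERID are placeholders; the package needs cgo and Linux):

package main

import (
	"fmt"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
	if err != nil {
		fmt.Println("project quotas unavailable:", err)
		return
	}
	q := quota.Quota{
		Size:   1 << 30, // block hard limit in bytes (1 GiB)
		Inodes: 10000,   // new inode hard limit
	}
	if err := ctl.SetQuota("/var/lib/containers/storage/overlay/LAYERID/diff", q); err != nil {
		fmt.Println("SetQuota failed:", err)
	}
}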
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
index be6c75a58..7469138db 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go
@@ -8,7 +8,8 @@ import (
// Quota limit params - currently we only control blocks hard limit
type Quota struct {
- Size uint64
+ Size uint64
+ Inodes uint64
}
// Control - Context to be used by storage driver (e.g. overlay)
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index d46000ace..e4f484d6b 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -16,7 +16,7 @@ require (
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.4.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.0
+ github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.8.2
github.com/pkg/errors v0.9.1
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 081da00e4..2607dbc9b 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -99,7 +99,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.1/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -468,8 +468,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.0 h1:QOhAQAYUlKeofuyeKdR6ITvOnXLPbEAjPMjz9wCUXcU=
-github.com/opencontainers/runc v1.0.0/go.mod h1:MU2S3KEB2ZExnhnAQYbwjdYV6HwKtDlNbA2Z2OeNDeA=
+github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
+github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 50e3e3555..48e846f7c 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -645,10 +645,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
file.Close()
- case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ case tar.TypeBlock, tar.TypeChar:
if inUserns { // cannot create devices in a userns
+ logrus.Debugf("Tar: Can't create device %v while running in user namespace", path)
return nil
}
+ fallthrough
+ case tar.TypeFifo:
// Handle this is an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
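The archive change works because FIFO creation needs no device privileges, while block and char device nodes still do; only the latter keep returning early in a user namespace. A small illustration (paths are placeholders):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// FIFOs can be created by an unprivileged user, including inside a user
	// namespace, so tar.TypeFifo may fall through to the mknod helper.
	if err := unix.Mkfifo("/tmp/demo.fifo", 0600); err != nil {
		fmt.Println("mkfifo:", err)
	}

	// Real device nodes (here 1:3, the numbers of /dev/null) normally fail
	// with EPERM when rootless, which is why block/char entries still return
	// early in a user namespace.
	if err := unix.Mknod("/tmp/demo.null", unix.S_IFCHR|0600, int(unix.Mkdev(1, 3))); err != nil {
		fmt.Println("mknod:", err)
	}
}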
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index e257737e7..7c3e442da 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -11,7 +11,6 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
- "github.com/opencontainers/runc/libcontainer/userns"
"golang.org/x/sys/unix"
)
@@ -88,11 +87,6 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
- if userns.RunningInUserNS() {
- // cannot create a device if running in user namespace
- return nil
- }
-
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go
index 605be4b8f..f2811fb9a 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go
@@ -2,72 +2,29 @@ package chunked
import (
"bytes"
- "encoding/base64"
"encoding/binary"
- "encoding/json"
"fmt"
"io"
- "io/ioutil"
- "time"
- "github.com/containers/storage/pkg/ioutils"
+ "github.com/containers/storage/pkg/chunked/compressor"
+ "github.com/containers/storage/pkg/chunked/internal"
"github.com/klauspost/compress/zstd"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/archive/tar"
)
-type zstdTOC struct {
- Version int `json:"version"`
- Entries []zstdFileMetadata `json:"entries"`
-}
-
-type zstdFileMetadata struct {
- Type string `json:"type"`
- Name string `json:"name"`
- Linkname string `json:"linkName,omitempty"`
- Mode int64 `json:"mode,omitempty"`
- Size int64 `json:"size"`
- UID int `json:"uid"`
- GID int `json:"gid"`
- ModTime time.Time `json:"modtime"`
- AccessTime time.Time `json:"accesstime"`
- ChangeTime time.Time `json:"changetime"`
- Devmajor int64 `json:"devMajor"`
- Devminor int64 `json:"devMinor"`
- Xattrs map[string]string `json:"xattrs,omitempty"`
- Digest string `json:"digest,omitempty"`
- Offset int64 `json:"offset,omitempty"`
- EndOffset int64 `json:"endOffset,omitempty"`
-
- // Currently chunking is not supported.
- ChunkSize int64 `json:"chunkSize,omitempty"`
- ChunkOffset int64 `json:"chunkOffset,omitempty"`
- ChunkDigest string `json:"chunkDigest,omitempty"`
-}
-
const (
- TypeReg = "reg"
- TypeChunk = "chunk"
- TypeLink = "hardlink"
- TypeChar = "char"
- TypeBlock = "block"
- TypeDir = "dir"
- TypeFifo = "fifo"
- TypeSymlink = "symlink"
+ TypeReg = internal.TypeReg
+ TypeChunk = internal.TypeChunk
+ TypeLink = internal.TypeLink
+ TypeChar = internal.TypeChar
+ TypeBlock = internal.TypeBlock
+ TypeDir = internal.TypeDir
+ TypeFifo = internal.TypeFifo
+ TypeSymlink = internal.TypeSymlink
)
-var tarTypes = map[byte]string{
- tar.TypeReg: TypeReg,
- tar.TypeRegA: TypeReg,
- tar.TypeLink: TypeLink,
- tar.TypeChar: TypeChar,
- tar.TypeBlock: TypeBlock,
- tar.TypeDir: TypeDir,
- tar.TypeFifo: TypeFifo,
- tar.TypeSymlink: TypeSymlink,
-}
-
var typesToTar = map[string]byte{
TypeReg: tar.TypeReg,
TypeLink: tar.TypeLink,
@@ -78,14 +35,6 @@ var typesToTar = map[string]byte{
TypeSymlink: tar.TypeSymlink,
}
-func getType(t byte) (string, error) {
- r, found := tarTypes[t]
- if !found {
- return "", fmt.Errorf("unknown tarball type: %v", t)
- }
- return r, nil
-}
-
func typeToTarType(t string) (byte, error) {
r, found := typesToTar[t]
if !found {
@@ -94,52 +43,30 @@ func typeToTarType(t string) (byte, error) {
return r, nil
}
-const (
- manifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
- manifestInfoKey = "io.containers.zstd-chunked.manifest-position"
-
- // manifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
- manifestTypeCRFS = 1
-
- // footerSizeSupported is the footer size supported by this implementation.
- // Newer versions of the image format might increase this value, so reject
- // any version that is not supported.
- footerSizeSupported = 40
-)
-
-var (
- // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
- // will ignore it.
- // https://tools.ietf.org/html/rfc8478#section-3.1.2
- skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
-
- zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
-)
-
func isZstdChunkedFrameMagic(data []byte) bool {
if len(data) < 8 {
return false
}
- return bytes.Equal(zstdChunkedFrameMagic, data[:8])
+ return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, error) {
- footerSize := int64(footerSizeSupported)
+ footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, errors.New("blob too small")
}
- manifestChecksumAnnotation := annotations[manifestChecksumKey]
+ manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
- return nil, fmt.Errorf("manifest checksum annotation %q not found", manifestChecksumKey)
+ return nil, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
- if offsetMetadata := annotations[manifestInfoKey]; offsetMetadata != "" {
+ if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, err
}
@@ -173,7 +100,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
}
}
- if manifestType != manifestTypeCRFS {
+ if manifestType != internal.ManifestTypeCRFS {
return nil, errors.New("invalid manifest type")
}
@@ -235,279 +162,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
return manifest, nil
}
-func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
- if _, err := dest.Write(skippableFrameMagic); err != nil {
- return err
- }
-
- var size []byte = make([]byte, 4)
- binary.LittleEndian.PutUint32(size, uint32(len(data)))
- if _, err := dest.Write(size); err != nil {
- return err
- }
- if _, err := dest.Write(data); err != nil {
- return err
- }
- return nil
-}
-
-func writeZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []zstdFileMetadata, level int) error {
- // 8 is the size of the zstd skippable frame header + the frame size
- manifestOffset := offset + 8
-
- toc := zstdTOC{
- Version: 1,
- Entries: metadata,
- }
-
- // Generate the manifest
- manifest, err := json.Marshal(toc)
- if err != nil {
- return err
- }
-
- var compressedBuffer bytes.Buffer
- zstdWriter, err := zstdWriterWithLevel(&compressedBuffer, level)
- if err != nil {
- return err
- }
- if _, err := zstdWriter.Write(manifest); err != nil {
- zstdWriter.Close()
- return err
- }
- if err := zstdWriter.Close(); err != nil {
- return err
- }
- compressedManifest := compressedBuffer.Bytes()
-
- manifestDigester := digest.Canonical.Digester()
- manifestChecksum := manifestDigester.Hash()
- if _, err := manifestChecksum.Write(compressedManifest); err != nil {
- return err
- }
-
- outMetadata[manifestChecksumKey] = manifestDigester.Digest().String()
- outMetadata[manifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), manifestTypeCRFS)
- if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
- return err
- }
-
- // Store the offset to the manifest and its size in LE order
- var manifestDataLE []byte = make([]byte, footerSizeSupported)
- binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
- binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
- binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(manifestTypeCRFS))
- copy(manifestDataLE[32:], zstdChunkedFrameMagic)
-
- return appendZstdSkippableFrame(dest, manifestDataLE)
-}
-
-func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
- // total written so far. Used to retrieve partial offsets in the file
- dest := ioutils.NewWriteCounter(destFile)
-
- tr := tar.NewReader(reader)
- tr.RawAccounting = true
-
- buf := make([]byte, 4096)
-
- zstdWriter, err := zstdWriterWithLevel(dest, level)
- if err != nil {
- return err
- }
- defer func() {
- if zstdWriter != nil {
- zstdWriter.Close()
- zstdWriter.Flush()
- }
- }()
-
- restartCompression := func() (int64, error) {
- var offset int64
- if zstdWriter != nil {
- if err := zstdWriter.Close(); err != nil {
- return 0, err
- }
- if err := zstdWriter.Flush(); err != nil {
- return 0, err
- }
- offset = dest.Count
- zstdWriter.Reset(dest)
- }
- return offset, nil
- }
-
- var metadata []zstdFileMetadata
- for {
- hdr, err := tr.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
-
- rawBytes := tr.RawBytes()
- if _, err := zstdWriter.Write(rawBytes); err != nil {
- return err
- }
- payloadDigester := digest.Canonical.Digester()
- payloadChecksum := payloadDigester.Hash()
-
- payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
-
- // Now handle the payload, if any
- var startOffset, endOffset int64
- checksum := ""
- for {
- read, errRead := tr.Read(buf)
- if errRead != nil && errRead != io.EOF {
- return err
- }
-
- // restart the compression only if there is
- // a payload.
- if read > 0 {
- if startOffset == 0 {
- startOffset, err = restartCompression()
- if err != nil {
- return err
- }
- }
- _, err := payloadDest.Write(buf[:read])
- if err != nil {
- return err
- }
- }
- if errRead == io.EOF {
- if startOffset > 0 {
- endOffset, err = restartCompression()
- if err != nil {
- return err
- }
- checksum = payloadDigester.Digest().String()
- }
- break
- }
- }
-
- typ, err := getType(hdr.Typeflag)
- if err != nil {
- return err
- }
- xattrs := make(map[string]string)
- for k, v := range hdr.Xattrs {
- xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
- }
- m := zstdFileMetadata{
- Type: typ,
- Name: hdr.Name,
- Linkname: hdr.Linkname,
- Mode: hdr.Mode,
- Size: hdr.Size,
- UID: hdr.Uid,
- GID: hdr.Gid,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- Devmajor: hdr.Devmajor,
- Devminor: hdr.Devminor,
- Xattrs: xattrs,
- Digest: checksum,
- Offset: startOffset,
- EndOffset: endOffset,
-
- // ChunkSize is 0 for the last chunk
- ChunkSize: 0,
- ChunkOffset: 0,
- ChunkDigest: checksum,
- }
- metadata = append(metadata, m)
- }
-
- rawBytes := tr.RawBytes()
- if _, err := zstdWriter.Write(rawBytes); err != nil {
- return err
- }
- if err := zstdWriter.Flush(); err != nil {
- return err
- }
- if err := zstdWriter.Close(); err != nil {
- return err
- }
- zstdWriter = nil
-
- return writeZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
-}
-
-type zstdChunkedWriter struct {
- tarSplitOut *io.PipeWriter
- tarSplitErr chan error
-}
-
-func (w zstdChunkedWriter) Close() error {
- err := <-w.tarSplitErr
- if err != nil {
- w.tarSplitOut.Close()
- return err
- }
- return w.tarSplitOut.Close()
-}
-
-func (w zstdChunkedWriter) Write(p []byte) (int, error) {
- select {
- case err := <-w.tarSplitErr:
- w.tarSplitOut.Close()
- return 0, err
- default:
- return w.tarSplitOut.Write(p)
- }
-}
-
-// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
-// compressed separately so it can be addressed separately. Idea based on CRFS:
-// https://github.com/google/crfs
-// The difference with CRFS is that the zstd compression is used instead of gzip.
-// The reason for it is that zstd supports embedding metadata ignored by the decoder
-// as part of the compressed stream.
-// A manifest json file with all the metadata is appended at the end of the tarball
-// stream, using zstd skippable frames.
-// The final file will look like:
-// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
-// Where:
-// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
-// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
-// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
-// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
-func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
- ch := make(chan error, 1)
- r, w := io.Pipe()
-
- go func() {
- ch <- writeZstdChunkedStream(out, metadata, r, level)
- io.Copy(ioutil.Discard, r)
- r.Close()
- close(ch)
- }()
-
- return zstdChunkedWriter{
- tarSplitOut: w,
- tarSplitErr: ch,
- }, nil
-}
-
-func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
- el := zstd.EncoderLevelFromZstd(level)
- return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
-}
-
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
- if level == nil {
- l := 3
- level = &l
- }
-
- return zstdChunkedWriterWithLevel(r, metadata, *level)
+ return compressor.ZstdCompressor(r, metadata, level)
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
new file mode 100644
index 000000000..a205b73fd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -0,0 +1,220 @@
+package compressor
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "encoding/base64"
+ "io"
+ "io/ioutil"
+
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ "github.com/opencontainers/go-digest"
+ "github.com/vbatts/tar-split/archive/tar"
+)
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+ // total written so far. Used to retrieve partial offsets in the file
+ dest := ioutils.NewWriteCounter(destFile)
+
+ tr := tar.NewReader(reader)
+ tr.RawAccounting = true
+
+ buf := make([]byte, 4096)
+
+ zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if zstdWriter != nil {
+ zstdWriter.Close()
+ zstdWriter.Flush()
+ }
+ }()
+
+ restartCompression := func() (int64, error) {
+ var offset int64
+ if zstdWriter != nil {
+ if err := zstdWriter.Close(); err != nil {
+ return 0, err
+ }
+ if err := zstdWriter.Flush(); err != nil {
+ return 0, err
+ }
+ offset = dest.Count
+ zstdWriter.Reset(dest)
+ }
+ return offset, nil
+ }
+
+ var metadata []internal.ZstdFileMetadata
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+ payloadDigester := digest.Canonical.Digester()
+ payloadChecksum := payloadDigester.Hash()
+
+ payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+
+ // Now handle the payload, if any
+ var startOffset, endOffset int64
+ checksum := ""
+ for {
+ read, errRead := tr.Read(buf)
+ if errRead != nil && errRead != io.EOF {
+ return err
+ }
+
+ // restart the compression only if there is
+ // a payload.
+ if read > 0 {
+ if startOffset == 0 {
+ startOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ }
+ _, err := payloadDest.Write(buf[:read])
+ if err != nil {
+ return err
+ }
+ }
+ if errRead == io.EOF {
+ if startOffset > 0 {
+ endOffset, err = restartCompression()
+ if err != nil {
+ return err
+ }
+ checksum = payloadDigester.Digest().String()
+ }
+ break
+ }
+ }
+
+ typ, err := internal.GetType(hdr.Typeflag)
+ if err != nil {
+ return err
+ }
+ xattrs := make(map[string]string)
+ for k, v := range hdr.Xattrs {
+ xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
+ }
+ m := internal.ZstdFileMetadata{
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: hdr.ModTime,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ Digest: checksum,
+ Offset: startOffset,
+ EndOffset: endOffset,
+
+ // ChunkSize is 0 for the last chunk
+ ChunkSize: 0,
+ ChunkOffset: 0,
+ ChunkDigest: checksum,
+ }
+ metadata = append(metadata, m)
+ }
+
+ rawBytes := tr.RawBytes()
+ if _, err := zstdWriter.Write(rawBytes); err != nil {
+ return err
+ }
+ if err := zstdWriter.Flush(); err != nil {
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ zstdWriter = nil
+
+ return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
+}
+
+type zstdChunkedWriter struct {
+ tarSplitOut *io.PipeWriter
+ tarSplitErr chan error
+}
+
+func (w zstdChunkedWriter) Close() error {
+ err := <-w.tarSplitErr
+ if err != nil {
+ w.tarSplitOut.Close()
+ return err
+ }
+ return w.tarSplitOut.Close()
+}
+
+func (w zstdChunkedWriter) Write(p []byte) (int, error) {
+ select {
+ case err := <-w.tarSplitErr:
+ w.tarSplitOut.Close()
+ return 0, err
+ default:
+ return w.tarSplitOut.Write(p)
+ }
+}
+
+// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is
+// compressed separately so it can be addressed separately. Idea based on CRFS:
+// https://github.com/google/crfs
+// The difference with CRFS is that the zstd compression is used instead of gzip.
+// The reason for it is that zstd supports embedding metadata ignored by the decoder
+// as part of the compressed stream.
+// A manifest json file with all the metadata is appended at the end of the tarball
+// stream, using zstd skippable frames.
+// The final file will look like:
+// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2]
+// Where:
+// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER]
+// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST]
+// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER]
+// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format.
+func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) {
+ ch := make(chan error, 1)
+ r, w := io.Pipe()
+
+ go func() {
+ ch <- writeZstdChunkedStream(out, metadata, r, level)
+ io.Copy(ioutil.Discard, r)
+ r.Close()
+ close(ch)
+ }()
+
+ return zstdChunkedWriter{
+ tarSplitOut: w,
+ tarSplitErr: ch,
+ }, nil
+}
+
+// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level == nil {
+ l := 3
+ level = &l
+ }
+
+ return zstdChunkedWriterWithLevel(r, metadata, *level)
+}
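A usage sketch for the new package: wrap the destination writer, stream an uncompressed tar through it, and the zstd:chunked manifest annotations are filled into the map by the time Close returns (file names are placeholders; a nil level selects the default of 3):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/chunked/compressor"
)

func main() {
	src, err := os.Open("layer.tar") // plain tar input
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.Create("layer.zstd-chunked")
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	annotations := map[string]string{}
	wc, err := compressor.ZstdCompressor(dst, annotations, nil)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(wc, src); err != nil {
		panic(err)
	}
	if err := wc.Close(); err != nil {
		panic(err)
	}
	// The two io.containers.zstd-chunked.* annotations describe where the
	// embedded manifest lives inside the blob.
	fmt.Println(annotations)
}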
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
new file mode 100644
index 000000000..af0025c20
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -0,0 +1,172 @@
+package internal
+
+// NOTE: This is used from github.com/containers/image by callers that
+// don't otherwise use containers/storage, so don't make this depend on any
+// larger software like the graph drivers.
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/opencontainers/go-digest"
+)
+
+type ZstdTOC struct {
+ Version int `json:"version"`
+ Entries []ZstdFileMetadata `json:"entries"`
+}
+
+type ZstdFileMetadata struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Linkname string `json:"linkName,omitempty"`
+ Mode int64 `json:"mode,omitempty"`
+ Size int64 `json:"size"`
+ UID int `json:"uid"`
+ GID int `json:"gid"`
+ ModTime time.Time `json:"modtime"`
+ AccessTime time.Time `json:"accesstime"`
+ ChangeTime time.Time `json:"changetime"`
+ Devmajor int64 `json:"devMajor"`
+ Devminor int64 `json:"devMinor"`
+ Xattrs map[string]string `json:"xattrs,omitempty"`
+ Digest string `json:"digest,omitempty"`
+ Offset int64 `json:"offset,omitempty"`
+ EndOffset int64 `json:"endOffset,omitempty"`
+
+ // Currently chunking is not supported.
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+}
+
+const (
+ TypeReg = "reg"
+ TypeChunk = "chunk"
+ TypeLink = "hardlink"
+ TypeChar = "char"
+ TypeBlock = "block"
+ TypeDir = "dir"
+ TypeFifo = "fifo"
+ TypeSymlink = "symlink"
+)
+
+var TarTypes = map[byte]string{
+ tar.TypeReg: TypeReg,
+ tar.TypeRegA: TypeReg,
+ tar.TypeLink: TypeLink,
+ tar.TypeChar: TypeChar,
+ tar.TypeBlock: TypeBlock,
+ tar.TypeDir: TypeDir,
+ tar.TypeFifo: TypeFifo,
+ tar.TypeSymlink: TypeSymlink,
+}
+
+func GetType(t byte) (string, error) {
+ r, found := TarTypes[t]
+ if !found {
+ return "", fmt.Errorf("unknown tarball type: %v", t)
+ }
+ return r, nil
+}
+
+const (
+ ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum"
+ ManifestInfoKey = "io.containers.zstd-chunked.manifest-position"
+
+ // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
+ ManifestTypeCRFS = 1
+
+ // FooterSizeSupported is the footer size supported by this implementation.
+ // Newer versions of the image format might increase this value, so reject
+ // any version that is not supported.
+ FooterSizeSupported = 40
+)
+
+var (
+ // when the zstd decoder encounters a skippable frame + 1 byte for the size, it
+ // will ignore it.
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2
+ skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
+
+ ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
+)
+
+func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
+ if _, err := dest.Write(skippableFrameMagic); err != nil {
+ return err
+ }
+
+ var size []byte = make([]byte, 4)
+ binary.LittleEndian.PutUint32(size, uint32(len(data)))
+ if _, err := dest.Write(size); err != nil {
+ return err
+ }
+ if _, err := dest.Write(data); err != nil {
+ return err
+ }
+ return nil
+}
+
+func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []ZstdFileMetadata, level int) error {
+ // 8 is the size of the zstd skippable frame header + the frame size
+ manifestOffset := offset + 8
+
+ toc := ZstdTOC{
+ Version: 1,
+ Entries: metadata,
+ }
+
+ // Generate the manifest
+ manifest, err := json.Marshal(toc)
+ if err != nil {
+ return err
+ }
+
+ var compressedBuffer bytes.Buffer
+ zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level)
+ if err != nil {
+ return err
+ }
+ if _, err := zstdWriter.Write(manifest); err != nil {
+ zstdWriter.Close()
+ return err
+ }
+ if err := zstdWriter.Close(); err != nil {
+ return err
+ }
+ compressedManifest := compressedBuffer.Bytes()
+
+ manifestDigester := digest.Canonical.Digester()
+ manifestChecksum := manifestDigester.Hash()
+ if _, err := manifestChecksum.Write(compressedManifest); err != nil {
+ return err
+ }
+
+ outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String()
+ outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS)
+ if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil {
+ return err
+ }
+
+ // Store the offset to the manifest and its size in LE order
+ var manifestDataLE []byte = make([]byte, FooterSizeSupported)
+ binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
+ binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
+ binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
+ copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
+
+ return appendZstdSkippableFrame(dest, manifestDataLE)
+}
+
+func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
+ el := zstd.EncoderLevelFromZstd(level)
+ return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
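The 40-byte footer written by WriteZstdChunkedManifest is four little-endian uint64 fields followed by the 8-byte frame magic. A decoding sketch of just that layout (locating the footer's skippable frame inside the blob is out of scope here):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var zstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}

func parseFooter(footer []byte) error {
	if len(footer) != 40 { // FooterSizeSupported
		return fmt.Errorf("unexpected footer size %d", len(footer))
	}
	if !bytes.Equal(footer[32:40], zstdChunkedFrameMagic) {
		return fmt.Errorf("missing zstd:chunked footer magic")
	}
	manifestOffset := binary.LittleEndian.Uint64(footer[0:8])
	compressedLen := binary.LittleEndian.Uint64(footer[8:16])
	uncompressedLen := binary.LittleEndian.Uint64(footer[16:24])
	manifestType := binary.LittleEndian.Uint64(footer[24:32])
	fmt.Println(manifestOffset, compressedLen, uncompressedLen, manifestType)
	return nil
}

func main() {
	_ = parseFooter(make([]byte, 40)) // returns an error: magic is missing
}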
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 8c5062475..0f14d8af9 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -19,6 +19,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
driversCopy "github.com/containers/storage/drivers/copy"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/types"
"github.com/klauspost/compress/zstd"
@@ -39,7 +40,7 @@ const (
type chunkedZstdDiffer struct {
stream ImageSourceSeekable
manifest []byte
- layersMetadata map[string][]zstdFileMetadata
+ layersMetadata map[string][]internal.ZstdFileMetadata
layersTarget map[string]string
}
@@ -75,11 +76,11 @@ func copyFileContent(src, destFile, root string, dirfd int, missingDirsMode, mod
return dstFile, st.Size(), err
}
-func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[string]map[string]*zstdFileMetadata {
- maps := make(map[string]map[string]*zstdFileMetadata)
+func prepareOtherLayersCache(layersMetadata map[string][]internal.ZstdFileMetadata) map[string]map[string]*internal.ZstdFileMetadata {
+ maps := make(map[string]map[string]*internal.ZstdFileMetadata)
for layerID, v := range layersMetadata {
- r := make(map[string]*zstdFileMetadata)
+ r := make(map[string]*internal.ZstdFileMetadata)
for i := range v {
r[v[i].Digest] = &v[i]
}
@@ -88,13 +89,13 @@ func prepareOtherLayersCache(layersMetadata map[string][]zstdFileMetadata) map[s
return maps
}
-func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[string]string, error) {
+func getLayersCache(store storage.Store) (map[string][]internal.ZstdFileMetadata, map[string]string, error) {
allLayers, err := store.Layers()
if err != nil {
return nil, nil, err
}
- layersMetadata := make(map[string][]zstdFileMetadata)
+ layersMetadata := make(map[string][]internal.ZstdFileMetadata)
layersTarget := make(map[string]string)
for _, r := range allLayers {
manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
@@ -106,7 +107,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
if err != nil {
return nil, nil, err
}
- var toc zstdTOC
+ var toc internal.ZstdTOC
if err := json.Unmarshal(manifest, &toc); err != nil {
continue
}
@@ -123,7 +124,7 @@ func getLayersCache(store storage.Store) (map[string][]zstdFileMetadata, map[str
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
- if _, ok := annotations[manifestChecksumKey]; ok {
+ if _, ok := annotations[internal.ManifestChecksumKey]; ok {
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss)
}
return nil, errors.New("blob type not supported for partial retrieval")
@@ -147,7 +148,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
}, nil
}
-func findFileInOtherLayers(file zstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*zstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileInOtherLayers(file internal.ZstdFileMetadata, root string, dirfd int, layersMetadata map[string]map[string]*internal.ZstdFileMetadata, layersTarget map[string]string, missingDirsMode os.FileMode) (*os.File, int64, error) {
// this is ugly, needs to be indexed
for layerID, checksums := range layersMetadata {
m, found := checksums[file.Digest]
@@ -194,7 +195,7 @@ func getFileDigest(f *os.File) (digest.Digest, error) {
// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible.
// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different
// paths.
-func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
+func findFileOnTheHost(file internal.ZstdFileMetadata, root string, dirfd int, missingDirsMode os.FileMode) (*os.File, int64, error) {
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
if !strings.HasPrefix(sourceFile, "/usr/") {
// limit host deduplication to files under /usr.
@@ -251,7 +252,7 @@ func findFileOnTheHost(file zstdFileMetadata, root string, dirfd int, missingDir
return dstFile, written, nil
}
-func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) error {
+func maybeDoIDRemap(manifest []internal.ZstdFileMetadata, options *archive.TarOptions) error {
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
return nil
}
@@ -278,7 +279,7 @@ func maybeDoIDRemap(manifest []zstdFileMetadata, options *archive.TarOptions) er
}
type missingFile struct {
- File *zstdFileMetadata
+ File *internal.ZstdFileMetadata
Gap int64
}
@@ -291,7 +292,7 @@ type missingChunk struct {
Files []missingFile
}
-func setFileAttrs(file *os.File, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
if file == nil || file.Fd() < 0 {
return errors.Errorf("invalid file")
}
@@ -346,7 +347,7 @@ func openFileUnderRoot(name, root string, dirfd int, flags uint64, mode os.FileM
return os.NewFile(uintptr(fd), name), nil
}
-func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) (err error) {
+func createFileFromZstdStream(dest string, dirfd int, reader io.Reader, missingDirsMode, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) (err error) {
file, err := openFileUnderRoot(metadata.Name, dest, dirfd, newFileFlags, 0)
if err != nil {
return err
@@ -497,7 +498,7 @@ func retrieveMissingFiles(input *chunkedZstdDiffer, dest string, dirfd int, miss
return nil
}
-func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
parent := filepath.Dir(metadata.Name)
base := filepath.Base(metadata.Name)
@@ -526,7 +527,7 @@ func safeMkdir(target string, dirfd int, mode os.FileMode, metadata *zstdFileMet
return setFileAttrs(file, mode, metadata, options)
}
-func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeLink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
sourceFile, err := openFileUnderRoot(metadata.Linkname, target, dirfd, unix.O_RDONLY, 0)
if err != nil {
return err
@@ -558,7 +559,7 @@ func safeLink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMeta
return setFileAttrs(newFile, mode, metadata, options)
}
-func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *zstdFileMetadata, options *archive.TarOptions) error {
+func safeSymlink(target string, dirfd int, mode os.FileMode, metadata *internal.ZstdFileMetadata, options *archive.TarOptions) error {
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
@@ -636,7 +637,7 @@ type hardLinkToCreate struct {
dest string
dirfd int
mode os.FileMode
- metadata *zstdFileMetadata
+ metadata *internal.ZstdFileMetadata
}
func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
@@ -659,7 +660,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
}
// Generate the manifest
- var toc zstdTOC
+ var toc internal.ZstdTOC
if err := json.Unmarshal(d.manifest, &toc); err != nil {
return output, err
}
@@ -667,7 +668,7 @@ func (d *chunkedZstdDiffer) ApplyDiff(dest string, options *archive.TarOptions)
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
var missingChunks []missingChunk
- var mergedEntries []zstdFileMetadata
+ var mergedEntries []internal.ZstdFileMetadata
if err := maybeDoIDRemap(toc.Entries, options); err != nil {
return output, err
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index e70f4f018..722750c0c 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -69,6 +69,9 @@ additionalimagestores = [
# and vfs drivers.
#ignore_chown_errors = "false"
+# Inodes is used to set a maximum inodes of the container image.
+# inodes = ""
+
# Path to an helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 03ddd5ad9..4d62b151a 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -2,6 +2,7 @@ package types
import (
"fmt"
+ "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -74,9 +75,12 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
return runtimeDir, nil
}
- runUserDir := env.getRunUserDir()
- if isRootlessRuntimeDirOwner(runUserDir, env) {
- return runUserDir, nil
+ initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ if err != nil || string(initCommand) == "systemd" {
+ runUserDir := env.getRunUserDir()
+ if isRootlessRuntimeDirOwner(runUserDir, env) {
+ return runUserDir, nil
+ }
}
tmpPerUserDir := env.getTmpPerUserDir()
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b21a65251..c1c4b34c9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -46,9 +46,9 @@ github.com/buger/goterm
github.com/cespare/xxhash/v2
# github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
github.com/checkpoint-restore/checkpointctl/lib
-# github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
-github.com/checkpoint-restore/go-criu
-github.com/checkpoint-restore/go-criu/rpc
+# github.com/checkpoint-restore/go-criu/v5 v5.1.0
+github.com/checkpoint-restore/go-criu/v5
+github.com/checkpoint-restore/go-criu/v5/rpc
# github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
github.com/chzyer/readline
# github.com/container-orchestrated-devices/container-device-interface v0.0.0-20210325223243-f99e8b6c10b9
@@ -93,7 +93,7 @@ github.com/containers/buildah/pkg/overlay
github.com/containers/buildah/pkg/parse
github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/util
-# github.com/containers/common v0.41.1-0.20210721172332-291287e9d060
+# github.com/containers/common v0.41.1-0.20210730122913-cd6c45fd20e3
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
github.com/containers/common/pkg/apparmor
@@ -125,7 +125,7 @@ github.com/containers/common/pkg/umask
github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.14.0
+# github.com/containers/image/v5 v5.15.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -197,7 +197,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.32.6
+# github.com/containers/storage v1.33.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -214,6 +214,8 @@ github.com/containers/storage/drivers/zfs
github.com/containers/storage/pkg/archive
github.com/containers/storage/pkg/chrootarchive
github.com/containers/storage/pkg/chunked
+github.com/containers/storage/pkg/chunked/compressor
+github.com/containers/storage/pkg/chunked/internal
github.com/containers/storage/pkg/config
github.com/containers/storage/pkg/devicemapper
github.com/containers/storage/pkg/directory