 Makefile                                                            |  13
 cmd/podman/common/create_opts.go                                    |  11
 cmd/podman/images/build.go                                          |   5
 go.mod                                                              |   1
 go.sum                                                              |  10
 libpod/boltdb_state.go                                              |  98
 libpod/container_internal.go                                        |  22
 libpod/container_internal_linux.go                                  | 212
 libpod/container_log.go                                             |   1
 libpod/container_log_linux.go                                       |   1
 libpod/in_memory_state.go                                           |  40
 libpod/oci_conmon_linux.go                                          |  12
 libpod/runtime_ctr.go                                               | 113
 libpod/state.go                                                     |  13
 pkg/api/handlers/compat/images_build.go                             |   6
 pkg/api/server/register_archive.go                                  |   2
 pkg/bindings/images/build.go                                        |   6
 pkg/checkpoint/checkpoint_restore.go                                |  34
 pkg/checkpoint/crutils/checkpoint_restore_utils.go                  | 191
 test/apiv2/44-mounts.at                                             |  21
 test/e2e/build_test.go                                              |  16
 test/e2e/logs_test.go                                               | 110
 test/e2e/rename_test.go                                             |  21
 vendor/github.com/checkpoint-restore/checkpointctl/LICENSE          | 201
 vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go  | 221
 vendor/modules.txt                                                  |   2
 26 files changed, 973 insertions(+), 410 deletions(-)
diff --git a/Makefile b/Makefile
index 4c2a4047b..268cfe7f9 100644
--- a/Makefile
+++ b/Makefile
@@ -543,14 +543,18 @@ install.cni:
install ${SELINUXOPT} -m 644 cni/87-podman-bridge.conflist ${DESTDIR}${ETCDIR}/cni/net.d/87-podman-bridge.conflist
.PHONY: install.docker
-install.docker: docker-docs
- install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
+install.docker:
install ${SELINUXOPT} -m 755 docker $(DESTDIR)$(BINDIR)/docker
- install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR} ${DESTDIR}${TMPFILESDIR}
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman-docker.conf -t ${DESTDIR}${TMPFILESDIR}
+.PHONY: install.docker-docs
+install.docker-docs: docker-docs
+ install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
+
.PHONY: install.systemd
+ifneq (,$(findstring systemd,$(BUILDTAGS)))
install.systemd:
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR}
# User services
@@ -563,6 +567,9 @@ install.systemd:
install ${SELINUXOPT} -m 644 contrib/systemd/auto-update/podman-auto-update.timer ${DESTDIR}${SYSTEMDDIR}/podman-auto-update.timer
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.socket ${DESTDIR}${SYSTEMDDIR}/podman.socket
install ${SELINUXOPT} -m 644 contrib/systemd/system/podman.service ${DESTDIR}${SYSTEMDDIR}/podman.service
+else
+install.systemd:
+endif
.PHONY: uninstall
uninstall:
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 78611371d..f945c9c54 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -311,6 +311,15 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
netInfo.CNINetworks = []string{string(cc.HostConfig.NetworkMode)}
}
+ parsedTmp := make([]string, 0, len(cc.HostConfig.Tmpfs))
+ for path, options := range cc.HostConfig.Tmpfs {
+ finalString := path
+ if options != "" {
+ finalString += ":" + options
+ }
+ parsedTmp = append(parsedTmp, finalString)
+ }
+
// Note: several options here are marked as "don't need". this is based
// on speculation by Matt and I. We think that these come into play later
// like with start. We believe this is just a difference in podman/compat
@@ -367,7 +376,7 @@ func ContainerCreateToContainerCLIOpts(cc handlers.CreateContainerConfig, cgroup
StorageOpt: stringMaptoArray(cc.HostConfig.StorageOpt),
Sysctl: stringMaptoArray(cc.HostConfig.Sysctls),
Systemd: "true", // podman default
- TmpFS: stringMaptoArray(cc.HostConfig.Tmpfs),
+ TmpFS: parsedTmp,
TTY: cc.Config.Tty,
User: cc.Config.User,
UserNS: string(cc.HostConfig.UsernsMode),
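
The compat API hands tmpfs mounts over as a map of path to mount options, while podman's CLI-style options expect "path:options" strings; the loop above joins the two and drops the colon when no options are given. A minimal sketch of the same conversion, with made-up sample data:

    package main

    import "fmt"

    // tmpfsToCLI mirrors the conversion above: map entries become
    // "path" or "path:options" strings as used by podman's --tmpfs flag.
    func tmpfsToCLI(tmpfs map[string]string) []string {
        parsed := make([]string, 0, len(tmpfs))
        for path, options := range tmpfs {
            s := path
            if options != "" {
                s += ":" + options
            }
            parsed = append(parsed, s)
        }
        return parsed
    }

    func main() {
        // Hypothetical input resembling HostConfig.Tmpfs from the compat API.
        fmt.Println(tmpfsToCLI(map[string]string{"/mytmpfs": "rw", "/scratch": ""}))
        // Possible output (map order varies): [/mytmpfs:rw /scratch]
    }
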
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index d6bf761db..0e1c47399 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -509,6 +509,11 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
TransientMounts: flags.Volumes,
}
+ if c.Flag("timestamp").Changed {
+ timestamp := time.Unix(flags.Timestamp, 0).UTC()
+ opts.Timestamp = &timestamp
+ }
+
return &entities.BuildOptions{BuildOptions: opts}, nil
}
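
The --timestamp flag takes seconds since the Unix epoch and is only forwarded when the flag was explicitly set, so ordinary builds keep their real creation times. A small illustration of the conversion used above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // --timestamp 0 pins the image creation time to the epoch, in UTC,
        // which makes repeated builds reproducible.
        ts := time.Unix(0, 0).UTC()
        fmt.Println(ts) // 1970-01-01 00:00:00 +0000 UTC
    }
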
diff --git a/go.mod b/go.mod
index bec99270f..1972eb2d2 100644
--- a/go.mod
+++ b/go.mod
@@ -6,6 +6,7 @@ require (
github.com/BurntSushi/toml v0.3.1
github.com/blang/semver v3.5.1+incompatible
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
+ github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.8.1
diff --git a/go.sum b/go.sum
index f19b76d47..5dc568719 100644
--- a/go.sum
+++ b/go.sum
@@ -57,6 +57,8 @@ github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrS
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7 h1:ZmSAEFFtv3mepC4/Ze6E/hi6vGZlhRvywqp1l+w+qqw=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7/go.mod h1:Kp3ezoDVdhfYxZUtgs4OL8sVvgOLz3txk0sbQD0opvw=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
@@ -112,6 +114,8 @@ github.com/containers/ocicrypt v1.1.0 h1:A6UzSUFMla92uxO43O6lm86i7evMGjTY7wTKB2D
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/psgo v1.5.2 h1:3aoozst/GIwsrr/5jnFy3FrJay98uujPCu9lTuSZ/Cw=
github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
+github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
+github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk=
github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.24.6/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.25.0 h1:p0PLlQcWmtE+7XLfOCR0WuYyMTby1yozpI4DaKOtWTA=
@@ -333,6 +337,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -363,6 +369,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
@@ -423,6 +430,7 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -466,6 +474,7 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df/go.m
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/openshift/imagebuilder v1.1.8 h1:gjiIl8pbNj0eC4XWvFJHATdDvYm64p9/pLDLQWoLZPA=
@@ -594,6 +603,7 @@ github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmF
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 6f2eaeab2..122dd080f 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -1681,6 +1681,104 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
return err
}
+// SafeRewriteContainerConfig rewrites a container's configuration in a more
+// limited fashion than RewriteContainerConfig. It is marked as safe to use
+// under most circumstances, unlike RewriteContainerConfig.
+// DO NOT USE TO: Change container dependencies, change pod membership, change
+// locks, change container ID.
+func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !s.valid {
+ return define.ErrDBClosed
+ }
+
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if newName != "" && newCfg.Name != newName {
+ return errors.Wrapf(define.ErrInvalidArg, "new name %s for container %s must match name in given container config", newName, ctr.ID())
+ }
+ if newName != "" && oldName == "" {
+ return errors.Wrapf(define.ErrInvalidArg, "must provide old name for container if a new name is given")
+ }
+
+ newCfgJSON, err := json.Marshal(newCfg)
+ if err != nil {
+ return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+ }
+
+ db, err := s.getDBCon()
+ if err != nil {
+ return err
+ }
+ defer s.deferredCloseDBCon(db)
+
+ err = db.Update(func(tx *bolt.Tx) error {
+ if newName != "" {
+ idBkt, err := getIDBucket(tx)
+ if err != nil {
+ return err
+ }
+ namesBkt, err := getNamesBucket(tx)
+ if err != nil {
+ return err
+ }
+ allCtrsBkt, err := getAllCtrsBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ needsRename := true
+ if exists := namesBkt.Get([]byte(newName)); exists != nil {
+ if string(exists) == ctr.ID() {
+ // Name already associated with the ID
+ // of this container. No need for a
+ // rename.
+ needsRename = false
+ } else {
+ return errors.Wrapf(define.ErrCtrExists, "name %s already in use, cannot rename container %s", newName, ctr.ID())
+ }
+ }
+
+ if needsRename {
+ // We do have to remove the old name. The other
+ // buckets are ID-indexed so we just need to
+ // overwrite the values there.
+ if err := namesBkt.Delete([]byte(oldName)); err != nil {
+ return errors.Wrapf(err, "error deleting container %s old name from DB for rename", ctr.ID())
+ }
+ if err := idBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in ID bucket in DB", ctr.ID())
+ }
+ if err := namesBkt.Put([]byte(newName), []byte(ctr.ID())); err != nil {
+ return errors.Wrapf(err, "error adding new name %s for container %s in DB", newName, ctr.ID())
+ }
+ if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
+ }
+ }
+ }
+
+ ctrBkt, err := getCtrBucket(tx)
+ if err != nil {
+ return err
+ }
+
+ ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+ if ctrDB == nil {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+ }
+
+ if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+ return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+ }
+
+ return nil
+ })
+ return err
+}
+
// RewritePodConfig rewrites a pod's configuration.
// WARNING: This function is DANGEROUS. Do not use without reading the full
// comment on this function in state.go.
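
Inside a single Bolt transaction the rename deletes the old entry from the names bucket, repoints the ID and all-containers entries, adds the new name, and finally rewrites the config JSON under the container's bucket, so a crash cannot leave the name half-changed. A self-contained sketch of that bookkeeping with bbolt; the bucket and key names here are illustrative, not libpod's actual schema:

    package main

    import (
        "fmt"
        "log"

        bolt "go.etcd.io/bbolt"
    )

    func main() {
        db, err := bolt.Open("/tmp/rename-demo.db", 0o600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        id, oldName, newName := []byte("ctr1"), []byte("old"), []byte("new")

        // Seed an existing name -> ID mapping, then rename it in one
        // transaction: drop the old name, repoint the ID entry, register
        // the new name.
        err = db.Update(func(tx *bolt.Tx) error {
            names, err := tx.CreateBucketIfNotExists([]byte("names"))
            if err != nil {
                return err
            }
            ids, err := tx.CreateBucketIfNotExists([]byte("ids"))
            if err != nil {
                return err
            }
            if err := names.Put(oldName, id); err != nil {
                return err
            }
            if err := names.Delete(oldName); err != nil {
                return err
            }
            if err := ids.Put(id, newName); err != nil {
                return err
            }
            return names.Put(newName, id)
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("renamed atomically in a single transaction")
    }
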
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 2e0c24579..7e8226de4 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -13,6 +13,7 @@ import (
"strings"
"time"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/buildah/copier"
"github.com/containers/common/pkg/secrets"
"github.com/containers/podman/v3/libpod/define"
@@ -135,7 +136,7 @@ func (c *Container) ControlSocketPath() string {
// CheckpointPath returns the path to the directory containing the checkpoint
func (c *Container) CheckpointPath() string {
- return filepath.Join(c.bundlePath(), "checkpoint")
+ return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory)
}
// PreCheckpointPath returns the path to the directory containing the pre-checkpoint-images
@@ -2141,26 +2142,11 @@ func (c *Container) canWithPrevious() error {
return err
}
-// writeJSONFile marshalls and writes the given data to a JSON file
-// in the bundle path
-func (c *Container) writeJSONFile(v interface{}, file string) error {
- fileJSON, err := json.MarshalIndent(v, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
- }
- file = filepath.Join(c.bundlePath(), file)
- if err := ioutil.WriteFile(file, fileJSON, 0644); err != nil {
- return err
- }
-
- return nil
-}
-
// prepareCheckpointExport writes the config and spec to
// JSON files for later export
func (c *Container) prepareCheckpointExport() error {
// save live config
- if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
+ if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil {
return err
}
@@ -2171,7 +2157,7 @@ func (c *Container) prepareCheckpointExport() error {
logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
return err
}
- if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
+ if _, err := metadata.WriteJSONFile(g.Config, c.bundlePath(), metadata.SpecDumpFile); err != nil {
return err
}
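
The local writeJSONFile helper is replaced by checkpointctl's metadata.WriteJSONFile so the dump-file names and serialization live in one shared package. Its exact signature is not shown in this diff; judging from the call sites it takes a value, a directory, and a file name and returns the written path and an error. A rough stand-in for what such a helper does (a sketch, not checkpointctl's actual implementation):

    package main

    import (
        "encoding/json"
        "fmt"
        "io/ioutil"
        "path/filepath"
    )

    // writeJSONFile marshals v with indentation and writes it to dir/file,
    // returning the full path - roughly what the removed helper did.
    func writeJSONFile(v interface{}, dir, file string) (string, error) {
        out, err := json.MarshalIndent(v, "", " ")
        if err != nil {
            return "", err
        }
        path := filepath.Join(dir, file)
        if err := ioutil.WriteFile(path, out, 0o644); err != nil {
            return "", err
        }
        return path, nil
    }

    func main() {
        path, err := writeJSONFile(map[string]string{"id": "demo"}, "/tmp", "config.dump")
        fmt.Println(path, err)
    }
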
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index dc0418148..2684c2845 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -19,6 +19,7 @@ import (
"syscall"
"time"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
cnitypes "github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/buildah/pkg/chrootuser"
@@ -33,6 +34,7 @@ import (
"github.com/containers/podman/v3/libpod/events"
"github.com/containers/podman/v3/pkg/annotations"
"github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/criu"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/resolvconf"
@@ -884,80 +886,32 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
includeFiles := []string{
-		"checkpoint",
+		metadata.CheckpointDirectory,
 		"artifacts",
 		"ctr.log",
-		"config.dump",
-		"spec.dump",
-		"network.status"}
+		metadata.ConfigDumpFile,
+		metadata.SpecDumpFile,
+		metadata.NetworkStatusFile,
+	}
if options.PreCheckPoint {
includeFiles[0] = "pre-checkpoint"
}
// Get root file-system changes included in the checkpoint archive
- rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
- deleteFilesList := filepath.Join(c.bundlePath(), "deleted.files")
+ var addToTarFiles []string
if !options.IgnoreRootfs {
// To correctly track deleted files, let's go through the output of 'podman diff'
- tarFiles, err := c.runtime.GetDiff("", c.ID())
+ rootFsChanges, err := c.runtime.GetDiff("", c.ID())
if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
}
- var rootfsIncludeFiles []string
- var deletedFiles []string
-
- for _, file := range tarFiles {
- if file.Kind == archive.ChangeAdd {
- rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
- continue
- }
- if file.Kind == archive.ChangeDelete {
- deletedFiles = append(deletedFiles, file.Path)
- continue
- }
- fileName, err := os.Stat(file.Path)
- if err != nil {
- continue
- }
- if !fileName.IsDir() && file.Kind == archive.ChangeModify {
- rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
- continue
- }
- }
-
- if len(rootfsIncludeFiles) > 0 {
- rootfsTar, err := archive.TarWithOptions(c.state.Mountpoint, &archive.TarOptions{
- Compression: archive.Uncompressed,
- IncludeSourceDir: true,
- IncludeFiles: rootfsIncludeFiles,
- })
- if err != nil {
- return errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
- }
- rootfsDiffFile, err := os.Create(rootfsDiffPath)
- if err != nil {
- return errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
- }
- defer rootfsDiffFile.Close()
- _, err = io.Copy(rootfsDiffFile, rootfsTar)
- if err != nil {
- return err
- }
- includeFiles = append(includeFiles, "rootfs-diff.tar")
+		addToTarFiles, err = crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
+ if err != nil {
+ return err
}
- if len(deletedFiles) > 0 {
- formatJSON, err := json.MarshalIndent(deletedFiles, "", " ")
- if err != nil {
- return errors.Wrapf(err, "error creating delete files list file %q", deleteFilesList)
- }
- if err := ioutil.WriteFile(deleteFilesList, formatJSON, 0600); err != nil {
- return errors.Wrap(err, "error creating delete files list file")
- }
-
- includeFiles = append(includeFiles, "deleted.files")
- }
+ includeFiles = append(includeFiles, addToTarFiles...)
}
// Folder containing archived volumes that will be included in the export
@@ -1034,8 +988,9 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
return err
}
- os.Remove(rootfsDiffPath)
- os.Remove(deleteFilesList)
+ for _, file := range addToTarFiles {
+ os.Remove(filepath.Join(c.bundlePath(), file))
+ }
if !options.IgnoreVolumes {
os.RemoveAll(expVolDir)
@@ -1054,23 +1009,6 @@ func (c *Container) checkpointRestoreSupported() error {
return nil
}
-func (c *Container) checkpointRestoreLabelLog(fileName string) error {
- // Create the CRIU log file and label it
- dumpLog := filepath.Join(c.bundlePath(), fileName)
-
- logFile, err := os.OpenFile(dumpLog, os.O_CREATE, 0600)
- if err != nil {
- return errors.Wrap(err, "failed to create CRIU log file")
- }
- if err := logFile.Close(); err != nil {
- logrus.Error(err)
- }
- if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
- return err
- }
- return nil
-}
-
func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
if err := c.checkpointRestoreSupported(); err != nil {
return err
@@ -1084,7 +1022,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
}
- if err := c.checkpointRestoreLabelLog("dump.log"); err != nil {
+ if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil {
return err
}
@@ -1095,11 +1033,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
// Save network.status. This is needed to restore the container with
// the same IP. Currently limited to one IP address in a container
// with one interface.
- formatJSON, err := json.MarshalIndent(c.state.NetworkStatus, "", " ")
- if err != nil {
- return err
- }
- if err := ioutil.WriteFile(filepath.Join(c.bundlePath(), "network.status"), formatJSON, 0644); err != nil {
+ if _, err := metadata.WriteJSONFile(c.state.NetworkStatus, c.bundlePath(), metadata.NetworkStatusFile); err != nil {
return err
}
@@ -1115,7 +1049,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
if options.TargetFile != "" {
- if err = c.exportCheckpoint(options); err != nil {
+ if err := c.exportCheckpoint(options); err != nil {
return err
}
}
@@ -1135,8 +1069,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
cleanup := []string{
"dump.log",
"stats-dump",
- "config.dump",
- "spec.dump",
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
}
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
@@ -1151,28 +1085,13 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
}
func (c *Container) importCheckpoint(input string) error {
- archiveFile, err := os.Open(input)
- if err != nil {
- return errors.Wrap(err, "failed to open checkpoint archive for import")
- }
-
- defer archiveFile.Close()
- options := &archive.TarOptions{
- ExcludePatterns: []string{
- // config.dump and spec.dump are only required
- // container creation
- "config.dump",
- "spec.dump",
- },
- }
- err = archive.Untar(archiveFile, c.bundlePath(), options)
- if err != nil {
- return errors.Wrapf(err, "unpacking of checkpoint archive %s failed", input)
+ if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil {
+ return err
}
// Make sure the newly created config.json exists on disk
g := generate.Generator{Config: c.config.Spec}
- if err = c.saveSpec(g.Config); err != nil {
+ if err := c.saveSpec(g.Config); err != nil {
return errors.Wrap(err, "saving imported container specification for restore failed")
}
@@ -1221,7 +1140,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
}
- if err := c.checkpointRestoreLabelLog("restore.log"); err != nil {
+ if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
return err
}
@@ -1244,7 +1163,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
- networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
+ networkStatus, _, err := metadata.ReadContainerCheckpointNetworkStatus(c.bundlePath())
// If the restored container should get a new name, the IP address of
// the container will not be restored. This assumes that if a new name is
// specified, the container is restored multiple times.
@@ -1254,43 +1173,14 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
// container with the same IP address / MAC address as during checkpointing.
- defer networkStatusFile.Close()
- var networkStatus []*cnitypes.Result
- networkJSON, err := ioutil.ReadAll(networkStatusFile)
- if err != nil {
- return err
- }
- if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
- return err
- }
if !options.IgnoreStaticIP {
- // Take the first IP address
- var IP net.IP
- if len(networkStatus) > 0 {
- if len(networkStatus[0].IPs) > 0 {
- IP = networkStatus[0].IPs[0].Address.IP
- }
- }
- if IP != nil {
+ if IP := metadata.GetIPFromNetworkStatus(networkStatus); IP != nil {
// Tell CNI which IP address we want.
c.requestedIP = IP
}
}
if !options.IgnoreStaticMAC {
- // Take the first device with a defined sandbox.
- var MAC net.HardwareAddr
- if len(networkStatus) > 0 {
- for _, n := range networkStatus[0].Interfaces {
- if n.Sandbox != "" {
- MAC, err = net.ParseMAC(n.Mac)
- if err != nil {
- return err
- }
- break
- }
- }
- }
- if MAC != nil {
+ if MAC := metadata.GetMACFromNetworkStatus(networkStatus); MAC != nil {
// Tell CNI which MAC address we want.
c.requestedMAC = MAC
}
@@ -1398,36 +1288,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// Before actually restarting the container, apply the root file-system changes
if !options.IgnoreRootfs {
- rootfsDiffPath := filepath.Join(c.bundlePath(), "rootfs-diff.tar")
- if _, err := os.Stat(rootfsDiffPath); err == nil {
- // Only do this if a rootfs-diff.tar actually exists
- rootfsDiffFile, err := os.Open(rootfsDiffPath)
- if err != nil {
- return errors.Wrap(err, "failed to open root file-system diff file")
- }
- defer rootfsDiffFile.Close()
- if err := c.runtime.ApplyDiffTarStream(c.ID(), rootfsDiffFile); err != nil {
- return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
- }
+ if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil {
+ return err
}
- deletedFilesPath := filepath.Join(c.bundlePath(), "deleted.files")
- if _, err := os.Stat(deletedFilesPath); err == nil {
- var deletedFiles []string
- deletedFilesJSON, err := ioutil.ReadFile(deletedFilesPath)
- if err != nil {
- return errors.Wrapf(err, "failed to read deleted files file")
- }
- if err := json.Unmarshal(deletedFilesJSON, &deletedFiles); err != nil {
- return errors.Wrapf(err, "failed to unmarshal deleted files file %s", deletedFilesPath)
- }
- for _, deleteFile := range deletedFiles {
- // Using RemoveAll as deletedFiles, which is generated from 'podman diff'
- // lists completely deleted directories as a single entry: 'D /root'.
- err = os.RemoveAll(filepath.Join(c.state.Mountpoint, deleteFile))
- if err != nil {
- return errors.Wrapf(err, "failed to delete files from container %s during restore", c.ID())
- }
- }
+
+ if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil {
+ return err
}
}
@@ -1452,7 +1318,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err != nil {
logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err)
}
- cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status", "rootfs-diff.tar", "deleted.files"}
+ cleanup := [...]string{
+ "restore.log",
+ "dump.log",
+ "stats-dump",
+ "stats-restore",
+ metadata.NetworkStatusFile,
+ metadata.RootFsDiffTar,
+ metadata.DeletedFilesFile,
+ }
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
err = os.Remove(file)
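
Restoring the original IP and MAC now goes through checkpointctl helpers instead of open-coded JSON parsing: ReadContainerCheckpointNetworkStatus loads network.status from the bundle, and GetIPFromNetworkStatus / GetMACFromNetworkStatus pick the first usable address, much as the removed code did by hand. A hedged sketch of that selection logic over simplified stand-in types (not the real CNI result structs):

    package main

    import (
        "fmt"
        "net"
    )

    // Simplified stand-ins for the CNI result types stored in network.status.
    type iface struct {
        Mac     string
        Sandbox string
    }
    type result struct {
        IPs        []net.IP
        Interfaces []iface
    }

    // firstIP mimics "take the first IP address of the first result".
    func firstIP(status []*result) net.IP {
        if len(status) > 0 && len(status[0].IPs) > 0 {
            return status[0].IPs[0]
        }
        return nil
    }

    // firstMAC mimics "take the first interface that has a sandbox".
    func firstMAC(status []*result) net.HardwareAddr {
        if len(status) == 0 {
            return nil
        }
        for _, n := range status[0].Interfaces {
            if n.Sandbox != "" {
                if mac, err := net.ParseMAC(n.Mac); err == nil {
                    return mac
                }
            }
        }
        return nil
    }

    func main() {
        status := []*result{{
            IPs:        []net.IP{net.ParseIP("10.88.0.5")},
            Interfaces: []iface{{Mac: "aa:bb:cc:dd:ee:ff", Sandbox: "/var/run/netns/demo"}},
        }}
        fmt.Println(firstIP(status), firstMAC(status))
    }
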
diff --git a/libpod/container_log.go b/libpod/container_log.go
index a3b700004..c207df819 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -29,7 +29,6 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh
case define.NoLogging:
return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs")
case define.JournaldLogging:
- // TODO Skip sending logs until journald logs can be read
return c.readFromJournal(ctx, options, logChannel)
case define.JSONLogging:
// TODO provide a separate implementation of this when Conmon
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index 5792633b0..4a541b6e7 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -52,6 +52,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
if time.Now().Before(options.Since) {
return nil
}
+ // coreos/go-systemd/sdjournal expects a negative time.Duration for times in the past
config.Since = -time.Since(options.Since)
}
config.Matches = append(config.Matches, journal.Match{
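
go-systemd's sdjournal expresses the starting point as a time.Duration relative to now, so a moment in the past must be a negative duration; time.Since returns a positive value for past times, hence the negation above. A small illustration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        since := time.Now().Add(-10 * time.Minute) // e.g. podman logs --since 10m
        // time.Since(since) is roughly +10m; sdjournal wants -10m to seek backwards.
        fmt.Println(-time.Since(since).Round(time.Second))
    }
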
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 26f15d9c8..3875878ed 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -822,6 +822,46 @@ func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *Container
return nil
}
+// SafeRewriteContainerConfig rewrites a container's configuration.
+// It's safer than RewriteContainerConfig, but still has limitations. Please
+// read the comment in state.go before using.
+func (s *InMemoryState) SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+
+ if _, err := s.nameIndex.Get(newName); err == nil {
+ return errors.Wrapf(define.ErrCtrExists, "name %s is in use", newName)
+ }
+
+ // If the container does not exist, return error
+ stateCtr, ok := s.containers[ctr.ID()]
+ if !ok {
+ ctr.valid = false
+ return errors.Wrapf(define.ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+ }
+
+ // Change name in registry.
+ if s.namespace != "" {
+ nsIndex, ok := s.namespaceIndexes[s.namespace]
+ if !ok {
+ return define.ErrInternal
+ }
+ nsIndex.nameIndex.Release(oldName)
+ if err := nsIndex.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+ }
+ s.nameIndex.Release(oldName)
+ if err := s.nameIndex.Reserve(newName, ctr.ID()); err != nil {
+ return errors.Wrapf(err, "error registering name %s", newName)
+ }
+
+ stateCtr.config = newCfg
+
+ return nil
+}
+
// RewritePodConfig rewrites a pod's configuration.
// This function is DANGEROUS, even with in-memory state.
// Please read the full comment on it in state.go before using it.
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index de7630c06..492bc807a 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -28,6 +28,7 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/libpod/logs"
"github.com/containers/podman/v3/pkg/cgroups"
+ "github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/lookup"
"github.com/containers/podman/v3/pkg/rootless"
@@ -837,16 +838,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
// SupportsCheckpoint checks if the OCI runtime supports checkpointing
// containers.
func (r *ConmonOCIRuntime) SupportsCheckpoint() bool {
- // Check if the runtime implements checkpointing. Currently only
- // runc's checkpoint/restore implementation is supported.
- cmd := exec.Command(r.path, "checkpoint", "--help")
- if err := cmd.Start(); err != nil {
- return false
- }
- if err := cmd.Wait(); err == nil {
- return true
- }
- return false
+ return crutils.CRRuntimeSupportsCheckpointRestore(r.path)
}
// SupportsJSONErrors checks if the OCI runtime supports JSON-formatted error
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 8bf862bf2..301c4627d 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -74,8 +74,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
}
// RenameContainer renames the given container.
-// The given container object will be rendered unusable, and a new, renamed
-// Container will be returned.
+// Returns the same container, now under the new name, if successful.
func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
ctr.lock.Lock()
defer ctr.lock.Unlock()
@@ -88,26 +87,6 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
return nil, define.RegexError
}
- // Check if the name is available.
- // This is *100% NOT ATOMIC* so any failures in-flight will do
- // *VERY BAD THINGS* to the state. So we have to try and catch all we
- // can before starting.
- if _, err := r.state.LookupContainerID(newName); err == nil {
- return nil, errors.Wrapf(define.ErrCtrExists, "name %s is already in use by another container", newName)
- }
- if _, err := r.state.LookupPod(newName); err == nil {
- return nil, errors.Wrapf(define.ErrPodExists, "name %s is already in use by another pod", newName)
- }
-
- // TODO: Investigate if it is possible to remove this limitation.
- depCtrs, err := r.state.ContainerInUse(ctr)
- if err != nil {
- return nil, err
- }
- if len(depCtrs) > 0 {
- return nil, errors.Wrapf(define.ErrCtrExists, "cannot rename container %s as it is in use by other containers: %v", ctr.ID(), strings.Join(depCtrs, ","))
- }
-
// We need to pull an updated config, in case another rename fired and
// the config was re-written.
newConf, err := r.state.GetContainerConfig(ctr.ID())
@@ -116,95 +95,33 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s
}
ctr.config = newConf
- // TODO: This is going to fail if we have active exec sessions, too.
- // Investigate fixing that at a later date.
-
- var pod *Pod
- if ctr.config.Pod != "" {
- tmpPod, err := r.state.Pod(ctr.config.Pod)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving container %s pod", ctr.ID())
- }
- pod = tmpPod
- // Lock pod to ensure it's not removed while we're working
- pod.lock.Lock()
- defer pod.lock.Unlock()
- }
-
- // Lock all volumes to ensure they are not removed while we're working
- volsLocked := make(map[string]bool)
- for _, namedVol := range ctr.config.NamedVolumes {
- if volsLocked[namedVol.Name] {
- continue
- }
- vol, err := r.state.Volume(namedVol.Name)
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving volume used by container %s", ctr.ID())
- }
-
- volsLocked[vol.Name()] = true
- vol.lock.Lock()
- defer vol.lock.Unlock()
- }
-
logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
- // Step 1: remove the old container.
- if pod != nil {
- if err := r.state.RemoveContainerFromPod(pod, ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- } else {
- if err := r.state.RemoveContainer(ctr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
- }
- }
-
- // Step 2: Make a new container based on the old one.
- // TODO: Should we deep-copy the container config and state, to be safe?
- newCtr := new(Container)
- newCtr.config = ctr.config
- newCtr.state = ctr.state
- newCtr.lock = ctr.lock
- newCtr.ociRuntime = ctr.ociRuntime
- newCtr.runtime = r
- newCtr.rootlessSlirpSyncR = ctr.rootlessSlirpSyncR
- newCtr.rootlessSlirpSyncW = ctr.rootlessSlirpSyncW
- newCtr.rootlessPortSyncR = ctr.rootlessPortSyncR
- newCtr.rootlessPortSyncW = ctr.rootlessPortSyncW
-
- newCtr.valid = true
- newCtr.config.Name = newName
-
- // Step 3: Add that new container to the DB
- if pod != nil {
- if err := r.state.AddContainerToPod(pod, newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- } else {
- if err := r.state.AddContainer(newCtr); err != nil {
- return nil, errors.Wrapf(err, "error renaming container %s", newCtr.ID())
- }
- }
+ // Step 1: Alter the config. Save the old name, we need it to rewrite
+ // the config.
+ oldName := ctr.config.Name
+ ctr.config.Name = newName
- // Step 4: Save the new container, to force the state to be written to
- // the DB. This may not be necessary, depending on DB implementation,
- // but let's do it to be safe.
- if err := newCtr.save(); err != nil {
- return nil, err
+ // Step 2: rewrite the old container's config in the DB.
+ if err := r.state.SafeRewriteContainerConfig(ctr, oldName, ctr.config.Name, ctr.config); err != nil {
+ // Assume the rename failed.
+		// Set config back to the old name so it reflects what is actually
+		// present in the DB.
+ ctr.config.Name = oldName
+ return nil, errors.Wrapf(err, "error renaming container %s", ctr.ID())
}
- // Step 5: rename the container in c/storage.
+ // Step 3: rename the container in c/storage.
// This can fail if the name is already in use by a non-Podman
// container. This puts us in a bad spot - we've already renamed the
// container in Podman. We can swap the order, but then we have the
// opposite problem. Atomicity is a real problem here, with no easy
// solution.
- if err := r.store.SetNames(newCtr.ID(), []string{newCtr.Name()}); err != nil {
+ if err := r.store.SetNames(ctr.ID(), []string{ctr.Name()}); err != nil {
return nil, err
}
- return newCtr, nil
+ return ctr, nil
}
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
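
Rename no longer removes and re-creates the container: the config is rewritten in place under the same ID via SafeRewriteContainerConfig, and only then is the name changed in c/storage, with the in-memory name rolled back if the DB rewrite fails. The same rollback-on-failure shape in miniature (types and names here are purely illustrative):

    package main

    import (
        "errors"
        "fmt"
    )

    type config struct{ Name string }

    // rename applies the new name, attempts the persistent rewrite, and rolls
    // the in-memory config back if persisting fails - mirroring steps 1 and 2.
    func rename(cfg *config, newName string, persist func(oldName, newName string) error) error {
        oldName := cfg.Name
        cfg.Name = newName
        if err := persist(oldName, newName); err != nil {
            cfg.Name = oldName // keep memory in sync with what the DB holds
            return fmt.Errorf("error renaming container: %w", err)
        }
        return nil
    }

    func main() {
        cfg := &config{Name: "old"}
        err := rename(cfg, "new", func(oldName, newName string) error { return errors.New("db busy") })
        fmt.Println(err, cfg.Name) // rename fails, name stays "old"
    }
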
diff --git a/libpod/state.go b/libpod/state.go
index 074d21740..4b711bae9 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -155,6 +155,19 @@ type State interface {
// answer is this: use this only very sparingly, and only if you really
// know what you're doing.
RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+ // This is a more limited version of RewriteContainerConfig, though it
+ // comes with the added ability to alter a container's name. In exchange
+ // it loses the ability to manipulate the container's locks.
+ // It is not intended to be as restrictive as RewriteContainerConfig, in
+ // that we allow it to be run while other Podman processes are running,
+ // and without holding the alive lock.
+ // Container ID and pod membership still *ABSOLUTELY CANNOT* be altered.
+ // Also, you cannot change a container's dependencies - shared namespace
+ // containers or generic dependencies - at present. This is
+ // theoretically possible but not yet implemented.
+ // If newName is not "" the container will be renamed to the new name.
+ // The oldName parameter is only required if newName is given.
+ SafeRewriteContainerConfig(ctr *Container, oldName, newName string, newCfg *ContainerConfig) error
// PLEASE READ THE DESCRIPTION FOR RewriteContainerConfig BEFORE USING.
// This function is identical to RewriteContainerConfig, save for the
// fact that it is used with pods instead.
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index d79b100e8..009fcf7e8 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -104,6 +104,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Squash bool `schema:"squash"`
Tag []string `schema:"t"`
Target string `schema:"target"`
+ Timestamp int64 `schema:"timestamp"`
}{
Dockerfile: "Dockerfile",
Registry: "docker.io",
@@ -318,6 +319,11 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Target: query.Target,
}
+ if _, found := r.URL.Query()["timestamp"]; found {
+ ts := time.Unix(query.Timestamp, 0)
+ buildOptions.Timestamp = &ts
+ }
+
runCtx, cancel := context.WithCancel(context.Background())
var imageID string
go func() {
diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go
index 2a5cfba0b..2ac126644 100644
--- a/pkg/api/server/register_archive.go
+++ b/pkg/api/server/register_archive.go
@@ -91,7 +91,7 @@ func (s *APIServer) registerArchiveHandlers(r *mux.Router) error {
Libpod
*/
- // swagger:operation POST /libpod/containers/{name}/archive libpod libpodPutArchive
+ // swagger:operation PUT /libpod/containers/{name}/archive libpod libpodPutArchive
// ---
// summary: Copy files into a container
// description: Copy a tar archive of files into a container
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 6e16461e5..27706fd2c 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -185,6 +185,12 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if options.Squash {
params.Set("squash", "1")
}
+
+ if options.Timestamp != nil {
+ t := *options.Timestamp
+ params.Set("timestamp", strconv.FormatInt(t.Unix(), 10))
+ }
+
var (
headers map[string]string
err error
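
End to end, the bindings encode options.Timestamp as Unix seconds in the timestamp query parameter, and the compat build handler converts it back to a time.Time only when the parameter is actually present. A sketch of that round trip; the values are illustrative:

    package main

    import (
        "fmt"
        "net/url"
        "strconv"
        "time"
    )

    func main() {
        // Client side: encode the timestamp as Unix seconds.
        ts := time.Unix(0, 0).UTC()
        params := url.Values{}
        params.Set("timestamp", strconv.FormatInt(ts.Unix(), 10))

        // Server side: only honour the value if the parameter was sent at all.
        if raw, found := params["timestamp"]; found {
            secs, _ := strconv.ParseInt(raw[0], 10, 64)
            fmt.Println(time.Unix(secs, 0).UTC()) // 1970-01-01 00:00:00 +0000 UTC
        }
    }
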
diff --git a/pkg/checkpoint/checkpoint_restore.go b/pkg/checkpoint/checkpoint_restore.go
index 6285680c0..77a993128 100644
--- a/pkg/checkpoint/checkpoint_restore.go
+++ b/pkg/checkpoint/checkpoint_restore.go
@@ -4,15 +4,14 @@ import (
"context"
"io/ioutil"
"os"
- "path/filepath"
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/libpod/image"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/podman/v3/pkg/util"
"github.com/containers/storage/pkg/archive"
- jsoniter "github.com/json-iterator/go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -20,21 +19,6 @@ import (
// Prefixing the checkpoint/restore related functions with 'cr'
-// crImportFromJSON imports the JSON files stored in the exported
-// checkpoint tarball
-func crImportFromJSON(filePath string, v interface{}) error {
- content, err := ioutil.ReadFile(filePath)
- if err != nil {
- return errors.Wrap(err, "failed to read container definition for restore")
- }
- json := jsoniter.ConfigCompatibleWithStandardLibrary
- if err = json.Unmarshal(content, v); err != nil {
- return errors.Wrapf(err, "failed to unmarshal container definition %s for restore", filePath)
- }
-
- return nil
-}
-
// CRImportCheckpoint it the function which imports the information
// from checkpoint tarball and re-creates the container from that information
func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOptions entities.RestoreOptions) ([]*libpod.Container, error) {
@@ -48,13 +32,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
options := &archive.TarOptions{
// Here we only need the files config.dump and spec.dump
ExcludePatterns: []string{
- "checkpoint",
- "artifacts",
- "ctr.log",
- "rootfs-diff.tar",
- "network.status",
- "deleted.files",
"volumes",
+ "ctr.log",
+ "artifacts",
+ metadata.RootFsDiffTar,
+ metadata.DeletedFilesFile,
+ metadata.NetworkStatusFile,
+ metadata.CheckpointDirectory,
},
}
dir, err := ioutil.TempDir("", "checkpoint")
@@ -73,13 +57,13 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
// Load spec.dump from temporary directory
dumpSpec := new(spec.Spec)
- if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), dumpSpec); err != nil {
+ if _, err := metadata.ReadJSONFile(dumpSpec, dir, metadata.SpecDumpFile); err != nil {
return nil, err
}
// Load config.dump from temporary directory
config := new(libpod.ContainerConfig)
- if err = crImportFromJSON(filepath.Join(dir, "config.dump"), config); err != nil {
+ if _, err = metadata.ReadJSONFile(config, dir, metadata.ConfigDumpFile); err != nil {
return nil, err
}
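
Only config.dump and spec.dump are needed at this point, so the exported archive is unpacked into a temporary directory with everything else excluded; CRImportCheckpointWithoutConfig below applies the inverse filter when populating the container's bundle. A sketch of such a selective untar with containers/storage's archive package; the archive path is hypothetical:

    package main

    import (
        "io/ioutil"
        "log"
        "os"

        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        // Hypothetical export created by `podman container checkpoint -e ...`.
        f, err := os.Open("/tmp/checkpoint.tar.gz")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        dir, err := ioutil.TempDir("", "checkpoint")
        if err != nil {
            log.Fatal(err)
        }

        // Keep only the metadata needed to re-create the container config/spec;
        // the heavyweight pieces are imported later, into the container bundle.
        opts := &archive.TarOptions{ExcludePatterns: []string{
            "volumes", "ctr.log", "artifacts",
            "rootfs-diff.tar", "deleted.files", "network.status", "checkpoint",
        }}
        if err := archive.Untar(f, dir, opts); err != nil {
            log.Fatal(err)
        }
        log.Println("metadata extracted to", dir)
    }
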
diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
new file mode 100644
index 000000000..53ff55865
--- /dev/null
+++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
@@ -0,0 +1,191 @@
+package crutils
+
+import (
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ metadata "github.com/checkpoint-restore/checkpointctl/lib"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+)
+
+// This file mainly exists to make the checkpoint/restore functions
+// available for other users. One possible candidate would be CRI-O.
+
+// CRImportCheckpointWithoutConfig imports the checkpoint archive (input)
+// into the directory destination without "config.dump" and "spec.dump"
+func CRImportCheckpointWithoutConfig(destination, input string) error {
+ archiveFile, err := os.Open(input)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
+ }
+
+ defer archiveFile.Close()
+ options := &archive.TarOptions{
+ ExcludePatterns: []string{
+ // Import everything else besides the container config
+ metadata.ConfigDumpFile,
+ metadata.SpecDumpFile,
+ },
+ }
+ if err = archive.Untar(archiveFile, destination, options); err != nil {
+ return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
+ }
+
+ return nil
+}
+
+// CRRemoveDeletedFiles loads the list of deleted files and if
+// it exists deletes all files listed.
+func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) error {
+ deletedFiles, _, err := metadata.ReadContainerCheckpointDeletedFiles(baseDirectory)
+ if os.IsNotExist(errors.Unwrap(errors.Unwrap(err))) {
+ // No files to delete. Just return
+ return nil
+ }
+
+ if err != nil {
+ return errors.Wrapf(err, "failed to read deleted files file")
+ }
+
+ for _, deleteFile := range deletedFiles {
+ // Using RemoveAll as deletedFiles, which is generated from 'podman diff'
+ // lists completely deleted directories as a single entry: 'D /root'.
+ if err := os.RemoveAll(filepath.Join(containerRootDirectory, deleteFile)); err != nil {
+ return errors.Wrapf(err, "failed to delete files from container %s during restore", id)
+ }
+ }
+
+ return nil
+}
+
+// CRApplyRootFsDiffTar applies the tar archive found in baseDirectory with the
+// root file system changes on top of containerRootDirectory
+func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error {
+ rootfsDiffPath := filepath.Join(baseDirectory, metadata.RootFsDiffTar)
+ if _, err := os.Stat(rootfsDiffPath); err != nil {
+ // Only do this if a rootfs-diff.tar actually exists
+ return nil
+ }
+
+ rootfsDiffFile, err := os.Open(rootfsDiffPath)
+ if err != nil {
+ return errors.Wrap(err, "failed to open root file-system diff file")
+ }
+ defer rootfsDiffFile.Close()
+
+ if err := archive.Untar(rootfsDiffFile, containerRootDirectory, nil); err != nil {
+ return errors.Wrapf(err, "failed to apply root file-system diff file %s", rootfsDiffPath)
+ }
+
+ return nil
+}
+
+// CRCreateRootFsDiffTar goes through the 'changes' and can create two files:
+// * metadata.RootFsDiffTar will contain all new and changed files
+// * metadata.DeletedFilesFile will contain a list of deleted files
+// With these two files it is possible to restore the container file system to the same
+// state it was during checkpointing.
+// Changes to directories (owner, mode) are not handled.
+func CRCreateRootFsDiffTar(changes *[]archive.Change, mountPoint, destination string) (includeFiles []string, err error) {
+ if len(*changes) == 0 {
+ return includeFiles, nil
+ }
+
+ var rootfsIncludeFiles []string
+ var deletedFiles []string
+
+ rootfsDiffPath := filepath.Join(destination, metadata.RootFsDiffTar)
+
+ for _, file := range *changes {
+ if file.Kind == archive.ChangeAdd {
+ rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
+ continue
+ }
+ if file.Kind == archive.ChangeDelete {
+ deletedFiles = append(deletedFiles, file.Path)
+ continue
+ }
+ fileName, err := os.Stat(file.Path)
+ if err != nil {
+ continue
+ }
+ if !fileName.IsDir() && file.Kind == archive.ChangeModify {
+ rootfsIncludeFiles = append(rootfsIncludeFiles, file.Path)
+ continue
+ }
+ }
+
+ if len(rootfsIncludeFiles) > 0 {
+ rootfsTar, err := archive.TarWithOptions(mountPoint, &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ IncludeSourceDir: true,
+ IncludeFiles: rootfsIncludeFiles,
+ })
+ if err != nil {
+ return includeFiles, errors.Wrapf(err, "error exporting root file-system diff to %q", rootfsDiffPath)
+ }
+ rootfsDiffFile, err := os.Create(rootfsDiffPath)
+ if err != nil {
+ return includeFiles, errors.Wrapf(err, "error creating root file-system diff file %q", rootfsDiffPath)
+ }
+ defer rootfsDiffFile.Close()
+ if _, err = io.Copy(rootfsDiffFile, rootfsTar); err != nil {
+ return includeFiles, err
+ }
+
+ includeFiles = append(includeFiles, metadata.RootFsDiffTar)
+ }
+
+ if len(deletedFiles) == 0 {
+ return includeFiles, nil
+ }
+
+ if _, err := metadata.WriteJSONFile(deletedFiles, destination, metadata.DeletedFilesFile); err != nil {
+		return includeFiles, err
+ }
+
+ includeFiles = append(includeFiles, metadata.DeletedFilesFile)
+
+ return includeFiles, nil
+}
+
+// CRCreateFileWithLabel creates an empty file and sets the corresponding ('fileLabel')
+// SELinux label on the file.
+// This is necessary for CRIU log files because CRIU infects the processes in
+// the container with a 'parasite' and this will also try to write to the log files
+// from the context of the container processes.
+func CRCreateFileWithLabel(directory, fileName, fileLabel string) error {
+ logFileName := filepath.Join(directory, fileName)
+
+ logFile, err := os.OpenFile(logFileName, os.O_CREATE, 0o600)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create file %q", logFileName)
+ }
+ defer logFile.Close()
+ if err = label.SetFileLabel(logFileName, fileLabel); err != nil {
+ return errors.Wrapf(err, "failed to label file %q", logFileName)
+ }
+
+ return nil
+}
+
+// CRRuntimeSupportsCheckpointRestore tests if the given runtime at 'runtimePath'
+// supports checkpointing. The checkpoint/restore interface has no formal
+// definition, but crun implements all commands just as runc does. What runc
+// does is the de-facto definition of the checkpoint/restore interface.
+func CRRuntimeSupportsCheckpointRestore(runtimePath string) bool {
+ // Check if the runtime implements checkpointing. Currently only
+ // runc's and crun's checkpoint/restore implementation is supported.
+ cmd := exec.Command(runtimePath, "checkpoint", "--help")
+ if err := cmd.Start(); err != nil {
+ return false
+ }
+ if err := cmd.Wait(); err == nil {
+ return true
+ }
+ return false
+}
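
The new crutils package deliberately avoids libpod types so other consumers (the comment above mentions CRI-O) can reuse it; every helper takes plain paths. A hedged sketch of how a caller might combine the rootfs-diff helpers, assuming a checkpointed container's bundle directory and mounted root file system already exist at the given (illustrative) paths:

    package main

    import (
        "log"

        "github.com/containers/podman/v3/pkg/checkpoint/crutils"
        "github.com/containers/storage/pkg/archive"
    )

    func main() {
        bundle := "/var/lib/containers/demo-bundle" // illustrative bundle directory
        mount := "/var/lib/containers/demo-merged"  // illustrative rootfs mount point

        // Checkpoint side: turn a `podman diff`-style change list into
        // rootfs-diff.tar and deleted.files inside the bundle.
        changes := []archive.Change{{Path: "/etc/motd", Kind: archive.ChangeModify}}
        files, err := crutils.CRCreateRootFsDiffTar(&changes, mount, bundle)
        if err != nil {
            log.Fatal(err)
        }
        log.Println("extra files to add to the export archive:", files)

        // Restore side: replay the diff, then remove files deleted since checkpoint.
        if err := crutils.CRApplyRootFsDiffTar(bundle, mount); err != nil {
            log.Fatal(err)
        }
        if err := crutils.CRRemoveDeletedFiles("demo-id", bundle, mount); err != nil {
            log.Fatal(err)
        }
    }
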
diff --git a/test/apiv2/44-mounts.at b/test/apiv2/44-mounts.at
new file mode 100644
index 000000000..fe202576d
--- /dev/null
+++ b/test/apiv2/44-mounts.at
@@ -0,0 +1,21 @@
+# -*- sh -*-
+
+podman pull $IMAGE &>/dev/null
+
+# Test various HostConfig options
+tmpfs_name="/mytmpfs"
+t POST containers/create?name=hostconfig_test '"Image":"'$IMAGE'","Cmd":["df"],"HostConfig":{"TmpFs":{"'$tmpfs_name'":"rw"}}' 201 \
+ .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+
+# Prior to #9512, the tmpfs would be called '/mytmpfs=rw', with the '=rw'
+# wrongly kept as part of the mount path.
+t GET containers/${cid}/json 200 \
+ .HostConfig.Tmpfs[\"${tmpfs_name}\"]~rw,
+
+# Run the container, verify output
+t POST containers/${cid}/start '' 204
+t POST containers/${cid}/wait '' 200
+t GET containers/${cid}/logs?stdout=true 200
+
+like "$(<$WORKDIR/curl.result.out)" ".* ${tmpfs_name}" \
+ "'df' output includes tmpfs name"
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
index c733db61c..4839d66ec 100644
--- a/test/e2e/build_test.go
+++ b/test/e2e/build_test.go
@@ -532,4 +532,20 @@ RUN grep CapEff /proc/self/status`
// Then
Expect(session.ExitCode()).To(Equal(125))
})
+
+ It("podman build --timestamp flag", func() {
+ containerfile := `FROM quay.io/libpod/alpine:latest
+RUN echo hello`
+
+ containerfilePath := filepath.Join(podmanTest.TempDir, "Containerfile")
+ err := ioutil.WriteFile(containerfilePath, []byte(containerfile), 0755)
+ Expect(err).To(BeNil())
+ session := podmanTest.Podman([]string{"build", "-t", "test", "--timestamp", "0", "--file", containerfilePath, podmanTest.TempDir})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"image", "inspect", "--format", "{{ .Created }}", "test"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.OutputToString()).To(Equal("1970-01-01 00:00:00 +0000 UTC"))
+ })
})
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 8f695279a..3051031a5 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -37,16 +37,18 @@ var _ = Describe("Podman logs", func() {
})
for _, log := range []string{"k8s-file", "journald", "json-file"} {
+
It("all lines: "+log, func() {
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
logc.WaitWithDefaultTimeout()
Expect(logc).To(Exit(0))
-
cid := logc.OutputToString()
+
results := podmanTest.Podman([]string{"logs", cid})
results.WaitWithDefaultTimeout()
Expect(results).To(Exit(0))
Expect(len(results.OutputToStringArray())).To(Equal(3))
+ Expect(results.OutputToString()).To(Equal("podman podman podman"))
})
It("tail two lines: "+log, func() {
@@ -73,6 +75,18 @@ var _ = Describe("Podman logs", func() {
Expect(len(results.OutputToStringArray())).To(Equal(0))
})
+ It("tail 99 lines: "+log, func() {
+ logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc).To(Exit(0))
+ cid := logc.OutputToString()
+
+ results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
+ results.WaitWithDefaultTimeout()
+ Expect(results).To(Exit(0))
+ Expect(len(results.OutputToStringArray())).To(Equal(3))
+ })
+
It("tail 800 lines: "+log, func() {
logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "i=1; while [ \"$i\" -ne 1000 ]; do echo \"line $i\"; i=$((i + 1)); done"})
logc.WaitWithDefaultTimeout()
@@ -158,78 +172,6 @@ var _ = Describe("Podman logs", func() {
Expect(results).To(Exit(0))
})
- It("for container: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- Expect(results.OutputToString()).To(Equal("podman podman podman"))
- })
-
- It("tail two lines: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
- results := podmanTest.Podman([]string{"logs", "--tail", "2", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(2))
- })
-
- It("tail 99 lines: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--tail", "99", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- })
-
- It("tail 2 lines with timestamps: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--tail", "2", "-t", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(2))
- })
-
- It("since time 2017-08-07: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--since", "2017-08-07T10:10:09.056611202-04:00", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- })
-
- It("with duration 10m: "+log, func() {
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
- cid := logc.OutputToString()
-
- results := podmanTest.Podman([]string{"logs", "--since", "10m", cid})
- results.WaitWithDefaultTimeout()
- Expect(results).To(Exit(0))
- Expect(len(results.OutputToStringArray())).To(Equal(3))
- })
-
It("streaming output: "+log, func() {
containerName := "logs-f-rm"
@@ -259,17 +201,6 @@ var _ = Describe("Podman logs", func() {
}
})
- It("podman logs with log-driver=none errors: "+log, func() {
- ctrName := "logsctr"
- logc := podmanTest.Podman([]string{"run", "--log-driver", log, "--name", ctrName, "-d", "--log-driver", "none", ALPINE, "top"})
- logc.WaitWithDefaultTimeout()
- Expect(logc).To(Exit(0))
-
- logs := podmanTest.Podman([]string{"logs", "-f", ctrName})
- logs.WaitWithDefaultTimeout()
- Expect(logs).To(Not(Exit(0)))
- })
-
It("follow output stopped container: "+log, func() {
containerName := "logs-f"
@@ -373,4 +304,15 @@ var _ = Describe("Podman logs", func() {
Expect(err).To(BeNil())
Expect(string(out)).To(ContainSubstring(containerName))
})
+
+ It("podman logs with log-driver=none errors", func() {
+ ctrName := "logsctr"
+ logc := podmanTest.Podman([]string{"run", "--name", ctrName, "-d", "--log-driver", "none", ALPINE, "top"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc).To(Exit(0))
+
+ logs := podmanTest.Podman([]string{"logs", "-f", ctrName})
+ logs.WaitWithDefaultTimeout()
+ Expect(logs).To(Not(Exit(0)))
+ })
})
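The consolidated specs above are generated once per log driver by ranging over the driver names inside the Describe block. A minimal, self-contained sketch of that pattern follows (hedged: the Ginkgo v1 boilerplate and the explicit `log := log` shadowing are illustrative and not taken from the patch):

package logs_sketch

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// TestLogsSketch wires Gomega into Ginkgo and runs the generated specs.
func TestLogsSketch(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "logs sketch")
}

var _ = Describe("per-driver log specs (sketch)", func() {
	for _, log := range []string{"k8s-file", "journald", "json-file"} {
		log := log // pin the loop variable so each spec closure sees its own driver
		It("uses driver "+log, func() {
			Expect(log).NotTo(BeEmpty())
		})
	}
})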
diff --git a/test/e2e/rename_test.go b/test/e2e/rename_test.go
index f19413221..14696c0f6 100644
--- a/test/e2e/rename_test.go
+++ b/test/e2e/rename_test.go
@@ -89,4 +89,25 @@ var _ = Describe("podman rename", func() {
Expect(ps.ExitCode()).To(Equal(0))
Expect(ps.OutputToString()).To(ContainSubstring(newName))
})
+
+ It("Rename a running container with exec sessions", func() {
+ ctrName := "testCtr"
+ ctr := podmanTest.Podman([]string{"run", "-d", "--name", ctrName, ALPINE, "top"})
+ ctr.WaitWithDefaultTimeout()
+ Expect(ctr.ExitCode()).To(Equal(0))
+
+ exec := podmanTest.Podman([]string{"exec", "-d", ctrName, "top"})
+ exec.WaitWithDefaultTimeout()
+ Expect(exec.ExitCode()).To(Equal(0))
+
+ newName := "aNewName"
+ rename := podmanTest.Podman([]string{"rename", ctrName, newName})
+ rename.WaitWithDefaultTimeout()
+ Expect(rename.ExitCode()).To(Equal(0))
+
+ ps := podmanTest.Podman([]string{"ps", "-aq", "--filter", fmt.Sprintf("name=%s", newName), "--format", "{{ .Names }}"})
+ ps.WaitWithDefaultTimeout()
+ Expect(ps.ExitCode()).To(Equal(0))
+ Expect(ps.OutputToString()).To(ContainSubstring(newName))
+ })
})
diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE b/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/checkpointctl/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go
new file mode 100644
index 000000000..1c74903ad
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: Apache-2.0
+
+package metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path/filepath"
+ "time"
+
+ cnitypes "github.com/containernetworking/cni/pkg/types/current"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+type CheckpointedPod struct {
+ PodUID string `json:"io.kubernetes.pod.uid,omitempty"`
+ ID string `json:"SandboxID,omitempty"`
+ Name string `json:"io.kubernetes.pod.name,omitempty"`
+ TerminationGracePeriod int64 `json:"io.kubernetes.pod.terminationGracePeriod,omitempty"`
+ Namespace string `json:"io.kubernetes.pod.namespace,omitempty"`
+ ConfigSource string `json:"kubernetes.io/config.source,omitempty"`
+ ConfigSeen string `json:"kubernetes.io/config.seen,omitempty"`
+ Manager string `json:"io.container.manager,omitempty"`
+ Containers []CheckpointedContainer `json:"Containers"`
+ HostIP string `json:"hostIP,omitempty"`
+ PodIP string `json:"podIP,omitempty"`
+ PodIPs []string `json:"podIPs,omitempty"`
+}
+
+type CheckpointedContainer struct {
+ Name string `json:"io.kubernetes.container.name,omitempty"`
+ ID string `json:"id,omitempty"`
+ TerminationMessagePath string `json:"io.kubernetes.container.terminationMessagePath,omitempty"`
+ TerminationMessagePolicy string `json:"io.kubernetes.container.terminationMessagePolicy,omitempty"`
+ RestartCounter int32 `json:"io.kubernetes.container.restartCount,omitempty"`
+ TerminationMessagePathUID string `json:"terminationMessagePathUID,omitempty"`
+ Image string `json:"Image"`
+}
+
+type CheckpointMetadata struct {
+ Version int `json:"version"`
+ CheckpointedPods []CheckpointedPod
+}
+
+const (
+ // kubelet archive
+ CheckpointedPodsFile = "checkpointed.pods"
+ // container archive
+ ConfigDumpFile = "config.dump"
+ SpecDumpFile = "spec.dump"
+ NetworkStatusFile = "network.status"
+ CheckpointDirectory = "checkpoint"
+ RootFsDiffTar = "rootfs-diff.tar"
+ DeletedFilesFile = "deleted.files"
+ // pod archive
+ PodOptionsFile = "pod.options"
+ PodDumpFile = "pod.dump"
+)
+
+type CheckpointType int
+
+const (
+ // The checkpoint archive contains a kubelet checkpoint
+ // One or multiple pods and kubelet metadata (checkpointed.pods)
+ Kubelet CheckpointType = iota
+ // The checkpoint archive contains one pod including one or multiple containers
+ Pod
+ // The checkpoint archive contains a single container
+ Container
+ Unknown
+)
+
+// This is a reduced copy of what Podman uses to store checkpoint metadata
+type ContainerConfig struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ RootfsImageName string `json:"rootfsImageName,omitempty"`
+ OCIRuntime string `json:"runtime,omitempty"`
+ CreatedTime time.Time `json:"createdTime"`
+}
+
+// This is metadata stored inside of a Pod checkpoint archive
+type CheckpointedPodOptions struct {
+ Version int `json:"version"`
+ Containers []string `json:"containers,omitempty"`
+ MountLabel string `json:"mountLabel"`
+ ProcessLabel string `json:"processLabel"`
+}
+
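+// DetectCheckpointArchiveType checks for the kubelet metadata file (checkpointed.pods)
+// and reports whether the archive was written by the kubelet or holds a single container.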
+func DetectCheckpointArchiveType(checkpointDirectory string) (CheckpointType, error) {
+ _, err := os.Stat(filepath.Join(checkpointDirectory, CheckpointedPodsFile))
+ if err != nil && !os.IsNotExist(err) {
+ return Unknown, errors.Wrapf(err, "Failed to access %q", CheckpointedPodsFile)
+ }
+ if os.IsNotExist(err) {
+ return Container, nil
+ }
+
+ return Kubelet, nil
+}
+
+func ReadContainerCheckpointSpecDump(checkpointDirectory string) (*spec.Spec, string, error) {
+ var specDump spec.Spec
+ specDumpFile, err := ReadJSONFile(&specDump, checkpointDirectory, SpecDumpFile)
+
+ return &specDump, specDumpFile, err
+}
+
+func ReadContainerCheckpointConfigDump(checkpointDirectory string) (*ContainerConfig, string, error) {
+ var containerConfig ContainerConfig
+ configDumpFile, err := ReadJSONFile(&containerConfig, checkpointDirectory, ConfigDumpFile)
+
+ return &containerConfig, configDumpFile, err
+}
+
+func ReadContainerCheckpointDeletedFiles(checkpointDirectory string) ([]string, string, error) {
+ var deletedFiles []string
+ deletedFilesFile, err := ReadJSONFile(&deletedFiles, checkpointDirectory, DeletedFilesFile)
+
+ return deletedFiles, deletedFilesFile, err
+}
+
+func ReadContainerCheckpointNetworkStatus(checkpointDirectory string) ([]*cnitypes.Result, string, error) {
+ var networkStatus []*cnitypes.Result
+ networkStatusFile, err := ReadJSONFile(&networkStatus, checkpointDirectory, NetworkStatusFile)
+
+ return networkStatus, networkStatusFile, err
+}
+
+func ReadKubeletCheckpoints(checkpointsDirectory string) (*CheckpointMetadata, string, error) {
+ var checkpointMetadata CheckpointMetadata
+ checkpointMetadataPath, err := ReadJSONFile(&checkpointMetadata, checkpointsDirectory, CheckpointedPodsFile)
+
+ return &checkpointMetadata, checkpointMetadataPath, err
+}
+
+func GetIPFromNetworkStatus(networkStatus []*cnitypes.Result) net.IP {
+ if len(networkStatus) == 0 {
+ return nil
+ }
+ // Take the first IP address
+ if len(networkStatus[0].IPs) == 0 {
+ return nil
+ }
+ IP := networkStatus[0].IPs[0].Address.IP
+
+ return IP
+}
+
+func GetMACFromNetworkStatus(networkStatus []*cnitypes.Result) net.HardwareAddr {
+ if len(networkStatus) == 0 {
+ return nil
+ }
+ // Take the first device with a defined sandbox
+ if len(networkStatus[0].Interfaces) == 0 {
+ return nil
+ }
+ var MAC net.HardwareAddr
+ for _, n := range networkStatus[0].Interfaces {
+ if n.Sandbox != "" {
+ MAC, _ = net.ParseMAC(n.Mac)
+
+ break
+ }
+ }
+
+ return MAC
+}
+
+// WriteJSONFile marshals and writes the given data to a JSON file
+func WriteJSONFile(v interface{}, dir, file string) (string, error) {
+ fileJSON, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return "", errors.Wrapf(err, "Error marshalling JSON")
+ }
+ file = filepath.Join(dir, file)
+ if err := ioutil.WriteFile(file, fileJSON, 0o600); err != nil {
+ return "", errors.Wrapf(err, "Error writing to %q", file)
+ }
+
+ return file, nil
+}
+
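+// ReadJSONFile reads the named JSON file from dir, unmarshals it into v,
+// and returns the full path that was read.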
+func ReadJSONFile(v interface{}, dir, file string) (string, error) {
+ file = filepath.Join(dir, file)
+ content, err := ioutil.ReadFile(file)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to read %s", file)
+ }
+ if err = json.Unmarshal(content, v); err != nil {
+ return "", errors.Wrapf(err, "failed to unmarshal %s", file)
+ }
+
+ return file, nil
+}
+
+func WriteKubeletCheckpointsMetadata(checkpointMetadata *CheckpointMetadata, dir string) error {
+ _, err := WriteJSONFile(checkpointMetadata, dir, CheckpointedPodsFile)
+
+ return err
+}
+
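+// ByteToString renders a byte count with binary (IEC) prefixes, e.g. 1536 -> "1.5 KiB".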
+func ByteToString(b int64) string {
+ const unit = 1024
+ if b < unit {
+ return fmt.Sprintf("%d B", b)
+ }
+ div, exp := int64(unit), 0
+ for n := b / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%.1f %ciB",
+ float64(b)/float64(div), "KMGTPE"[exp])
+}
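The helpers above form a small, self-contained API for the on-disk checkpoint metadata. A hedged usage sketch follows (not part of the patch; the import path matches the vendored module shown in modules.txt, while checkpointDir is a hypothetical directory into which a container checkpoint archive has already been unpacked):

package main

import (
	"fmt"
	"log"

	metadata "github.com/checkpoint-restore/checkpointctl/lib"
)

func main() {
	// Hypothetical location of an already-unpacked checkpoint archive.
	checkpointDir := "/tmp/checkpoint"

	// Distinguish a kubelet archive from a single-container archive.
	kind, err := metadata.DetectCheckpointArchiveType(checkpointDir)
	if err != nil {
		log.Fatal(err)
	}
	if kind != metadata.Container {
		log.Fatal("expected a single-container checkpoint archive")
	}

	// config.dump holds the reduced Podman container configuration.
	config, configFile, err := metadata.ReadContainerCheckpointConfigDump(checkpointDir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("container %s (%s) read from %s, created %s\n",
		config.Name, config.ID, configFile, config.CreatedTime)

	// ByteToString renders sizes with binary prefixes, e.g. 1536 -> "1.5 KiB".
	fmt.Println(metadata.ByteToString(1536))
}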
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5e2139c9f..1d192693d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -40,6 +40,8 @@ github.com/beorn7/perks/quantile
github.com/blang/semver
# github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/buger/goterm
+# github.com/checkpoint-restore/checkpointctl v0.0.0-20210301084134-a2024f5584e7
+github.com/checkpoint-restore/checkpointctl/lib
# github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/checkpoint-restore/go-criu
github.com/checkpoint-restore/go-criu/rpc