author	baude <bbaude@redhat.com>	2019-04-02 08:37:11 -0500
committer	baude <bbaude@redhat.com>	2019-04-11 10:13:58 -0500
commit	72d08d4c61c1466a4e10fc46c29cb0a14893f923 (patch)
tree	f9661e51323f6aa2c66fa4d27b777d3a8340007d /pkg/adapter/containers_remote.go
parent	4596c39655f7ff5e741adbc97aaa49bb3a9d453e (diff)
remote-client checkpoint/restore

Add the ability for the remote client to checkpoint and restore containers.

Signed-off-by: baude <bbaude@redhat.com>
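For context, here is a minimal sketch (not part of this patch) of how a caller might drive the new Checkpoint method. The three option fields are exactly the ones the patch forwards over varlink; the helper name, the literal values, and the import paths (assuming the github.com/containers/libpod layout of this era) are illustrative assumptions:

	package example

	import (
		"github.com/containers/libpod/cmd/podman/cliconfig"
		"github.com/containers/libpod/libpod"
		"github.com/containers/libpod/pkg/adapter"
	)

	// checkpointRunning is a hypothetical helper, not part of this patch.
	// It fills in the three checkpoint options that the remote client
	// forwards over varlink via iopodman.ContainerCheckpoint().
	func checkpointRunning(runtime *adapter.LocalRuntime, c *cliconfig.CheckpointValues) error {
		options := libpod.ContainerCheckpointOptions{
			Keep:           true,  // keep checkpoint artifacts after checkpointing
			KeepRunning:    false, // stop the container once the checkpoint is written
			TCPEstablished: false, // do not checkpoint established TCP connections (assumed semantics)
		}
		return runtime.Checkpoint(c, options)
	}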
Diffstat (limited to 'pkg/adapter/containers_remote.go')
-rw-r--r--	pkg/adapter/containers_remote.go	72
1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 1ae39749f..31727fd0e 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -539,3 +539,75 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
}
return <-errChan
}
+
+// Checkpoint one or more containers
+func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues, options libpod.ContainerCheckpointOptions) error {
+	var lastError error
+	ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+	if err != nil {
+		return err
+	}
+	if c.All {
+		// We don't have a great way to get only the running containers, so get them all
+		// and check their state, because checkpointing a stopped container is an error.
+		var runningIds []string
+		for _, id := range ids {
+			ctr, err := r.LookupContainer(id)
+			if err != nil {
+				return err
+			}
+			if ctr.state.State == libpod.ContainerStateRunning {
+				runningIds = append(runningIds, id)
+			}
+		}
+		ids = runningIds
+	}
+
+	for _, id := range ids {
+		if _, err := iopodman.ContainerCheckpoint().Call(r.Conn, id, options.Keep, options.KeepRunning, options.TCPEstablished); err != nil {
+			if lastError != nil {
+				fmt.Fprintln(os.Stderr, lastError)
+			}
+			lastError = errors.Wrapf(err, "failed to checkpoint container %v", id)
+		} else {
+			fmt.Println(id)
+		}
+	}
+	return lastError
+}
+
+// Restore one or more containers
+func (r *LocalRuntime) Restore(c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
+	var lastError error
+	ids, err := iopodman.GetContainersByContext().Call(r.Conn, c.All, c.Latest, c.InputArgs)
+	if err != nil {
+		return err
+	}
+	if c.All {
+		// We don't have a great way to get only the exited containers, so get them all
+		// and check their state, because restoring a running container is an error.
+		var exitedIDs []string
+		for _, id := range ids {
+			ctr, err := r.LookupContainer(id)
+			if err != nil {
+				return err
+			}
+			if ctr.state.State != libpod.ContainerStateRunning {
+				exitedIDs = append(exitedIDs, id)
+			}
+		}
+		ids = exitedIDs
+	}
+
+	for _, id := range ids {
+		if _, err := iopodman.ContainerRestore().Call(r.Conn, id, options.Keep, options.TCPEstablished); err != nil {
+			if lastError != nil {
+				fmt.Fprintln(os.Stderr, lastError)
+			}
+			lastError = errors.Wrapf(err, "failed to restore container %v", id)
+		} else {
+			fmt.Println(id)
+		}
+	}
+	return lastError
+}
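
A matching sketch for the restore path, using the same imports as the example above. Note that the patch forwards only Keep and TCPEstablished to iopodman.ContainerRestore(); KeepRunning has no meaning when restoring. The helper name and values are again assumptions:

	// restoreExited is a hypothetical helper, not part of this patch.
	func restoreExited(runtime *adapter.LocalRuntime, c *cliconfig.RestoreValues) error {
		options := libpod.ContainerCheckpointOptions{
			Keep:           true,  // keep checkpoint artifacts after a successful restore
			TCPEstablished: false, // do not restore established TCP connections (assumed semantics)
		}
		return runtime.Restore(c, options)
	}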