author     cdoern <cdoern@redhat.com>  2022-01-14 13:49:34 -0500
committer  cdoern <cdoern@redhat.com>  2022-02-20 21:11:14 -0500
commit     94df7015121759ce69f35f7e7735aa2e4a2dc71a (patch)
tree       154e04dc3073acc57cecedfc703e6c1c4daf4bf1
parent     f918a9418f5eeb00b289c127142953da2c394867 (diff)
download   podman-94df7015121759ce69f35f7e7735aa2e4a2dc71a.tar.gz
           podman-94df7015121759ce69f35f7e7735aa2e4a2dc71a.tar.bz2
           podman-94df7015121759ce69f35f7e7735aa2e4a2dc71a.zip
Implement Podman Container Clone
podman container clone takes the ID of an existing container and creates a specgen from the given container's config, recreating all of the proper namespaces and overriding spec options such as resource limits and the container name if given in the CLI options.

This command utilizes the common function DefineCreateFlags, meaning that we can funnel as many create options as we want into clone over time, allowing the user to clone with as much or as little of the original config as they want.

container clone takes a second argument which is a new name, and a third argument which is an image name to use instead of the original container's.

The currently supported flags are:

--destroy (remove the original container)
--name (new ctr name)
--cpus (sets cpu period and quota)
--cpuset-cpus
--cpu-period
--cpu-rt-period
--cpu-rt-runtime
--cpu-shares
--cpuset-mems
--memory
--run

resolves #10875

Signed-off-by: cdoern <cdoern@redhat.com>
Signed-off-by: cdoern <cbdoer23@g.holycross.edu>
Signed-off-by: cdoern <cdoern@redhat.com>
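Since the clone flags mirror their `podman create` counterparts, typical invocations look like the following sketch (container and image names such as `web`, `web-clone`, and `fedora` are placeholders):

```
# clone "web", name the copy "web-clone", and override its CPU and memory limits
$ podman container clone --name=web-clone --cpus=2 --memory=512m web

# clone "web", remove the original container, and start the copy in detached mode
$ podman container clone --destroy --run web

# positional form: clone "web" into a new container named "web-fedora" based on the fedora image
$ podman container clone web web-fedora fedora
```

Because --cpus is shorthand for the period/quota pair, --cpus=2 maps (under the default 100000us CFS period) to --cpu-period=100000 and --cpu-quota=200000.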
-rw-r--r--  cmd/podman/common/completion.go                     |  14
-rw-r--r--  cmd/podman/common/create.go                         | 445
-rw-r--r--  cmd/podman/containers/clone.go                      |  80
-rw-r--r--  cmd/podman/containers/create.go                     |   2
-rw-r--r--  cmd/podman/containers/run.go                        |   2
-rw-r--r--  cmd/podman/pods/create.go                           |   2
-rw-r--r--  docs/source/markdown/podman-container-clone.1.md    | 180
-rw-r--r--  docs/source/markdown/podman-container.1.md          |   1
-rw-r--r--  libpod/container.go                                 |   5
-rw-r--r--  libpod/container_config.go                          |   4
-rw-r--r--  libpod/container_inspect.go                         |   2
-rw-r--r--  libpod/container_internal.go                        |   4
-rw-r--r--  libpod/container_internal_linux.go                  |   2
-rw-r--r--  libpod/options.go                                   |   3
-rw-r--r--  pkg/api/handlers/libpod/containers_create.go        |   2
-rw-r--r--  pkg/domain/entities/containers.go                   |  10
-rw-r--r--  pkg/domain/entities/engine_container.go             |   1
-rw-r--r--  pkg/domain/entities/pods.go                         |   1
-rw-r--r--  pkg/domain/infra/abi/containers.go                  |  91
-rw-r--r--  pkg/domain/infra/abi/play.go                        |   4
-rw-r--r--  pkg/domain/infra/tunnel/containers.go               |   4
-rw-r--r--  pkg/specgen/generate/container.go                   | 150
-rw-r--r--  pkg/specgen/generate/container_create.go            |  39
-rw-r--r--  pkg/specgen/generate/oci.go                         |   9
-rw-r--r--  pkg/specgen/generate/pod_create.go                  |   2
-rw-r--r--  pkg/specgenutil/createparse.go                      |   2
-rw-r--r--  pkg/specgenutil/specgen.go                          | 295
-rw-r--r--  test/e2e/container_clone_test.go                    | 187
28 files changed, 1215 insertions, 328 deletions
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go
index a2ce3834d..9ebdcda2b 100644
--- a/cmd/podman/common/completion.go
+++ b/cmd/podman/common/completion.go
@@ -1308,3 +1308,17 @@ func AutocompleteCompressionFormat(cmd *cobra.Command, args []string, toComplete
types := []string{"gzip", "zstd", "zstd:chunked"}
return types, cobra.ShellCompDirectiveNoFileComp
}
+
+// AutocompleteClone - Autocomplete container and image names
+func AutocompleteClone(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if !validCurrentCmdLine(cmd, args, toComplete) {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ switch len(args) {
+ case 0:
+ return getContainers(cmd, toComplete, completeDefault)
+ case 2:
+ return getImages(cmd, toComplete)
+ }
+ return nil, cobra.ShellCompDirectiveNoFileComp
+}
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index 1121806d5..634b49db7 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -28,10 +28,10 @@ func ContainerToPodOptions(containerCreate *entities.ContainerCreateOptions, pod
}
// DefineCreateFlags declares and instantiates the container create flags
-func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, isInfra bool) {
+func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions, isInfra bool, clone bool) {
createFlags := cmd.Flags()
- if !isInfra {
+ if !isInfra && !clone { // regular create flags
annotationFlagName := "annotation"
createFlags.StringSliceVar(
&cf.Annotation,
@@ -103,45 +103,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(cgroupsFlagName, AutocompleteCgroupMode)
- cpuPeriodFlagName := "cpu-period"
- createFlags.Uint64Var(
- &cf.CPUPeriod,
- cpuPeriodFlagName, 0,
- "Limit the CPU CFS (Completely Fair Scheduler) period",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpuPeriodFlagName, completion.AutocompleteNone)
-
- cpuQuotaFlagName := "cpu-quota"
- createFlags.Int64Var(
- &cf.CPUQuota,
- cpuQuotaFlagName, 0,
- "Limit the CPU CFS (Completely Fair Scheduler) quota",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpuQuotaFlagName, completion.AutocompleteNone)
-
- cpuRtPeriodFlagName := "cpu-rt-period"
- createFlags.Uint64Var(
- &cf.CPURTPeriod,
- cpuRtPeriodFlagName, 0,
- "Limit the CPU real-time period in microseconds",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpuRtPeriodFlagName, completion.AutocompleteNone)
-
- cpuRtRuntimeFlagName := "cpu-rt-runtime"
- createFlags.Int64Var(
- &cf.CPURTRuntime,
- cpuRtRuntimeFlagName, 0,
- "Limit the CPU real-time runtime in microseconds",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpuRtRuntimeFlagName, completion.AutocompleteNone)
-
- cpuSharesFlagName := "cpu-shares"
- createFlags.Uint64Var(
- &cf.CPUShares,
- cpuSharesFlagName, 0,
- "CPU shares (relative weight)",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpuSharesFlagName, completion.AutocompleteNone)
cidfileFlagName := "cidfile"
createFlags.StringVar(
&cf.CIDFile,
@@ -149,13 +110,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
"Write the container ID to the file",
)
_ = cmd.RegisterFlagCompletionFunc(cidfileFlagName, completion.AutocompleteDefault)
- cpusetMemsFlagName := "cpuset-mems"
- createFlags.StringVar(
- &cf.CPUSetMems,
- cpusetMemsFlagName, "",
- "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpusetMemsFlagName, completion.AutocompleteNone)
deviceCgroupRuleFlagName := "device-cgroup-rule"
createFlags.StringSliceVar(
@@ -358,14 +312,6 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(logOptFlagName, AutocompleteLogOpt)
- memoryFlagName := "memory"
- createFlags.StringVarP(
- &cf.Memory,
- memoryFlagName, "m", "",
- "Memory limit "+sizeWithUnitFormat,
- )
- _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
-
memoryReservationFlagName := "memory-reservation"
createFlags.StringVar(
&cf.MemoryReservation,
@@ -703,199 +649,256 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
`If a container with the same name exists, replace it`,
)
}
+ if isInfra || (!clone && !isInfra) { // infra container flags, create should also pick these up
+ sysctlFlagName := "sysctl"
+ createFlags.StringSliceVar(
+ &cf.Sysctl,
+ sysctlFlagName, []string{},
+ "Sysctl options",
+ )
+ //TODO: Add function for sysctl completion.
+ _ = cmd.RegisterFlagCompletionFunc(sysctlFlagName, completion.AutocompleteNone)
- sysctlFlagName := "sysctl"
- createFlags.StringSliceVar(
- &cf.Sysctl,
- sysctlFlagName, []string{},
- "Sysctl options",
- )
- //TODO: Add function for sysctl completion.
- _ = cmd.RegisterFlagCompletionFunc(sysctlFlagName, completion.AutocompleteNone)
-
- securityOptFlagName := "security-opt"
- createFlags.StringArrayVar(
- &cf.SecurityOpt,
- securityOptFlagName, []string{},
- "Security Options",
- )
- _ = cmd.RegisterFlagCompletionFunc(securityOptFlagName, AutocompleteSecurityOption)
-
- subgidnameFlagName := "subgidname"
- createFlags.StringVar(
- &cf.SubUIDName,
- subgidnameFlagName, "",
- "Name of range listed in /etc/subgid for use in user namespace",
- )
- _ = cmd.RegisterFlagCompletionFunc(subgidnameFlagName, completion.AutocompleteSubgidName)
+ securityOptFlagName := "security-opt"
+ createFlags.StringArrayVar(
+ &cf.SecurityOpt,
+ securityOptFlagName, []string{},
+ "Security Options",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(securityOptFlagName, AutocompleteSecurityOption)
- subuidnameFlagName := "subuidname"
- createFlags.StringVar(
- &cf.SubGIDName,
- subuidnameFlagName, "",
- "Name of range listed in /etc/subuid for use in user namespace",
- )
- _ = cmd.RegisterFlagCompletionFunc(subuidnameFlagName, completion.AutocompleteSubuidName)
+ subgidnameFlagName := "subgidname"
+ createFlags.StringVar(
+ &cf.SubUIDName,
+ subgidnameFlagName, "",
+ "Name of range listed in /etc/subgid for use in user namespace",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(subgidnameFlagName, completion.AutocompleteSubgidName)
- gidmapFlagName := "gidmap"
- createFlags.StringSliceVar(
- &cf.GIDMap,
- gidmapFlagName, []string{},
- "GID map to use for the user namespace",
- )
- _ = cmd.RegisterFlagCompletionFunc(gidmapFlagName, completion.AutocompleteNone)
+ subuidnameFlagName := "subuidname"
+ createFlags.StringVar(
+ &cf.SubGIDName,
+ subuidnameFlagName, "",
+ "Name of range listed in /etc/subuid for use in user namespace",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(subuidnameFlagName, completion.AutocompleteSubuidName)
- uidmapFlagName := "uidmap"
- createFlags.StringSliceVar(
- &cf.UIDMap,
- uidmapFlagName, []string{},
- "UID map to use for the user namespace",
- )
- _ = cmd.RegisterFlagCompletionFunc(uidmapFlagName, completion.AutocompleteNone)
+ gidmapFlagName := "gidmap"
+ createFlags.StringSliceVar(
+ &cf.GIDMap,
+ gidmapFlagName, []string{},
+ "GID map to use for the user namespace",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(gidmapFlagName, completion.AutocompleteNone)
- usernsFlagName := "userns"
- createFlags.String(
- usernsFlagName, os.Getenv("PODMAN_USERNS"),
- "User namespace to use",
- )
- _ = cmd.RegisterFlagCompletionFunc(usernsFlagName, AutocompleteUserNamespace)
+ uidmapFlagName := "uidmap"
+ createFlags.StringSliceVar(
+ &cf.UIDMap,
+ uidmapFlagName, []string{},
+ "UID map to use for the user namespace",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(uidmapFlagName, completion.AutocompleteNone)
- cgroupParentFlagName := "cgroup-parent"
- createFlags.StringVar(
- &cf.CgroupParent,
- cgroupParentFlagName, "",
- "Optional parent cgroup for the container",
- )
- _ = cmd.RegisterFlagCompletionFunc(cgroupParentFlagName, completion.AutocompleteDefault)
+ usernsFlagName := "userns"
+ createFlags.String(
+ usernsFlagName, os.Getenv("PODMAN_USERNS"),
+ "User namespace to use",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(usernsFlagName, AutocompleteUserNamespace)
- conmonPidfileFlagName := ""
- if !isInfra {
- conmonPidfileFlagName = "conmon-pidfile"
- } else {
- conmonPidfileFlagName = "infra-conmon-pidfile"
- }
- createFlags.StringVar(
- &cf.ConmonPIDFile,
- conmonPidfileFlagName, "",
- "Path to the file that will receive the PID of conmon",
- )
- _ = cmd.RegisterFlagCompletionFunc(conmonPidfileFlagName, completion.AutocompleteDefault)
+ cgroupParentFlagName := "cgroup-parent"
+ createFlags.StringVar(
+ &cf.CgroupParent,
+ cgroupParentFlagName, "",
+ "Optional parent cgroup for the container",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cgroupParentFlagName, completion.AutocompleteDefault)
+ conmonPidfileFlagName := ""
+ if !isInfra {
+ conmonPidfileFlagName = "conmon-pidfile"
+ } else {
+ conmonPidfileFlagName = "infra-conmon-pidfile"
+ }
+ createFlags.StringVar(
+ &cf.ConmonPIDFile,
+ conmonPidfileFlagName, "",
+ "Path to the file that will receive the PID of conmon",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(conmonPidfileFlagName, completion.AutocompleteDefault)
- cpusFlagName := "cpus"
- createFlags.Float64Var(
- &cf.CPUS,
- cpusFlagName, 0,
- "Number of CPUs. The default is 0.000 which means no limit",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpusFlagName, completion.AutocompleteNone)
+ entrypointFlagName := ""
+ if !isInfra {
+ entrypointFlagName = "entrypoint"
+ } else {
+ entrypointFlagName = "infra-command"
+ }
- cpusetCpusFlagName := "cpuset-cpus"
- createFlags.StringVar(
- &cf.CPUSetCPUs,
- cpusetCpusFlagName, "",
- "CPUs in which to allow execution (0-3, 0,1)",
- )
- _ = cmd.RegisterFlagCompletionFunc(cpusetCpusFlagName, completion.AutocompleteNone)
+ createFlags.String(entrypointFlagName, "",
+ "Overwrite the default ENTRYPOINT of the image",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(entrypointFlagName, completion.AutocompleteNone)
- entrypointFlagName := ""
- if !isInfra {
- entrypointFlagName = "entrypoint"
- } else {
- entrypointFlagName = "infra-command"
- }
+ hostnameFlagName := "hostname"
+ createFlags.StringVarP(
+ &cf.Hostname,
+ hostnameFlagName, "h", "",
+ "Set container hostname",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(hostnameFlagName, completion.AutocompleteNone)
- createFlags.String(entrypointFlagName, "",
- "Overwrite the default ENTRYPOINT of the image",
- )
- _ = cmd.RegisterFlagCompletionFunc(entrypointFlagName, completion.AutocompleteNone)
+ labelFlagName := "label"
+ createFlags.StringArrayVarP(
+ &cf.Label,
+ labelFlagName, "l", []string{},
+ "Set metadata on container",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(labelFlagName, completion.AutocompleteNone)
- hostnameFlagName := "hostname"
- createFlags.StringVarP(
- &cf.Hostname,
- hostnameFlagName, "h", "",
- "Set container hostname",
- )
- _ = cmd.RegisterFlagCompletionFunc(hostnameFlagName, completion.AutocompleteNone)
+ labelFileFlagName := "label-file"
+ createFlags.StringSliceVar(
+ &cf.LabelFile,
+ labelFileFlagName, []string{},
+ "Read in a line delimited file of labels",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(labelFileFlagName, completion.AutocompleteDefault)
- labelFlagName := "label"
- createFlags.StringArrayVarP(
- &cf.Label,
- labelFlagName, "l", []string{},
- "Set metadata on container",
- )
- _ = cmd.RegisterFlagCompletionFunc(labelFlagName, completion.AutocompleteNone)
+ if isInfra {
+ nameFlagName := "infra-name"
+ createFlags.StringVar(
+ &cf.Name,
+ nameFlagName, "",
+ "Assign a name to the container",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
+ }
- labelFileFlagName := "label-file"
- createFlags.StringSliceVar(
- &cf.LabelFile,
- labelFileFlagName, []string{},
- "Read in a line delimited file of labels",
- )
- _ = cmd.RegisterFlagCompletionFunc(labelFileFlagName, completion.AutocompleteDefault)
+ createFlags.Bool(
+ "help", false, "",
+ )
- nameFlagName := ""
- if !isInfra {
- nameFlagName = "name"
+ pidFlagName := "pid"
createFlags.StringVar(
- &cf.Name,
- nameFlagName, "",
- "Assign a name to the container",
+ &cf.PID,
+ pidFlagName, "",
+ "PID namespace to use",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(pidFlagName, AutocompleteNamespace)
+
+ volumeDesciption := "Bind mount a volume into the container"
+ if registry.IsRemote() {
+ volumeDesciption = "Bind mount a volume into the container. Volume source will be on the server machine, not the client"
+ }
+ volumeFlagName := "volume"
+ createFlags.StringArrayVarP(
+ &cf.Volume,
+ volumeFlagName, "v", volumes(),
+ volumeDesciption,
+ )
+ _ = cmd.RegisterFlagCompletionFunc(volumeFlagName, AutocompleteVolumeFlag)
+
+ deviceFlagName := "device"
+ createFlags.StringSliceVar(
+ &cf.Devices,
+ deviceFlagName, devices(),
+ "Add a host device to the container",
)
- } else {
- nameFlagName = "infra-name"
+ _ = cmd.RegisterFlagCompletionFunc(deviceFlagName, completion.AutocompleteDefault)
+
+ deviceReadBpsFlagName := "device-read-bps"
+ createFlags.StringSliceVar(
+ &cf.DeviceReadBPs,
+ deviceReadBpsFlagName, []string{},
+ "Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb)",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(deviceReadBpsFlagName, completion.AutocompleteDefault)
+
+ volumesFromFlagName := "volumes-from"
+ createFlags.StringArrayVar(
+ &cf.VolumesFrom,
+ volumesFromFlagName, []string{},
+ "Mount volumes from the specified container(s)",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(volumesFromFlagName, AutocompleteContainers)
+ }
+ if clone || !isInfra { // clone and create only flags, we need this level of separation so clone does not pick up all of the flags
+ nameFlagName := "name"
createFlags.StringVar(
&cf.Name,
nameFlagName, "",
"Assign a name to the container",
)
- }
- _ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
+ _ = cmd.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
- createFlags.Bool(
- "help", false, "",
- )
+ cpuPeriodFlagName := "cpu-period"
+ createFlags.Uint64Var(
+ &cf.CPUPeriod,
+ cpuPeriodFlagName, 0,
+ "Limit the CPU CFS (Completely Fair Scheduler) period",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cpuPeriodFlagName, completion.AutocompleteNone)
- pidFlagName := "pid"
- createFlags.StringVar(
- &cf.PID,
- pidFlagName, "",
- "PID namespace to use",
- )
- _ = cmd.RegisterFlagCompletionFunc(pidFlagName, AutocompleteNamespace)
+ cpuQuotaFlagName := "cpu-quota"
+ createFlags.Int64Var(
+ &cf.CPUQuota,
+ cpuQuotaFlagName, 0,
+ "Limit the CPU CFS (Completely Fair Scheduler) quota",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cpuQuotaFlagName, completion.AutocompleteNone)
- volumeDesciption := "Bind mount a volume into the container"
- if registry.IsRemote() {
- volumeDesciption = "Bind mount a volume into the container. Volume source will be on the server machine, not the client"
- }
- volumeFlagName := "volume"
- createFlags.StringArrayVarP(
- &cf.Volume,
- volumeFlagName, "v", volumes(),
- volumeDesciption,
- )
- _ = cmd.RegisterFlagCompletionFunc(volumeFlagName, AutocompleteVolumeFlag)
+ cpuRtPeriodFlagName := "cpu-rt-period"
+ createFlags.Uint64Var(
+ &cf.CPURTPeriod,
+ cpuRtPeriodFlagName, 0,
+ "Limit the CPU real-time period in microseconds",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cpuRtPeriodFlagName, completion.AutocompleteNone)
- deviceFlagName := "device"
- createFlags.StringSliceVar(
- &cf.Devices,
- deviceFlagName, devices(),
- "Add a host device to the container",
- )
- _ = cmd.RegisterFlagCompletionFunc(deviceFlagName, completion.AutocompleteDefault)
+ cpuRtRuntimeFlagName := "cpu-rt-runtime"
+ createFlags.Int64Var(
+ &cf.CPURTRuntime,
+ cpuRtRuntimeFlagName, 0,
+ "Limit the CPU real-time runtime in microseconds",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cpuRtRuntimeFlagName, completion.AutocompleteNone)
+
+ cpuSharesFlagName := "cpu-shares"
+ createFlags.Uint64Var(
+ &cf.CPUShares,
+ cpuSharesFlagName, 0,
+ "CPU shares (relative weight)",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cpuSharesFlagName, completion.AutocompleteNone)
+
+ cpusetMemsFlagName := "cpuset-mems"
+ createFlags.StringVar(
+ &cf.CPUSetMems,
+ cpusetMemsFlagName, "",
+ "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.",
+ )
+ _ = cmd.RegisterFlagCompletionFunc(cpusetMemsFlagName, completion.AutocompleteNone)
- deviceReadBpsFlagName := "device-read-bps"
- createFlags.StringSliceVar(
- &cf.DeviceReadBPs,
- deviceReadBpsFlagName, []string{},
- "Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb)",
+ memoryFlagName := "memory"
+ createFlags.StringVarP(
+ &cf.Memory,
+ memoryFlagName, "m", "",
+ "Memory limit "+sizeWithUnitFormat,
+ )
+ _ = cmd.RegisterFlagCompletionFunc(memoryFlagName, completion.AutocompleteNone)
+ }
+ //anyone can use these
+ cpusFlagName := "cpus"
+ createFlags.Float64Var(
+ &cf.CPUS,
+ cpusFlagName, 0,
+ "Number of CPUs. The default is 0.000 which means no limit",
)
- _ = cmd.RegisterFlagCompletionFunc(deviceReadBpsFlagName, completion.AutocompleteDefault)
+ _ = cmd.RegisterFlagCompletionFunc(cpusFlagName, completion.AutocompleteNone)
- volumesFromFlagName := "volumes-from"
- createFlags.StringArrayVar(
- &cf.VolumesFrom,
- volumesFromFlagName, []string{},
- "Mount volumes from the specified container(s)",
+ cpusetCpusFlagName := "cpuset-cpus"
+ createFlags.StringVar(
+ &cf.CPUSetCPUs,
+ cpusetCpusFlagName, "",
+ "CPUs in which to allow execution (0-3, 0,1)",
)
- _ = cmd.RegisterFlagCompletionFunc(volumesFromFlagName, AutocompleteContainers)
+ _ = cmd.RegisterFlagCompletionFunc(cpusetCpusFlagName, completion.AutocompleteNone)
}
diff --git a/cmd/podman/containers/clone.go b/cmd/podman/containers/clone.go
new file mode 100644
index 000000000..d095d24ba
--- /dev/null
+++ b/cmd/podman/containers/clone.go
@@ -0,0 +1,80 @@
+package containers
+
+import (
+ "fmt"
+
+ "github.com/containers/podman/v4/cmd/podman/common"
+ "github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/domain/entities"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+var (
+ cloneDescription = `Creates a copy of an existing container.`
+
+ containerCloneCommand = &cobra.Command{
+ Use: "clone [options] CONTAINER NAME IMAGE",
+ Short: "Clone an existing container",
+ Long: cloneDescription,
+ RunE: clone,
+ Args: cobra.RangeArgs(1, 3),
+ ValidArgsFunction: common.AutocompleteClone,
+ Example: `podman container clone container_name new_name image_name`,
+ }
+)
+
+var (
+ ctrClone entities.ContainerCloneOptions
+)
+
+func cloneFlags(cmd *cobra.Command) {
+ flags := cmd.Flags()
+
+ destroyFlagName := "destroy"
+ flags.BoolVar(&ctrClone.Destroy, destroyFlagName, false, "destroy the original container")
+
+ runFlagName := "run"
+ flags.BoolVar(&ctrClone.Run, runFlagName, false, "run the new container")
+
+ common.DefineCreateFlags(cmd, &ctrClone.CreateOpts, false, true)
+}
+func init() {
+ registry.Commands = append(registry.Commands, registry.CliCommand{
+ Command: containerCloneCommand,
+ Parent: containerCmd,
+ })
+
+ cloneFlags(containerCloneCommand)
+}
+
+func clone(cmd *cobra.Command, args []string) error {
+ switch len(args) {
+ case 0:
+ return errors.Wrapf(define.ErrInvalidArg, "Must Specify at least 1 argument")
+ case 2:
+ ctrClone.CreateOpts.Name = args[1]
+ case 3:
+ ctrClone.CreateOpts.Name = args[1]
+ ctrClone.Image = args[2]
+ rawImageName := ""
+ if !cliVals.RootFS {
+ rawImageName = args[0]
+ name, err := PullImage(ctrClone.Image, ctrClone.CreateOpts)
+ if err != nil {
+ return err
+ }
+ ctrClone.Image = name
+ ctrClone.RawImageName = rawImageName
+ }
+ }
+ ctrClone.ID = args[0]
+ ctrClone.CreateOpts.IsClone = true
+ rep, err := registry.ContainerEngine().ContainerClone(registry.GetContext(), ctrClone)
+ if err != nil {
+ return err
+ }
+ fmt.Println(rep.Id)
+ return nil
+}
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index 89d2e5515..de1a41e25 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -70,7 +70,7 @@ func createFlags(cmd *cobra.Command) {
)
flags.SetInterspersed(false)
- common.DefineCreateFlags(cmd, &cliVals, false)
+ common.DefineCreateFlags(cmd, &cliVals, false, false)
common.DefineNetFlags(cmd)
flags.SetNormalizeFunc(utils.AliasFlags)
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 5a64b8c32..1d98eb3b3 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -61,7 +61,7 @@ func runFlags(cmd *cobra.Command) {
flags := cmd.Flags()
flags.SetInterspersed(false)
- common.DefineCreateFlags(cmd, &cliVals, false)
+ common.DefineCreateFlags(cmd, &cliVals, false, false)
common.DefineNetFlags(cmd)
flags.SetNormalizeFunc(utils.AliasFlags)
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index 1cd36008e..ab3a6d578 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -63,7 +63,7 @@ func init() {
})
flags := createCommand.Flags()
flags.SetInterspersed(false)
- common.DefineCreateFlags(createCommand, &infraOptions, true)
+ common.DefineCreateFlags(createCommand, &infraOptions, true, false)
common.DefineNetFlags(createCommand)
flags.BoolVar(&createOptions.Infra, "infra", true, "Create an infra container associated with the pod to share namespaces with")
diff --git a/docs/source/markdown/podman-container-clone.1.md b/docs/source/markdown/podman-container-clone.1.md
new file mode 100644
index 000000000..52fa023f3
--- /dev/null
+++ b/docs/source/markdown/podman-container-clone.1.md
@@ -0,0 +1,180 @@
+% podman-container-clone(1)
+
+## NAME
+podman\-container\-clone - Creates a copy of an existing container
+
+## SYNOPSIS
+**podman container clone** [*options*] *container* *name* *image*
+
+## DESCRIPTION
+**podman container clone** creates a copy of a container, recreating the original with an identical configuration. This command takes three arguments: the first is the container id or name to clone, the second can change the name of the clone from the default of $ORIGINAL_NAME-clone, and the third is a new image to use in the cloned container.
+
+## OPTIONS
+
+#### **--name**
+
+Set a custom name for the cloned container. The default if not specified is of the syntax: **<ORIGINAL_NAME>-clone**
+
+#### **--destroy**
+
+Remove the original container once it has been used to mimic the configuration.
+
+#### **--cpus**
+
+Set a number of CPUs for the container that overrides the original container's CPU limits. If none are specified, the original container's Nano CPUs are used.
+
+This is shorthand for **--cpu-period** and **--cpu-quota**, so only **--cpus** or the
+combination of **--cpu-period** and **--cpu-quota** may be set.
+
+#### **--cpuset-cpus**
+
+CPUs in which to allow execution (0-3, 0,1). If none are specified, the original container's CPUset is used.
+
+#### **--cpu-period**=*limit*
+
+Set the CPU period for the Completely Fair Scheduler (CFS), which is a
+duration in microseconds. Once the container's CPU quota is used up, it will
+not be scheduled to run until the current period ends. Defaults to 100000
+microseconds.
+
+On some systems, changing the CPU limits may not be allowed for non-root
+users. For more details, see
+https://github.com/containers/podman/blob/master/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error
+
+If none is specified, the original container's CPU period is used.
+
+#### **--cpu-shares**=*shares*
+
+CPU shares (relative weight)
+
+By default, all containers get the same proportion of CPU cycles. This proportion
+can be modified by changing the container's CPU share weighting relative
+to the weighting of all other running containers.
+
+To modify the proportion from the default of 1024, use the **--cpu-shares**
+option to set the weighting to 2 or higher.
+
+The proportion will only apply when CPU-intensive processes are running.
+When tasks in one container are idle, other containers can use the
+left-over CPU time. The actual amount of CPU time will vary depending on
+the number of containers running on the system.
+
+For example, consider three containers, one has a cpu-share of 1024 and
+two others have a cpu-share setting of 512. When processes in all three
+containers attempt to use 100% of CPU, the first container would receive
+50% of the total CPU time. If a fourth container is added with a cpu-share
+of 1024, the first container only gets 33% of the CPU. The remaining containers
+receive 16.5%, 16.5% and 33% of the CPU.
+
+On a multi-core system, the shares of CPU time are distributed over all CPU
+cores. Even if a container is limited to less than 100% of CPU time, it can
+use 100% of each individual CPU core.
+
+For example, consider a system with more than three cores. If container **{C0}** is started
+with **-c=512** running one process, and another container **{C1}** with **-c=1024** running
+two processes, this can result in the following division of CPU shares:
+
+| PID | container | CPU | CPU share    |
+| --- | --------- | --- | ------------ |
+| 100 | {C0}      | 0   | 100% of CPU0 |
+| 101 | {C1}      | 1   | 100% of CPU1 |
+| 102 | {C1}      | 2   | 100% of CPU2 |
+
+If none are specified, the original container's CPU shares are used.
+
+#### **--cpuset-mems**=*nodes*
+
+Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+
+If there are four memory nodes on the system (0-3) and `--cpuset-mems=0,1` is used, then
+processes in the container will only use memory from the first two memory nodes.
+
+If none are specified, the original container's CPU memory nodes are used.
+
+#### **--cpu-quota**=*limit*
+
+Limit the CPU Completely Fair Scheduler (CFS) quota.
+
+Limit the container's CPU usage. By default, containers run with the full
+CPU resource. The limit is a number in microseconds. If a number is provided,
+the container will be allowed to use that much CPU time until the CPU period
+ends (controllable via **--cpu-period**).
+
+On some systems, changing the CPU limits may not be allowed for non-root
+users. For more details, see
+https://github.com/containers/podman/blob/master/troubleshooting.md#26-running-containers-with-cpu-limits-fails-with-a-permissions-error
+
+If none is specified, the original container's CPU quota is used.
+
+#### **--cpu-rt-period**=*microseconds*
+
+Limit the CPU real-time period in microseconds
+
+Limit the container's Real Time CPU usage. This option tells the kernel to restrict the container's Real Time CPU usage to the period specified.
+
+This option is not supported on cgroups V2 systems.
+
+If none is specified, the original container's CPU real-time period is used.
+
+
+#### **--cpu-rt-runtime**=*microseconds*
+
+Limit the CPU real-time runtime in microseconds.
+
+Limit the container's Real Time CPU usage. This option tells the kernel to limit the amount of time in a given CPU period that Real Time tasks may consume. For example:
+a period of 1,000,000us and a runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks.
+
+The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup.
+
+This option is not supported on cgroup V2 systems.
+
+#### **--memory**, **-m**=*limit*
+
+Memory limit (format: `<number>[<unit>]`, where unit = b (bytes), k (kilobytes), m (megabytes), or g (gigabytes))
+
+Allows the memory available to a container to be constrained. If the host
+supports swap memory, then the **-m** memory setting can be larger than physical
+RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
+not limited. The actual limit may be rounded up to a multiple of the operating
+system's page size (the value would be very large, that's millions of trillions).
+
+If no memory limits are specified, the original container's limits are used.
+
+#### **--run**
+
+When set to true, this flag runs the newly created container after the
+clone process has completed; the container is started in detached mode.
+
+## EXAMPLES
+```
+# podman container clone d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7
+6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584
+```
+
+```
+# podman container clone --name=clone d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7
+6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584
+```
+
+```
+# podman container clone --destroy --cpus=5 d0cf1f782e2ed67e8c0050ff92df865a039186237a4df24d7acba5b1fa8cc6e7
+6b2c73ff8a1982828c9ae2092954bcd59836a131960f7e05221af9df5939c584
+```
+
+```
+# podman container clone 2d4d4fca7219b4437e0d74fcdc272c4f031426a6eacd207372691207079551de new_name fedora
+Resolved "fedora" as an alias (/etc/containers/registries.conf.d/shortnames.conf)
+Trying to pull registry.fedoraproject.org/fedora:latest...
+Getting image source signatures
+Copying blob c6183d119aa8 done
+Copying config e417cd49a8 done
+Writing manifest to image destination
+Storing signatures
+5a9b7851013d326aa4ac4565726765901b3ecc01fcbc0f237bc7fd95588a24f9
+```
+## SEE ALSO
+**[podman-create(1)](podman-create.1.md)**, **[cgroups(7)](https://man7.org/linux/man-pages/man7/cgroups.7.html)**
+
+## HISTORY
+January 2022, Originally written by Charlie Doern <cdoern@redhat.com>
diff --git a/docs/source/markdown/podman-container.1.md b/docs/source/markdown/podman-container.1.md
index 540ebc723..36623c718 100644
--- a/docs/source/markdown/podman-container.1.md
+++ b/docs/source/markdown/podman-container.1.md
@@ -16,6 +16,7 @@ The container command allows you to manage containers
| attach | [podman-attach(1)](podman-attach.1.md) | Attach to a running container. |
| checkpoint | [podman-container-checkpoint(1)](podman-container-checkpoint.1.md) | Checkpoints one or more running containers. |
| cleanup | [podman-container-cleanup(1)](podman-container-cleanup.1.md) | Cleanup the container's network and mountpoints. |
+| clone | [podman-container-clone(1)](podman-container-clone.1.md) | Creates a copy of an existing container. |
| commit | [podman-commit(1)](podman-commit.1.md) | Create new image based on the changed container. |
| cp | [podman-cp(1)](podman-cp.1.md) | Copy files/folders between a container and the local filesystem. |
| create | [podman-create(1)](podman-create.1.md) | Create a new container. |
diff --git a/libpod/container.go b/libpod/container.go
index e280b87a8..578f16905 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -417,7 +417,10 @@ func (c *Container) MountLabel() string {
// Systemd returns whether the container will be running in systemd mode
func (c *Container) Systemd() bool {
- return c.config.Systemd
+ if c.config.Systemd != nil {
+ return *c.config.Systemd
+ }
+ return false
}
// User returns the user who the container is run as
diff --git a/libpod/container_config.go b/libpod/container_config.go
index d5374aaaf..e56f1342a 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -375,8 +375,8 @@ type ContainerMiscConfig struct {
IsInfra bool `json:"pause"`
// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
SdNotifyMode string `json:"sdnotifyMode,omitempty"`
- // Systemd tells libpod to setup the container in systemd mode
- Systemd bool `json:"systemd"`
+ // Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false
+ Systemd *bool `json:"systemd,omitempty"`
// HealthCheckConfig has the health check command and related timings
HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"`
// PreserveFDs is a number of additional file descriptors (in addition
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 1344fc659..07b28ba93 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -346,7 +346,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp
ctrConfig.Timeout = c.config.Timeout
ctrConfig.OpenStdin = c.config.Stdin
ctrConfig.Image = c.config.RootfsImageName
- ctrConfig.SystemdMode = c.config.Systemd
+ ctrConfig.SystemdMode = c.Systemd()
// Leave empty is not explicitly overwritten by user
if len(c.config.Command) != 0 {
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 51533b3bf..3c21cade8 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -557,7 +557,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
func (c *Container) processLabel(processLabel string) (string, error) {
- if !c.config.Systemd && !c.ociRuntime.SupportsKVM() {
+ if !c.Systemd() && !c.ociRuntime.SupportsKVM() {
return processLabel, nil
}
ctrSpec, err := c.specFromState()
@@ -569,7 +569,7 @@ func (c *Container) processLabel(processLabel string) (string, error) {
switch {
case c.ociRuntime.SupportsKVM():
return selinux.KVMLabel(processLabel)
- case c.config.Systemd:
+ case c.Systemd():
return selinux.InitLabel(processLabel)
}
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index afa351c17..cef9e2c04 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -614,7 +614,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
}
- if c.config.Systemd {
+ if c.Systemd() {
if err := c.setupSystemd(g.Mounts(), g); err != nil {
return nil, errors.Wrapf(err, "error adding systemd-specific mounts")
}
diff --git a/libpod/options.go b/libpod/options.go
index e0502a72d..1ee4e7322 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -566,7 +566,8 @@ func WithSystemd() CtrCreateOption {
return define.ErrCtrFinalized
}
- ctr.config.Systemd = true
+ t := true
+ ctr.config.Systemd = &t
return nil
}
}
diff --git a/pkg/api/handlers/libpod/containers_create.go b/pkg/api/handlers/libpod/containers_create.go
index 8e5fc1c1c..61f437faf 100644
--- a/pkg/api/handlers/libpod/containers_create.go
+++ b/pkg/api/handlers/libpod/containers_create.go
@@ -33,7 +33,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
- rtSpec, spec, opts, err := generate.MakeContainer(context.Background(), runtime, &sg)
+ rtSpec, spec, opts, err := generate.MakeContainer(context.Background(), runtime, &sg, false, nil)
if err != nil {
utils.InternalServerError(w, err)
return
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index d39a70732..e9bce0eb7 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -463,3 +463,13 @@ type ContainerRenameOptions struct {
// NewName is the new name that will be given to the container.
NewName string
}
+
+// ContainerCloneOptions contains options for cloning an existing container
+type ContainerCloneOptions struct {
+ ID string
+ Destroy bool
+ CreateOpts ContainerCreateOptions
+ Image string
+ RawImageName string
+ Run bool
+}
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 5dedceacc..21272d33f 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -19,6 +19,7 @@ type ContainerEngine interface {
ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error
ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error)
ContainerCleanup(ctx context.Context, namesOrIds []string, options ContainerCleanupOptions) ([]*ContainerCleanupReport, error)
+ ContainerClone(ctx context.Context, ctrClone ContainerCloneOptions) (*ContainerCreateReport, error)
ContainerCommit(ctx context.Context, nameOrID string, options CommitOptions) (*CommitReport, error)
ContainerCopyFromArchive(ctx context.Context, nameOrID, path string, reader io.Reader, options CopyOptions) (ContainerCopyFunc, error)
ContainerCopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (ContainerCopyFunc, error)
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
index 7922db4e6..6fb3db1b5 100644
--- a/pkg/domain/entities/pods.go
+++ b/pkg/domain/entities/pods.go
@@ -264,6 +264,7 @@ type ContainerCreateOptions struct {
SeccompPolicy string
PidFile string
IsInfra bool
+ IsClone bool
Net *NetOptions `json:"net,omitempty"`
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index a5c4647d7..92f5b1a80 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -6,6 +6,7 @@ import (
"io/ioutil"
"os"
"strconv"
+ "strings"
"sync"
"time"
@@ -648,7 +649,7 @@ func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecG
for _, w := range warn {
fmt.Fprintf(os.Stderr, "%s\n", w)
}
- rtSpec, spec, opts, err := generate.MakeContainer(context.Background(), ic.Libpod, s)
+ rtSpec, spec, opts, err := generate.MakeContainer(context.Background(), ic.Libpod, s, false, nil)
if err != nil {
return nil, err
}
@@ -971,7 +972,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
fmt.Fprintf(os.Stderr, "%s\n", w)
}
- rtSpec, spec, optsN, err := generate.MakeContainer(ctx, ic.Libpod, opts.Spec)
+ rtSpec, spec, optsN, err := generate.MakeContainer(ctx, ic.Libpod, opts.Spec, false, nil)
if err != nil {
return nil, err
}
@@ -1490,3 +1491,89 @@ func (ic *ContainerEngine) ContainerRename(ctx context.Context, nameOrID string,
return nil
}
+
+func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts entities.ContainerCloneOptions) (*entities.ContainerCreateReport, error) {
+ spec := specgen.NewSpecGenerator(ctrCloneOpts.Image, ctrCloneOpts.CreateOpts.RootFS)
+ var c *libpod.Container
+ c, err := generate.ConfigToSpec(ic.Libpod, spec, ctrCloneOpts.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ err = specgenutil.FillOutSpecGen(spec, &ctrCloneOpts.CreateOpts, []string{})
+ if err != nil {
+ return nil, err
+ }
+ out, err := generate.CompleteSpec(ctx, ic.Libpod, spec)
+ if err != nil {
+ return nil, err
+ }
+
+ // Print warnings
+ if len(out) > 0 {
+ for _, w := range out {
+ fmt.Println("Could not properly complete the spec as expected:")
+ fmt.Fprintf(os.Stderr, "%s\n", w)
+ }
+ }
+
+ if len(ctrCloneOpts.CreateOpts.Name) > 0 {
+ spec.Name = ctrCloneOpts.CreateOpts.Name
+ } else {
+ n := c.Name()
+ _, err := ic.Libpod.LookupContainer(c.Name() + "-clone")
+ if err == nil {
+ n += "-clone"
+ }
+ switch {
+ case strings.Contains(n, "-clone"):
+ ind := strings.Index(n, "-clone") + 6
+ num, _ := strconv.Atoi(n[ind:])
+ if num == 0 { // clone1 is hard to get with this logic, just check for it here.
+ _, err = ic.Libpod.LookupContainer(n + "1")
+ if err != nil {
+ spec.Name = n + "1"
+ break
+ }
+ } else {
+ n = n[0:ind]
+ }
+ err = nil
+ count := num
+ for err == nil {
+ count++
+ tempN := n + strconv.Itoa(count)
+ _, err = ic.Libpod.LookupContainer(tempN)
+ }
+ n += strconv.Itoa(count)
+ spec.Name = n
+ default:
+ spec.Name = c.Name() + "-clone"
+ }
+ }
+
+ rtSpec, spec, opts, err := generate.MakeContainer(context.Background(), ic.Libpod, spec, true, c)
+ if err != nil {
+ return nil, err
+ }
+ ctr, err := generate.ExecuteCreate(ctx, ic.Libpod, rtSpec, spec, false, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if ctrCloneOpts.Destroy {
+ var time *uint
+ err := ic.Libpod.RemoveContainer(context.Background(), c, false, false, time)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if ctrCloneOpts.Run {
+ if err := ctr.Start(ctx, true); err != nil {
+ return nil, err
+ }
+ }
+
+ return &entities.ContainerCreateReport{Id: ctr.ID()}, nil
+}
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index cad8c4609..25e8f8556 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -392,7 +392,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
- rtSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, specGen)
+ rtSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, specGen, false, nil)
if err != nil {
return nil, err
}
@@ -435,7 +435,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
- rtSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, specGen)
+ rtSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, specGen, false, nil)
if err != nil {
return nil, err
}
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 67e709486..aa4baf846 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -958,3 +958,7 @@ func (ic *ContainerEngine) ShouldRestart(_ context.Context, id string) (bool, er
func (ic *ContainerEngine) ContainerRename(ctx context.Context, nameOrID string, opts entities.ContainerRenameOptions) error {
return containers.Rename(ic.ClientCtx, nameOrID, new(containers.RenameOptions).WithName(opts.NewName))
}
+
+func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts entities.ContainerCloneOptions) (*entities.ContainerCreateReport, error) {
+ return nil, errors.New("cloning a container is not supported on the remote client")
+}
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
index a4d862a60..64669f34d 100644
--- a/pkg/specgen/generate/container.go
+++ b/pkg/specgen/generate/container.go
@@ -2,6 +2,7 @@ package generate
import (
"context"
+ "encoding/json"
"os"
"strings"
"time"
@@ -335,3 +336,152 @@ func FinishThrottleDevices(s *specgen.SpecGenerator) error {
}
return nil
}
+
+// ConfigToSpec takes a completed container config and converts it back into a specgenerator for purposes of cloning an existing container
+func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, contaierID string) (*libpod.Container, error) {
+ c, err := rt.LookupContainer(contaierID)
+ if err != nil {
+ return nil, err
+ }
+ conf := c.Config()
+
+ tmpSystemd := conf.Systemd
+ tmpMounts := conf.Mounts
+
+ conf.Systemd = nil
+ conf.Mounts = []string{}
+
+ specg.Pod = conf.Pod
+
+ matching, err := json.Marshal(conf)
+ if err != nil {
+ return nil, err
+ }
+
+ err = json.Unmarshal(matching, specg)
+ if err != nil {
+ return nil, err
+ }
+ conf.Systemd = tmpSystemd
+ conf.Mounts = tmpMounts
+
+ if conf.Spec != nil && conf.Spec.Linux != nil && conf.Spec.Linux.Resources != nil {
+ if specg.ResourceLimits == nil {
+ specg.ResourceLimits = conf.Spec.Linux.Resources
+ }
+ }
+
+ nameSpaces := []string{"pid", "net", "cgroup", "ipc", "uts", "user"}
+ containers := []string{conf.PIDNsCtr, conf.NetNsCtr, conf.CgroupNsCtr, conf.IPCNsCtr, conf.UTSNsCtr, conf.UserNsCtr}
+ place := []*specgen.Namespace{&specg.PidNS, &specg.NetNS, &specg.CgroupNS, &specg.IpcNS, &specg.UtsNS, &specg.UserNS}
+ for i, ns := range containers {
+ if len(ns) > 0 {
+ ns := specgen.Namespace{NSMode: specgen.FromContainer, Value: ns}
+ place[i] = &ns
+ } else {
+ switch nameSpaces[i] {
+ case "pid":
+ specg.PidNS = specgen.Namespace{NSMode: specgen.Default} //default
+ case "net":
+ switch {
+ case conf.NetMode.IsBridge():
+ toExpose := make(map[uint16]string, len(conf.ExposedPorts))
+ for _, expose := range []map[uint16][]string{conf.ExposedPorts} {
+ for port, proto := range expose {
+ toExpose[port] = strings.Join(proto, ",")
+ }
+ }
+ specg.Expose = toExpose
+ specg.PortMappings = conf.PortMappings
+ specg.NetNS = specgen.Namespace{NSMode: specgen.Bridge}
+ case conf.NetMode.IsSlirp4netns():
+ toExpose := make(map[uint16]string, len(conf.ExposedPorts))
+ for _, expose := range []map[uint16][]string{conf.ExposedPorts} {
+ for port, proto := range expose {
+ toExpose[port] = strings.Join(proto, ",")
+ }
+ }
+ specg.Expose = toExpose
+ specg.PortMappings = conf.PortMappings
+ netMode := strings.Split(string(conf.NetMode), ":")
+ var val string
+ if len(netMode) > 1 {
+ val = netMode[1]
+ }
+ specg.NetNS = specgen.Namespace{NSMode: specgen.Slirp, Value: val}
+ case conf.NetMode.IsPrivate():
+ specg.NetNS = specgen.Namespace{NSMode: specgen.Private}
+ case conf.NetMode.IsDefault():
+ specg.NetNS = specgen.Namespace{NSMode: specgen.Default}
+ case conf.NetMode.IsUserDefined():
+ specg.NetNS = specgen.Namespace{NSMode: specgen.Path, Value: strings.Split(string(conf.NetMode), ":")[1]}
+ case conf.NetMode.IsContainer():
+ specg.NetNS = specgen.Namespace{NSMode: specgen.FromContainer, Value: strings.Split(string(conf.NetMode), ":")[1]}
+ case conf.NetMode.IsPod():
+ specg.NetNS = specgen.Namespace{NSMode: specgen.FromPod, Value: strings.Split(string(conf.NetMode), ":")[1]}
+ }
+ case "cgroup":
+ specg.CgroupNS = specgen.Namespace{NSMode: specgen.Default} //default
+ case "ipc":
+ if conf.ShmDir == "/dev/shm" {
+ specg.IpcNS = specgen.Namespace{NSMode: specgen.Host}
+ } else {
+ specg.IpcNS = specgen.Namespace{NSMode: specgen.Default} //default
+ }
+ case "uts":
+ specg.UtsNS = specgen.Namespace{NSMode: specgen.Default} //default
+ case "user":
+ if conf.AddCurrentUserPasswdEntry {
+ specg.UserNS = specgen.Namespace{NSMode: specgen.KeepID}
+ } else {
+ specg.UserNS = specgen.Namespace{NSMode: specgen.Default} //default
+ }
+ }
+ }
+ }
+
+ specg.IDMappings = &conf.IDMappings
+ specg.ContainerCreateCommand = conf.CreateCommand
+ if len(specg.Rootfs) == 0 {
+ specg.Rootfs = conf.Rootfs
+ }
+ if len(specg.Image) == 0 {
+ specg.Image = conf.RootfsImageID
+ }
+ var named []*specgen.NamedVolume
+ if len(conf.NamedVolumes) != 0 {
+ for _, v := range conf.NamedVolumes {
+ named = append(named, &specgen.NamedVolume{
+ Name: v.Name,
+ Dest: v.Dest,
+ Options: v.Options,
+ })
+ }
+ }
+ specg.Volumes = named
+ var image []*specgen.ImageVolume
+ if len(conf.ImageVolumes) != 0 {
+ for _, v := range conf.ImageVolumes {
+ image = append(image, &specgen.ImageVolume{
+ Source: v.Source,
+ Destination: v.Dest,
+ ReadWrite: v.ReadWrite,
+ })
+ }
+ }
+ specg.ImageVolumes = image
+ var overlay []*specgen.OverlayVolume
+ if len(conf.OverlayVolumes) != 0 {
+ for _, v := range conf.OverlayVolumes {
+ overlay = append(overlay, &specgen.OverlayVolume{
+ Source: v.Source,
+ Destination: v.Dest,
+ Options: v.Options,
+ })
+ }
+ }
+ specg.OverlayVolumes = overlay
+ specg.Mounts = conf.Spec.Mounts
+ specg.HostDeviceList = conf.DeviceHostSrc
+ return c, nil
+}
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
index a7a7353d0..c0b23953f 100644
--- a/pkg/specgen/generate/container_create.go
+++ b/pkg/specgen/generate/container_create.go
@@ -8,6 +8,7 @@ import (
cdi "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
"github.com/containers/common/libimage"
+ "github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/namespaces"
@@ -22,7 +23,7 @@ import (
// MakeContainer creates a container based on the SpecGenerator.
// Returns the created, container and any warnings resulting from creating the
// container, or an error.
-func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator) (*spec.Spec, *specgen.SpecGenerator, []libpod.CtrCreateOption, error) {
+func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator, clone bool, c *libpod.Container) (*spec.Spec, *specgen.SpecGenerator, []libpod.CtrCreateOption, error) {
rtc, err := rt.GetConfigNoCopy()
if err != nil {
return nil, nil, nil, err
@@ -170,6 +171,42 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
options = append(options, opts...)
}
runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts, pod, command, compatibleOptions)
+ if clone { // the container fails to start if cloned due to missing Linux spec entries
+ if c == nil {
+ return nil, nil, nil, errors.New("the given container could not be retrieved")
+ }
+ conf := c.Config()
+ out, err := json.Marshal(conf.Spec.Linux)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ err = json.Unmarshal(out, runtimeSpec.Linux)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ switch {
+ case s.ResourceLimits.CPU != nil:
+ runtimeSpec.Linux.Resources.CPU = s.ResourceLimits.CPU
+ case s.ResourceLimits.Memory != nil:
+ runtimeSpec.Linux.Resources.Memory = s.ResourceLimits.Memory
+ case s.ResourceLimits.BlockIO != nil:
+ runtimeSpec.Linux.Resources.BlockIO = s.ResourceLimits.BlockIO
+ case s.ResourceLimits.Devices != nil:
+ runtimeSpec.Linux.Resources.Devices = s.ResourceLimits.Devices
+ }
+
+ cgroup2, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if cgroup2 && s.ResourceLimits.Memory != nil && s.ResourceLimits.Memory.Swappiness != nil { // conf.Spec.Linux contains memory swappiness established after the spec process we need to remove that
+ s.ResourceLimits.Memory.Swappiness = nil
+ if runtimeSpec.Linux.Resources.Memory != nil {
+ runtimeSpec.Linux.Resources.Memory.Swappiness = nil
+ }
+ }
+ }
if err != nil {
return nil, nil, nil, err
}
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index 945c994ea..8b3550e36 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -301,7 +301,14 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
}
if compatibleOptions.InfraResources == nil && s.ResourceLimits != nil {
- g.Config.Linux.Resources = s.ResourceLimits
+ out, err := json.Marshal(s.ResourceLimits)
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(out, g.Config.Linux.Resources)
+ if err != nil {
+ return nil, err
+ }
} else if s.ResourceLimits != nil { // if we have predefined resource limits we need to make sure we keep the infra and container limits
originalResources, err := json.Marshal(s.ResourceLimits)
if err != nil {
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index 68fda3ad7..8450fe7ce 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -135,7 +135,7 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
return nil, err
}
p.PodSpecGen.InfraContainerSpec.User = "" // infraSpec user will get incorrectly assigned via the container creation process, overwrite here
- rtSpec, spec, opts, err := MakeContainer(context.Background(), rt, p.PodSpecGen.InfraContainerSpec)
+ rtSpec, spec, opts, err := MakeContainer(context.Background(), rt, p.PodSpecGen.InfraContainerSpec, false, nil)
if err != nil {
return nil, err
}
diff --git a/pkg/specgenutil/createparse.go b/pkg/specgenutil/createparse.go
index 9666e4be0..a51396227 100644
--- a/pkg/specgenutil/createparse.go
+++ b/pkg/specgenutil/createparse.go
@@ -26,6 +26,8 @@ func validate(c *entities.ContainerCreateOptions) error {
if _, ok := imageVolType[c.ImageVolume]; !ok {
if c.IsInfra {
c.ImageVolume = "bind"
+ } else if c.IsClone { // the image volume type will be deduced later from the container we are cloning
+ return nil
} else {
return errors.Errorf("invalid image-volume type %q. Pick one of bind, tmpfs, or ignore", c.ImageVolume)
}
diff --git a/pkg/specgenutil/specgen.go b/pkg/specgenutil/specgen.go
index 17699a038..b037e14cc 100644
--- a/pkg/specgenutil/specgen.go
+++ b/pkg/specgenutil/specgen.go
@@ -256,38 +256,43 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if err := setNamespaces(s, c); err != nil {
return err
}
- userNS := namespaces.UsernsMode(s.UserNS.NSMode)
- tempIDMap, err := util.ParseIDMapping(namespaces.UsernsMode(c.UserNS), []string{}, []string{}, "", "")
- if err != nil {
- return err
- }
- s.IDMappings, err = util.ParseIDMapping(userNS, c.UIDMap, c.GIDMap, c.SubUIDName, c.SubGIDName)
- if err != nil {
- return err
- }
- if len(s.IDMappings.GIDMap) == 0 {
- s.IDMappings.AutoUserNsOpts.AdditionalGIDMappings = tempIDMap.AutoUserNsOpts.AdditionalGIDMappings
- if s.UserNS.NSMode == specgen.NamespaceMode("auto") {
- s.IDMappings.AutoUserNs = true
+
+ if s.IDMappings == nil {
+ userNS := namespaces.UsernsMode(s.UserNS.NSMode)
+ tempIDMap, err := util.ParseIDMapping(namespaces.UsernsMode(c.UserNS), []string{}, []string{}, "", "")
+ if err != nil {
+ return err
}
- }
- if len(s.IDMappings.UIDMap) == 0 {
- s.IDMappings.AutoUserNsOpts.AdditionalUIDMappings = tempIDMap.AutoUserNsOpts.AdditionalUIDMappings
- if s.UserNS.NSMode == specgen.NamespaceMode("auto") {
- s.IDMappings.AutoUserNs = true
+ s.IDMappings, err = util.ParseIDMapping(userNS, c.UIDMap, c.GIDMap, c.SubUIDName, c.SubGIDName)
+ if err != nil {
+ return err
+ }
+ if len(s.IDMappings.GIDMap) == 0 {
+ s.IDMappings.AutoUserNsOpts.AdditionalGIDMappings = tempIDMap.AutoUserNsOpts.AdditionalGIDMappings
+ if s.UserNS.NSMode == specgen.NamespaceMode("auto") {
+ s.IDMappings.AutoUserNs = true
+ }
+ }
+ if len(s.IDMappings.UIDMap) == 0 {
+ s.IDMappings.AutoUserNsOpts.AdditionalUIDMappings = tempIDMap.AutoUserNsOpts.AdditionalUIDMappings
+ if s.UserNS.NSMode == specgen.NamespaceMode("auto") {
+ s.IDMappings.AutoUserNs = true
+ }
+ }
+ if tempIDMap.AutoUserNsOpts.Size != 0 {
+ s.IDMappings.AutoUserNsOpts.Size = tempIDMap.AutoUserNsOpts.Size
+ }
+ // If some mappings are specified, assume a private user namespace
+ if userNS.IsDefaultValue() && (!s.IDMappings.HostUIDMapping || !s.IDMappings.HostGIDMapping) {
+ s.UserNS.NSMode = specgen.Private
+ } else {
+ s.UserNS.NSMode = specgen.NamespaceMode(userNS)
}
- }
- if tempIDMap.AutoUserNsOpts.Size != 0 {
- s.IDMappings.AutoUserNsOpts.Size = tempIDMap.AutoUserNsOpts.Size
- }
- // If some mappings are specified, assume a private user namespace
- if userNS.IsDefaultValue() && (!s.IDMappings.HostUIDMapping || !s.IDMappings.HostGIDMapping) {
- s.UserNS.NSMode = specgen.Private
- } else {
- s.UserNS.NSMode = specgen.NamespaceMode(userNS)
}
- s.Terminal = c.TTY
+ if !s.Terminal {
+ s.Terminal = c.TTY
+ }
if err := verifyExpose(c.Expose); err != nil {
return err
@@ -297,8 +302,13 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if c.Net != nil {
s.PortMappings = c.Net.PublishPorts
}
- s.PublishExposedPorts = c.PublishAll
- s.Pod = c.Pod
+ if !s.PublishExposedPorts {
+ s.PublishExposedPorts = c.PublishAll
+ }
+
+ if len(s.Pod) == 0 {
+ s.Pod = c.Pod
+ }
if len(c.PodIDFile) > 0 {
if len(s.Pod) > 0 {
@@ -315,7 +325,10 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if err != nil {
return err
}
- s.Expose = expose
+
+ if len(s.Expose) == 0 {
+ s.Expose = expose
+ }
if sig := c.StopSignal; len(sig) > 0 {
stopSignal, err := util.ParseSignal(sig)
@@ -341,8 +354,13 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
return errors.Wrap(err, "error parsing host environment variables")
}
- s.EnvHost = c.EnvHost
- s.HTTPProxy = c.HTTPProxy
+ if !s.EnvHost {
+ s.EnvHost = c.EnvHost
+ }
+
+ if !s.HTTPProxy {
+ s.HTTPProxy = c.HTTPProxy
+ }
// env-file overrides any previous variables
for _, f := range c.EnvFile {
@@ -359,7 +377,9 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
return err
}
- s.Env = envLib.Join(env, parsedEnv)
+ if len(s.Env) == 0 {
+ s.Env = envLib.Join(env, parsedEnv)
+ }
// LABEL VARIABLES
labels, err := parse.GetAllLabels(c.LabelFile, c.Label)
@@ -371,7 +391,9 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
labels[systemdDefine.EnvVariable] = systemdUnit
}
- s.Labels = labels
+ if len(s.Labels) == 0 {
+ s.Labels = labels
+ }
// ANNOTATIONS
annotations := make(map[string]string)
@@ -390,7 +412,9 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
}
annotations[splitAnnotation[0]] = splitAnnotation[1]
}
- s.Annotations = annotations
+ if len(s.Annotations) == 0 {
+ s.Annotations = annotations
+ }
if len(c.StorageOpts) > 0 {
opts := make(map[string]string, len(c.StorageOpts))
@@ -403,7 +427,9 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
}
s.StorageOpts = opts
}
- s.WorkDir = c.Workdir
+ if len(s.WorkDir) == 0 {
+ s.WorkDir = c.Workdir
+ }
if c.Entrypoint != nil {
entrypoint := []string{}
// Check if entrypoint specified is json
@@ -415,7 +441,9 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
// Include the command used to create the container.
- s.ContainerCreateCommand = os.Args
+ if len(s.ContainerCreateCommand) == 0 {
+ s.ContainerCreateCommand = os.Args
+ }
if len(inputCommand) > 0 {
s.Command = inputCommand
@@ -444,24 +472,37 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.NetworkOptions = c.Net.NetworkOptions
s.UseImageHosts = c.Net.NoHosts
}
- s.HostUsers = c.HostUsers
- s.ImageVolumeMode = c.ImageVolume
+ if len(s.HostUsers) == 0 || len(c.HostUsers) != 0 {
+ s.HostUsers = c.HostUsers
+ }
+ if len(s.ImageVolumeMode) == 0 || len(c.ImageVolume) != 0 {
+ s.ImageVolumeMode = c.ImageVolume
+ }
if s.ImageVolumeMode == "bind" {
s.ImageVolumeMode = "anonymous"
}
- s.Systemd = strings.ToLower(c.Systemd)
- s.SdNotifyMode = c.SdNotifyMode
+ if len(s.Systemd) == 0 || len(c.Systemd) != 0 {
+ s.Systemd = strings.ToLower(c.Systemd)
+ }
+ if len(s.SdNotifyMode) == 0 || len(c.SdNotifyMode) != 0 {
+ s.SdNotifyMode = c.SdNotifyMode
+ }
if s.ResourceLimits == nil {
s.ResourceLimits = &specs.LinuxResources{}
}
- s.ResourceLimits.Memory, err = getMemoryLimits(s, c)
- if err != nil {
- return err
+
+ if s.ResourceLimits.Memory == nil || (len(c.Memory) != 0 || len(c.MemoryReservation) != 0 || len(c.MemorySwap) != 0 || c.MemorySwappiness != 0) {
+ s.ResourceLimits.Memory, err = getMemoryLimits(s, c)
+ if err != nil {
+ return err
+ }
}
- s.ResourceLimits.BlockIO, err = getIOLimits(s, c)
- if err != nil {
- return err
+ if s.ResourceLimits.BlockIO == nil || (len(c.BlkIOWeight) != 0 || len(c.BlkIOWeightDevice) != 0) {
+ s.ResourceLimits.BlockIO, err = getIOLimits(s, c)
+ if err != nil {
+ return err
+ }
}
if c.PIDsLimit != nil {
pids := specs.LinuxPids{
@@ -470,7 +511,10 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.ResourceLimits.Pids = &pids
}
- s.ResourceLimits.CPU = getCPULimits(c)
+
+ if s.ResourceLimits.CPU == nil || (c.CPUPeriod != 0 || c.CPUQuota != 0 || c.CPURTPeriod != 0 || c.CPURTRuntime != 0 || c.CPUS != 0 || len(c.CPUSetCPUs) != 0 || len(c.CPUSetMems) != 0 || c.CPUShares != 0) {
+ s.ResourceLimits.CPU = getCPULimits(c)
+ }
unifieds := make(map[string]string)
for _, unified := range c.CgroupConf {
@@ -495,8 +539,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if ld := c.LogDriver; len(ld) > 0 {
s.LogConfiguration.Driver = ld
}
- s.CgroupParent = c.CgroupParent
- s.CgroupsMode = c.CgroupsMode
+ if len(s.CgroupParent) == 0 || len(c.CgroupParent) != 0 {
+ s.CgroupParent = c.CgroupParent
+ }
+ if len(s.CgroupsMode) == 0 {
+ s.CgroupsMode = c.CgroupsMode
+ }
if s.CgroupsMode == "" {
rtc, err := config.Default()
if err != nil {
@@ -506,9 +554,13 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.CgroupsMode = rtc.Cgroups()
}
- s.Groups = c.GroupAdd
+ if len(s.Groups) == 0 || len(c.GroupAdd) != 0 {
+ s.Groups = c.GroupAdd
+ }
- s.Hostname = c.Hostname
+ if len(s.Hostname) == 0 || len(c.Hostname) != 0 {
+ s.Hostname = c.Hostname
+ }
sysctl := map[string]string{}
if ctl := c.Sysctl; len(ctl) > 0 {
sysctl, err = util.ValidateSysctls(ctl)
@@ -516,15 +568,29 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
return err
}
}
- s.Sysctl = sysctl
+ if len(s.Sysctl) == 0 || len(c.Sysctl) != 0 {
+ s.Sysctl = sysctl
+ }
- s.CapAdd = c.CapAdd
- s.CapDrop = c.CapDrop
- s.Privileged = c.Privileged
- s.ReadOnlyFilesystem = c.ReadOnly
- s.ConmonPidFile = c.ConmonPIDFile
+ if len(s.CapAdd) == 0 || len(c.CapAdd) != 0 {
+ s.CapAdd = c.CapAdd
+ }
+ if len(s.CapDrop) == 0 || len(c.CapDrop) != 0 {
+ s.CapDrop = c.CapDrop
+ }
+ if !s.Privileged {
+ s.Privileged = c.Privileged
+ }
+ if !s.ReadOnlyFilesystem {
+ s.ReadOnlyFilesystem = c.ReadOnly
+ }
+ if len(s.ConmonPidFile) == 0 || len(c.ConmonPIDFile) != 0 {
+ s.ConmonPidFile = c.ConmonPIDFile
+ }
- s.DependencyContainers = c.Requires
+ if len(s.DependencyContainers) == 0 || len(c.Requires) != 0 {
+ s.DependencyContainers = c.Requires
+ }
// TODO
// outside of specgen and oci though
@@ -540,7 +606,9 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
}
sysmap[splitCtl[0]] = splitCtl[1]
}
- s.Sysctl = sysmap
+ if len(s.Sysctl) == 0 || len(c.Sysctl) != 0 {
+ s.Sysctl = sysmap
+ }
if c.CIDFile != "" {
s.Annotations[define.InspectAnnotationCIDFile] = c.CIDFile
@@ -584,9 +652,13 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
}
}
- s.SeccompPolicy = c.SeccompPolicy
+ if len(s.SeccompPolicy) == 0 || len(c.SeccompPolicy) != 0 {
+ s.SeccompPolicy = c.SeccompPolicy
+ }
- s.VolumesFrom = c.VolumesFrom
+ if len(s.VolumesFrom) == 0 || len(c.VolumesFrom) != 0 {
+ s.VolumesFrom = c.VolumesFrom
+ }
// Only add read-only tmpfs mounts in case that we are read-only and the
// read-only tmpfs flag has been set.
@@ -594,10 +666,19 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
if err != nil {
return err
}
- s.Mounts = mounts
- s.Volumes = volumes
- s.OverlayVolumes = overlayVolumes
- s.ImageVolumes = imageVolumes
+ if len(s.Mounts) == 0 || len(c.Mount) != 0 {
+ s.Mounts = mounts
+ }
+ if len(s.Volumes) == 0 || len(c.Volume) != 0 {
+ s.Volumes = volumes
+ }
+ // TODO make sure these work in clone
+ if len(s.OverlayVolumes) == 0 {
+ s.OverlayVolumes = overlayVolumes
+ }
+ if len(s.ImageVolumes) == 0 {
+ s.ImageVolumes = imageVolumes
+ }
for _, dev := range c.Devices {
s.Devices = append(s.Devices, specs.LinuxDevice{Path: dev})
@@ -611,9 +692,15 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.DeviceCgroupRule = append(s.DeviceCgroupRule, dev)
}
- s.Init = c.Init
- s.InitPath = c.InitPath
- s.Stdin = c.Interactive
+ if !s.Init {
+ s.Init = c.Init
+ }
+ if len(s.InitPath) == 0 || len(c.InitPath) != 0 {
+ s.InitPath = c.InitPath
+ }
+ if !s.Stdin {
+ s.Stdin = c.Interactive
+ }
// quiet
// DeviceCgroupRules: c.StringSlice("device-cgroup-rule"),
@@ -656,11 +743,19 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
logOpts[split[0]] = split[1]
}
}
- s.LogConfiguration.Options = logOpts
- s.Name = c.Name
- s.PreserveFDs = c.PreserveFDs
+ if len(s.LogConfiguration.Options) == 0 || len(c.LogOptions) != 0 {
+ s.LogConfiguration.Options = logOpts
+ }
+ if len(s.Name) == 0 || len(c.Name) != 0 {
+ s.Name = c.Name
+ }
+ if s.PreserveFDs == 0 || c.PreserveFDs != 0 {
+ s.PreserveFDs = c.PreserveFDs
+ }
- s.OOMScoreAdj = &c.OOMScoreAdj
+ if s.OOMScoreAdj == nil || c.OOMScoreAdj != 0 {
+ s.OOMScoreAdj = &c.OOMScoreAdj
+ }
if c.Restart != "" {
splitRestart := strings.Split(c.Restart, ":")
switch len(splitRestart) {
@@ -685,9 +780,11 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.RestartPolicy = splitRestart[0]
}
- s.Secrets, s.EnvSecrets, err = parseSecrets(c.Secrets)
- if err != nil {
- return err
+ if len(s.Secrets) == 0 || len(c.Secrets) != 0 {
+ s.Secrets, s.EnvSecrets, err = parseSecrets(c.Secrets)
+ if err != nil {
+ return err
+ }
}
if c.Personality != "" {
@@ -695,21 +792,43 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
s.Personality.Domain = specs.LinuxPersonalityDomain(c.Personality)
}
- s.Remove = c.Rm
- s.StopTimeout = &c.StopTimeout
- s.Timeout = c.Timeout
- s.Timezone = c.Timezone
- s.Umask = c.Umask
- s.PidFile = c.PidFile
- s.Volatile = c.Rm
- s.UnsetEnv = c.UnsetEnv
- s.UnsetEnvAll = c.UnsetEnvAll
+ if !s.Remove {
+ s.Remove = c.Rm
+ }
+ if s.StopTimeout == nil || c.StopTimeout != 0 {
+ s.StopTimeout = &c.StopTimeout
+ }
+ if s.Timeout == 0 || c.Timeout != 0 {
+ s.Timeout = c.Timeout
+ }
+ if len(s.Timezone) == 0 || len(c.Timezone) != 0 {
+ s.Timezone = c.Timezone
+ }
+ if len(s.Umask) == 0 || len(c.Umask) != 0 {
+ s.Umask = c.Umask
+ }
+ if len(s.PidFile) == 0 || len(c.PidFile) != 0 {
+ s.PidFile = c.PidFile
+ }
+ if !s.Volatile {
+ s.Volatile = c.Rm
+ }
+ if len(s.UnsetEnv) == 0 || len(c.UnsetEnv) != 0 {
+ s.UnsetEnv = c.UnsetEnv
+ }
+ if !s.UnsetEnvAll {
+ s.UnsetEnvAll = c.UnsetEnvAll
+ }
// Initcontainers
- s.InitContainerType = c.InitContainerType
+ if len(s.InitContainerType) == 0 || len(c.InitContainerType) != 0 {
+ s.InitContainerType = c.InitContainerType
+ }
t := true
- s.Passwd = &t
+ if s.Passwd == nil {
+ s.Passwd = &t
+ }
return nil
}
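For illustration only, not part of this patch: the FillOutSpecGen changes above repeatedly apply one pattern — a field already populated on the incoming SpecGenerator (for clone, copied from the original container) is kept unless the corresponding create flag was explicitly given. Below is a minimal Go sketch of that pattern with hypothetical types; note that boolean fields can only be switched on by a flag, never back off, which matches the checks above.

package main

import "fmt"

// spec stands in for specgen.SpecGenerator; opts stands in for the parsed create flags.
type spec struct {
	Hostname   string
	Privileged bool
}

type opts struct {
	Hostname   string
	Privileged bool
}

// fillOut mirrors the "if len(s.X) == 0 || len(c.X) != 0" checks above:
// keep the cloned value unless an explicit value was provided.
func fillOut(s *spec, c *opts) {
	if len(s.Hostname) == 0 || len(c.Hostname) != 0 {
		s.Hostname = c.Hostname
	}
	// Booleans inherited from the original container are never unset here.
	if !s.Privileged {
		s.Privileged = c.Privileged
	}
}

func main() {
	// s carries values copied from the container being cloned.
	s := &spec{Hostname: "original", Privileged: true}

	// No overrides given: the cloned values survive.
	fillOut(s, &opts{})
	fmt.Println(s.Hostname, s.Privileged) // original true

	// An explicit Hostname is supplied: the explicit value wins.
	fillOut(s, &opts{Hostname: "override"})
	fmt.Println(s.Hostname, s.Privileged) // override true
}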
diff --git a/test/e2e/container_clone_test.go b/test/e2e/container_clone_test.go
new file mode 100644
index 000000000..bebc6872b
--- /dev/null
+++ b/test/e2e/container_clone_test.go
@@ -0,0 +1,187 @@
+package integration
+
+import (
+ "os"
+
+ . "github.com/containers/podman/v4/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("Podman container clone", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ SkipIfRemote("podman container clone is not supported in remote")
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+
+ })
+
+ It("podman container clone basic test", func() {
+ SkipIfRootlessCgroupsV1("starting a container with the memory limits not supported")
+ create := podmanTest.Podman([]string{"create", ALPINE})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+ clone := podmanTest.Podman([]string{"container", "clone", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ clone = podmanTest.Podman([]string{"container", "clone", clone.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ ctrInspect := podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ ctrInspect.WaitWithDefaultTimeout()
+ Expect(ctrInspect).To(Exit(0))
+ Expect(ctrInspect.InspectContainerToJSON()[0].Name).To(ContainSubstring("-clone1"))
+
+ ctrStart := podmanTest.Podman([]string{"container", "start", clone.OutputToString()})
+ ctrStart.WaitWithDefaultTimeout()
+ Expect(ctrStart).To(Exit(0))
+ })
+
+ It("podman container clone image test", func() {
+ create := podmanTest.Podman([]string{"create", ALPINE})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+ clone := podmanTest.Podman([]string{"container", "clone", create.OutputToString(), "new_name", fedoraMinimal})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ ctrInspect := podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ ctrInspect.WaitWithDefaultTimeout()
+ Expect(ctrInspect).To(Exit(0))
+ Expect(ctrInspect.InspectContainerToJSON()[0].ImageName).To(Equal(fedoraMinimal))
+ Expect(ctrInspect.InspectContainerToJSON()[0].Name).To(Equal("new_name"))
+ })
+
+ It("podman container clone name test", func() {
+ create := podmanTest.Podman([]string{"create", ALPINE})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+ clone := podmanTest.Podman([]string{"container", "clone", "--name", "testing123", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ cloneInspect := podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ cloneInspect.WaitWithDefaultTimeout()
+ Expect(cloneInspect).To(Exit(0))
+ cloneData := cloneInspect.InspectContainerToJSON()
+ Expect(cloneData[0].Name).To(Equal("testing123"))
+ })
+
+ It("podman container clone resource limits override", func() {
+ create := podmanTest.Podman([]string{"create", "--cpus=5", ALPINE})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+ clone := podmanTest.Podman([]string{"container", "clone", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ createInspect := podmanTest.Podman([]string{"inspect", create.OutputToString()})
+ createInspect.WaitWithDefaultTimeout()
+ Expect(createInspect).To(Exit(0))
+ createData := createInspect.InspectContainerToJSON()
+
+ cloneInspect := podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ cloneInspect.WaitWithDefaultTimeout()
+ Expect(cloneInspect).To(Exit(0))
+ cloneData := cloneInspect.InspectContainerToJSON()
+ Expect(createData[0].HostConfig.NanoCpus).To(Equal(cloneData[0].HostConfig.NanoCpus))
+
+ create = podmanTest.Podman([]string{"create", "--memory=5", ALPINE})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+ clone = podmanTest.Podman([]string{"container", "clone", "--cpus=6", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ createInspect = podmanTest.Podman([]string{"inspect", create.OutputToString()})
+ createInspect.WaitWithDefaultTimeout()
+ Expect(createInspect).To(Exit(0))
+ createData = createInspect.InspectContainerToJSON()
+
+ cloneInspect = podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ cloneInspect.WaitWithDefaultTimeout()
+ Expect(cloneInspect).To(Exit(0))
+ cloneData = cloneInspect.InspectContainerToJSON()
+ Expect(createData[0].HostConfig.MemorySwap).To(Equal(cloneData[0].HostConfig.MemorySwap))
+
+ create = podmanTest.Podman([]string{"create", "--cpus=5", ALPINE})
+ create.WaitWithDefaultTimeout()
+ Expect(create).To(Exit(0))
+ clone = podmanTest.Podman([]string{"container", "clone", "--cpus=4", create.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+
+ var nanoCPUs int64
+ numCpus := 4
+ nanoCPUs = int64(numCpus * 1000000000)
+
+ createInspect = podmanTest.Podman([]string{"inspect", create.OutputToString()})
+ createInspect.WaitWithDefaultTimeout()
+ Expect(createInspect).To(Exit(0))
+ createData = createInspect.InspectContainerToJSON()
+
+ cloneInspect = podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ cloneInspect.WaitWithDefaultTimeout()
+ Expect(cloneInspect).To(Exit(0))
+ cloneData = cloneInspect.InspectContainerToJSON()
+ Expect(createData[0].HostConfig.NanoCpus).ToNot(Equal(cloneData[0].HostConfig.NanoCpus))
+ Expect(cloneData[0].HostConfig.NanoCpus).To(Equal(nanoCPUs))
+ })
+
+ It("podman container clone in a pod", func() {
+ SkipIfRootlessCgroupsV1("starting a container with the memory limits not supported")
+ run := podmanTest.Podman([]string{"run", "-dt", "--pod", "new:1234", ALPINE, "sleep", "20"})
+ run.WaitWithDefaultTimeout()
+ Expect(run).To(Exit(0))
+ clone := podmanTest.Podman([]string{"container", "clone", run.OutputToString()})
+ clone.WaitWithDefaultTimeout()
+ Expect(clone).To(Exit(0))
+ ctrStart := podmanTest.Podman([]string{"container", "start", clone.OutputToString()})
+ ctrStart.WaitWithDefaultTimeout()
+ Expect(ctrStart).To(Exit(0))
+
+ checkClone := podmanTest.Podman([]string{"ps", "-f", "id=" + clone.OutputToString(), "--ns", "--format", "{{.Namespaces.IPC}} {{.Namespaces.UTS}} {{.Namespaces.NET}}"})
+ checkClone.WaitWithDefaultTimeout()
+ Expect(checkClone).Should(Exit(0))
+ cloneArray := checkClone.OutputToStringArray()
+
+ checkCreate := podmanTest.Podman([]string{"ps", "-f", "id=" + run.OutputToString(), "--ns", "--format", "{{.Namespaces.IPC}} {{.Namespaces.UTS}} {{.Namespaces.NET}}"})
+ checkCreate.WaitWithDefaultTimeout()
+ Expect(checkCreate).Should(Exit(0))
+ createArray := checkCreate.OutputToStringArray()
+
+ Expect(cloneArray).To(ContainElements(createArray[:]))
+
+ ctrInspect := podmanTest.Podman([]string{"inspect", clone.OutputToString()})
+ ctrInspect.WaitWithDefaultTimeout()
+ Expect(ctrInspect).Should(Exit(0))
+
+ runInspect := podmanTest.Podman([]string{"inspect", run.OutputToString()})
+ runInspect.WaitWithDefaultTimeout()
+ Expect(runInspect).Should(Exit(0))
+
+ Expect(ctrInspect.InspectContainerToJSON()[0].Pod).Should(Equal(runInspect.InspectContainerToJSON()[0].Pod))
+ Expect(ctrInspect.InspectContainerToJSON()[0].HostConfig.NetworkMode).Should(Equal(runInspect.InspectContainerToJSON()[0].HostConfig.NetworkMode))
+ })
+
+})