| author | Brent Baude <bbaude@redhat.com> | 2020-03-29 11:25:56 -0500 |
|---|---|---|
| committer | Brent Baude <bbaude@redhat.com> | 2020-04-03 15:43:03 -0500 |
| commit | 6514a5c80ef91ef6e16e283339cd0b5f78a42322 (patch) | |
| tree | 33ced51adee58d38b39416191e2e8a207ce4ec47 /pkg/specgen/generate | |
| parent | 35f586783388cdff6b4f15e7aff4df1ee72d9b67 (diff) | |
v2podman container create
Create a container in podman v2 using the specgen approach. This is the core implementation; quite a bit of code is still commented out, specifically around volumes, devices, and namespaces. Contributions from SMEs are needed for these parts.
Signed-off-by: Brent Baude <bbaude@redhat.com>
Diffstat (limited to 'pkg/specgen/generate')
| -rw-r--r-- | pkg/specgen/generate/container.go | 168 |
| -rw-r--r-- | pkg/specgen/generate/container_create.go | 190 |
2 files changed, 358 insertions, 0 deletions
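For orientation, here is a minimal sketch of how a caller that already holds a libpod.Runtime might drive the two new entry points in the diff below. The bare SpecGenerator literal, the image name, command, and container name are illustrative assumptions; only CompleteSpec and MakeContainer come from this patch.

```go
package demo

import (
	"context"

	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/pkg/specgen"
	"github.com/containers/libpod/pkg/specgen/generate"
)

// createDemo is a hypothetical caller, not part of this patch.
func createDemo(ctx context.Context, rt *libpod.Runtime) (*libpod.Container, error) {
	// Building the spec by hand is an assumption for this sketch; podman v2
	// normally populates it from CLI flags.
	s := &specgen.SpecGenerator{}
	s.Image = "docker.io/library/alpine:latest"
	s.Command = []string{"echo", "hello"}
	s.Name = "demo"

	// Fill in image-derived defaults: env, labels, entrypoint/cmd, stop
	// signal, seccomp path, user, and throttle devices.
	if err := generate.CompleteSpec(ctx, rt, s); err != nil {
		return nil, err
	}
	// Validate the spec, translate it into libpod options and an OCI spec,
	// and create the container.
	return generate.MakeContainer(rt, s)
}
```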
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
new file mode 100644
index 000000000..78c77fec1
--- /dev/null
+++ b/pkg/specgen/generate/container.go
@@ -0,0 +1,168 @@
+package generate
+
+import (
+	"context"
+
+	"github.com/containers/libpod/libpod"
+	ann "github.com/containers/libpod/pkg/annotations"
+	envLib "github.com/containers/libpod/pkg/env"
+	"github.com/containers/libpod/pkg/signal"
+	"github.com/containers/libpod/pkg/specgen"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerator) error {
+
+	newImage, err := r.ImageRuntime().NewFromLocal(s.Image)
+	if err != nil {
+		return err
+	}
+
+	// Image stop signal
+	if s.StopSignal == nil && newImage.Config != nil {
+		sig, err := signal.ParseSignalNameOrNumber(newImage.Config.StopSignal)
+		if err != nil {
+			return err
+		}
+		s.StopSignal = &sig
+	}
+	// Envs from the image if they don't exist
+	// already
+	if newImage.Config != nil && len(newImage.Config.Env) > 0 {
+		envs, err := envLib.ParseSlice(newImage.Config.Env)
+		if err != nil {
+			return err
+		}
+		for k, v := range envs {
+			if _, exists := s.Env[k]; !exists {
+				s.Env[k] = v
+			}
+		}
+	}
+
+	// Labels from the image that don't exist already
+	if config := newImage.Config; config != nil {
+		for k, v := range config.Labels {
+			if _, exists := s.Labels[k]; !exists {
+				s.Labels[k] = v
+			}
+		}
+	}
+
+	// Annotations:
+	// in the event this container is in a pod, and the pod has an infra container,
+	// we will want to configure it as a type "container" instead of defaulting to
+	// the behavior of a "sandbox" container.
+	// In Kata containers:
+	// - "sandbox" is the annotation that denotes the container should use its own
+	//   VM, which is the default behavior
+	// - "container" denotes the container should join the VM of the SandboxID
+	//   (the infra container)
+	s.Annotations = make(map[string]string)
+	if len(s.Pod) > 0 {
+		s.Annotations[ann.SandboxID] = s.Pod
+		s.Annotations[ann.ContainerType] = ann.ContainerTypeContainer
+	}
+	//
+	// Next, add annotations from the image
+	annotations, err := newImage.Annotations(ctx)
+	if err != nil {
+		return err
+	}
+	for k, v := range annotations {
+		s.Annotations[k] = v
+	}
+
+	// entrypoint
+	if config := newImage.Config; config != nil {
+		if len(s.Entrypoint) < 1 && len(config.Entrypoint) > 0 {
+			s.Entrypoint = config.Entrypoint
+		}
+		if len(s.Command) < 1 && len(config.Cmd) > 0 {
+			s.Command = config.Cmd
+		}
+		if len(s.Command) < 1 && len(s.Entrypoint) < 1 {
+			return errors.Errorf("no command provided, and no CMD or ENTRYPOINT set in the image")
+		}
+		// workdir
+		if len(s.WorkDir) < 1 && len(config.WorkingDir) > 1 {
+			s.WorkDir = config.WorkingDir
+		}
+	}
+
+	if len(s.SeccompProfilePath) < 1 {
+		p, err := libpod.DefaultSeccompPath()
+		if err != nil {
+			return err
+		}
+		s.SeccompProfilePath = p
+	}
+
+	if user := s.User; len(user) == 0 {
+		switch {
+		// TODO This should be enabled when namespaces actually work
+		//case usernsMode.IsKeepID():
+		//	user = fmt.Sprintf("%d:%d", rootless.GetRootlessUID(), rootless.GetRootlessGID())
+		case newImage.Config == nil || (newImage.Config != nil && len(newImage.Config.User) == 0):
+			s.User = "0"
+		default:
+			s.User = newImage.Config.User
+		}
+	}
+	if err := finishThrottleDevices(s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// finishThrottleDevices takes the temporary representation of the throttle
+// devices in the specgen and looks up their major and minor numbers. It then
+// sets the throttle devices proper in the specgen.
+func finishThrottleDevices(s *specgen.SpecGenerator) error {
+	if bps := s.ThrottleReadBpsDevice; len(bps) > 0 {
+		for k, v := range bps {
+			statT := unix.Stat_t{}
+			if err := unix.Stat(k, &statT); err != nil {
+				return err
+			}
+			v.Major = (int64(unix.Major(statT.Rdev)))
+			v.Minor = (int64(unix.Minor(statT.Rdev)))
+			s.ResourceLimits.BlockIO.ThrottleReadBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleReadBpsDevice, v)
+		}
+	}
+	if bps := s.ThrottleWriteBpsDevice; len(bps) > 0 {
+		for k, v := range bps {
+			statT := unix.Stat_t{}
+			if err := unix.Stat(k, &statT); err != nil {
+				return err
+			}
+			v.Major = (int64(unix.Major(statT.Rdev)))
+			v.Minor = (int64(unix.Minor(statT.Rdev)))
+			s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteBpsDevice, v)
+		}
+	}
+	if iops := s.ThrottleReadIOPSDevice; len(iops) > 0 {
+		for k, v := range iops {
+			statT := unix.Stat_t{}
+			if err := unix.Stat(k, &statT); err != nil {
+				return err
+			}
+			v.Major = (int64(unix.Major(statT.Rdev)))
+			v.Minor = (int64(unix.Minor(statT.Rdev)))
+			s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleReadIOPSDevice, v)
+		}
+	}
+	if iops := s.ThrottleWriteIOPSDevice; len(iops) > 0 {
+		for k, v := range iops {
+			statT := unix.Stat_t{}
+			if err := unix.Stat(k, &statT); err != nil {
+				return err
+			}
+			v.Major = (int64(unix.Major(statT.Rdev)))
+			v.Minor = (int64(unix.Minor(statT.Rdev)))
+			s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice = append(s.ResourceLimits.BlockIO.ThrottleWriteIOPSDevice, v)
+		}
+	}
+	return nil
+}
diff --git a/pkg/specgen/generate/container_create.go b/pkg/specgen/generate/container_create.go
new file mode 100644
index 000000000..aad59a861
--- /dev/null
+++ b/pkg/specgen/generate/container_create.go
@@ -0,0 +1,190 @@
+package generate
+
+import (
+	"context"
+	"os"
+
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/pkg/specgen"
+	"github.com/containers/storage"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// MakeContainer creates a container based on the SpecGenerator.
+func MakeContainer(rt *libpod.Runtime, s *specgen.SpecGenerator) (*libpod.Container, error) {
+	if err := s.Validate(); err != nil {
+		return nil, errors.Wrap(err, "invalid config provided")
+	}
+	rtc, err := rt.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	options, err := createContainerOptions(rt, s)
+	if err != nil {
+		return nil, err
+	}
+
+	podmanPath, err := os.Executable()
+	if err != nil {
+		return nil, err
+	}
+	options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
+	newImage, err := rt.ImageRuntime().NewFromLocal(s.Image)
+	if err != nil {
+		return nil, err
+	}
+
+	options = append(options, libpod.WithRootFSFromImage(newImage.ID(), s.Image, s.RawImageName))
+
+	runtimeSpec, err := s.ToOCISpec(rt, newImage)
+	if err != nil {
+		return nil, err
+	}
+	return rt.NewContainer(context.Background(), runtimeSpec, options...)
+}
+
+func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator) ([]libpod.CtrCreateOption, error) {
+	var options []libpod.CtrCreateOption
+	var err error
+
+	if s.Stdin {
+		options = append(options, libpod.WithStdin())
+	}
+	if len(s.Systemd) > 0 {
+		options = append(options, libpod.WithSystemd())
+	}
+	if len(s.Name) > 0 {
+		logrus.Debugf("setting container name %s", s.Name)
+		options = append(options, libpod.WithName(s.Name))
+	}
+	if s.Pod != "" {
+		pod, err := rt.LookupPod(s.Pod)
+		if err != nil {
+			return nil, err
+		}
+		logrus.Debugf("adding container to pod %s", s.Pod)
+		options = append(options, rt.WithPod(pod))
+	}
+	destinations := []string{}
+	// Take all mount and named volume destinations.
+	for _, mount := range s.Mounts {
+		destinations = append(destinations, mount.Destination)
+	}
+	for _, volume := range s.Volumes {
+		destinations = append(destinations, volume.Dest)
+	}
+	options = append(options, libpod.WithUserVolumes(destinations))
+
+	if len(s.Volumes) != 0 {
+		options = append(options, libpod.WithNamedVolumes(s.Volumes))
+	}
+
+	if len(s.Command) != 0 {
+		options = append(options, libpod.WithCommand(s.Command))
+	}
+
+	options = append(options, libpod.WithEntrypoint(s.Entrypoint))
+	if s.StopSignal != nil {
+		options = append(options, libpod.WithStopSignal(*s.StopSignal))
+	}
+	if s.StopTimeout != nil {
+		options = append(options, libpod.WithStopTimeout(*s.StopTimeout))
+	}
+	if s.LogConfiguration != nil {
+		if len(s.LogConfiguration.Path) > 0 {
+			options = append(options, libpod.WithLogPath(s.LogConfiguration.Path))
+		}
+		if len(s.LogConfiguration.Options) > 0 && s.LogConfiguration.Options["tag"] != "" {
+			// Note: I'm really guessing here.
+			options = append(options, libpod.WithLogTag(s.LogConfiguration.Options["tag"]))
+		}
+
+		if len(s.LogConfiguration.Driver) > 0 {
+			options = append(options, libpod.WithLogDriver(s.LogConfiguration.Driver))
+		}
+	}
+
+	// Security options
+	if len(s.SelinuxOpts) > 0 {
+		options = append(options, libpod.WithSecLabels(s.SelinuxOpts))
+	}
+	options = append(options, libpod.WithPrivileged(s.Privileged))
+
+	// Get namespace-related options
+	namespaceOptions, err := s.GenerateNamespaceContainerOpts(rt)
+	if err != nil {
+		return nil, err
+	}
+	options = append(options, namespaceOptions...)
+
+	if len(s.ConmonPidFile) > 0 {
+		options = append(options, libpod.WithConmonPidFile(s.ConmonPidFile))
+	}
+	options = append(options, libpod.WithLabels(s.Labels))
+	if s.ShmSize != nil {
+		options = append(options, libpod.WithShmSize(*s.ShmSize))
+	}
+	if s.Rootfs != "" {
+		options = append(options, libpod.WithRootFS(s.Rootfs))
+	}
+	// Default restart policy is used if not overridden on the command line.
+
+	if s.RestartPolicy != "" {
+		if s.RestartPolicy == "unless-stopped" {
+			return nil, errors.Wrapf(define.ErrInvalidArg, "the unless-stopped restart policy is not supported")
+		}
+		if s.RestartRetries != nil {
+			options = append(options, libpod.WithRestartRetries(*s.RestartRetries))
+		}
+		options = append(options, libpod.WithRestartPolicy(s.RestartPolicy))
+	}
+
+	if s.ContainerHealthCheckConfig.HealthConfig != nil {
+		options = append(options, libpod.WithHealthCheck(s.ContainerHealthCheckConfig.HealthConfig))
+		logrus.Debugf("New container has a health check")
+	}
+	return options, nil
+}
+
+func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.StoreOptions, config *config.Config, podmanPath string) libpod.CtrCreateOption {
+	// We need a cleanup process for containers in the current model.
+	// But we can't assume that the caller is Podman - it could be another
+	// user of the API.
+	// As such, provide a way to specify a path to Podman, so we can
+	// still invoke a cleanup process.
+
+	command := []string{podmanPath,
+		"--root", storageConfig.GraphRoot,
+		"--runroot", storageConfig.RunRoot,
+		"--log-level", logrus.GetLevel().String(),
+		"--cgroup-manager", config.Engine.CgroupManager,
+		"--tmpdir", config.Engine.TmpDir,
+	}
+	if config.Engine.OCIRuntime != "" {
+		command = append(command, []string{"--runtime", config.Engine.OCIRuntime}...)
+	}
+	if storageConfig.GraphDriverName != "" {
+		command = append(command, []string{"--storage-driver", storageConfig.GraphDriverName}...)
+	}
+	for _, opt := range storageConfig.GraphDriverOptions {
+		command = append(command, []string{"--storage-opt", opt}...)
+	}
+	if config.Engine.EventsLogger != "" {
+		command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...)
+	}
+
+	// TODO Mheon wants to leave this for now
+	//if s.sys {
+	//	command = append(command, "--syslog", "true")
+	//}
+	command = append(command, []string{"container", "cleanup"}...)
+
+	if s.Remove {
+		command = append(command, "--rm")
+	}
+	return libpod.WithExitCommand(command)
+}
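For reference, once the storage and engine configuration values are substituted, the exit command assembled by createExitCommandOption takes roughly the form `podman --root <graphroot> --runroot <runroot> --log-level <level> --cgroup-manager <manager> --tmpdir <tmpdir> [--runtime <oci-runtime>] [--storage-driver <driver>] [--storage-opt <opt>]... [--events-backend <backend>] container cleanup [--rm]` (placeholders, not literal values). It is invoked when the container exits, so cleanup still runs even if the process that created the container is no longer around.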