Diffstat (limited to 'pkg/specgen')
-rw-r--r--  pkg/specgen/generate/container.go  |   2
-rw-r--r--  pkg/specgen/generate/kube/kube.go  | 105
-rw-r--r--  pkg/specgen/generate/oci.go        |   2
-rw-r--r--  pkg/specgen/generate/security.go   |   6
-rw-r--r--  pkg/specgen/generate/storage.go    |   2
-rw-r--r--  pkg/specgen/pod_validate.go        |   2
-rw-r--r--  pkg/specgen/specgen.go             |   4
7 files changed, 81 insertions(+), 42 deletions(-)
diff --git a/pkg/specgen/generate/container.go b/pkg/specgen/generate/container.go
index c7e62d185..42fea0277 100644
--- a/pkg/specgen/generate/container.go
+++ b/pkg/specgen/generate/container.go
@@ -163,7 +163,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat
return nil, err
}
- // labels from the image that dont exist already
+ // labels from the image that don't exist already
if len(labels) > 0 && s.Labels == nil {
s.Labels = make(map[string]string)
}
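
The copy loop itself falls outside this hunk's context lines. As a rough sketch of the pattern the fixed comment describes (image labels are merged only when the user has not already set the key), assuming labels holds the image labels as a map[string]string:

    // Sketch only: copy image labels the user has not already set.
    for k, v := range labels {
        if _, ok := s.Labels[k]; !ok {
            s.Labels[k] = v
        }
    }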
diff --git a/pkg/specgen/generate/kube/kube.go b/pkg/specgen/generate/kube/kube.go
index 5cc7891ac..e5b09dcd8 100644
--- a/pkg/specgen/generate/kube/kube.go
+++ b/pkg/specgen/generate/kube/kube.go
@@ -30,7 +30,7 @@ func ToPodGen(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec)
p.Hostname = podName
}
if podYAML.Spec.HostNetwork {
- p.NetNS.Value = "host"
+ p.NetNS.NSMode = specgen.Host
}
if podYAML.Spec.HostAliases != nil {
hosts := make([]string, 0, len(podYAML.Spec.HostAliases))
@@ -47,30 +47,53 @@ func ToPodGen(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec)
return p, nil
}
-func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newImage *image.Image, volumes map[string]*KubeVolume, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *KubeSeccompPaths, restartPolicy string) (*specgen.SpecGenerator, error) {
- s := specgen.NewSpecGenerator(iid, false)
+type CtrSpecGenOptions struct {
+ // Container as read from the pod yaml
+ Container v1.Container
+ // Image available to use (pulled or found local)
+ Image *image.Image
+ // Volumes for all containers
+ Volumes map[string]*KubeVolume
+ // PodID of the parent pod
+ PodID string
+ // PodName of the parent pod
+ PodName string
+ // PodInfraID as the infrastructure container id
+ PodInfraID string
+ // ConfigMaps the configuration maps for environment variables
+ ConfigMaps []v1.ConfigMap
+ // SeccompPaths for finding the seccomp profile path
+ SeccompPaths *KubeSeccompPaths
+ // RestartPolicy defines the restart policy of the container
+ RestartPolicy string
+ // NetNSIsHost tells the container to use the host netns
+ NetNSIsHost bool
+}
+
+func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGenerator, error) {
+ s := specgen.NewSpecGenerator(opts.Container.Image, false)
- // podName should be non-empty for Deployment objects to be able to create
+ // pod name should be non-empty for Deployment objects to be able to create
// multiple pods having containers with unique names
- if len(podName) < 1 {
- return nil, errors.Errorf("kubeContainerToCreateConfig got empty podName")
+ if len(opts.PodName) < 1 {
+ return nil, errors.Errorf("got empty pod name on container creation when playing kube")
}
- s.Name = fmt.Sprintf("%s-%s", podName, containerYAML.Name)
+ s.Name = fmt.Sprintf("%s-%s", opts.PodName, opts.Container.Name)
- s.Terminal = containerYAML.TTY
+ s.Terminal = opts.Container.TTY
- s.Pod = podID
+ s.Pod = opts.PodID
- setupSecurityContext(s, containerYAML)
+ setupSecurityContext(s, opts.Container)
// Since we prefix the container name with pod name to work-around the uniqueness requirement,
// the seccomp profile should reference the actual container name from the YAML
// but apply to the containers with the prefixed name
- s.SeccompProfilePath = seccompPaths.FindForContainer(containerYAML.Name)
+ s.SeccompProfilePath = opts.SeccompPaths.FindForContainer(opts.Container.Name)
s.ResourceLimits = &spec.LinuxResources{}
- milliCPU, err := quantityToInt64(containerYAML.Resources.Limits.Cpu())
+ milliCPU, err := quantityToInt64(opts.Container.Resources.Limits.Cpu())
if err != nil {
return nil, errors.Wrap(err, "Failed to set CPU quota")
}
@@ -82,12 +105,12 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
}
}
- limit, err := quantityToInt64(containerYAML.Resources.Limits.Memory())
+ limit, err := quantityToInt64(opts.Container.Resources.Limits.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory limit")
}
- memoryRes, err := quantityToInt64(containerYAML.Resources.Requests.Memory())
+ memoryRes, err := quantityToInt64(opts.Container.Resources.Requests.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory reservation")
}
@@ -104,19 +127,26 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
s.ResourceLimits.Memory.Reservation = &memoryRes
}
- // TODO: We dont understand why specgen does not take of this, but
+ // TODO: We don't understand why specgen does not take care of this, but
// integration tests clearly pointed out that it was required.
s.Command = []string{}
- imageData, err := newImage.Inspect(ctx)
+ imageData, err := opts.Image.Inspect(ctx)
if err != nil {
return nil, err
}
s.WorkDir = "/"
+ // We will use "Docker field name" internally here to avoid confusion
+ // and reference the "Kubernetes field name" when referencing the YAML
+ // ref: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
+ entrypoint := []string{}
+ cmd := []string{}
if imageData != nil && imageData.Config != nil {
if imageData.Config.WorkingDir != "" {
s.WorkDir = imageData.Config.WorkingDir
}
- s.Command = imageData.Config.Entrypoint
+ // Pull entrypoint and cmd from image
+ entrypoint = imageData.Config.Entrypoint
+ cmd = imageData.Config.Cmd
s.Labels = imageData.Config.Labels
if len(imageData.Config.StopSignal) > 0 {
stopSignal, err := util.ParseSignal(imageData.Config.StopSignal)
@@ -126,22 +156,27 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
s.StopSignal = &stopSignal
}
}
- if len(containerYAML.Command) != 0 {
- s.Command = containerYAML.Command
+ // If only the yaml.Command is specified, set it as the entrypoint and drop the image Cmd
+ if len(opts.Container.Command) != 0 {
+ entrypoint = opts.Container.Command
+ cmd = []string{}
}
- // doc https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
- if len(containerYAML.Args) != 0 {
- s.Command = append(s.Command, containerYAML.Args...)
+ // Only override the cmd field if yaml.Args is specified
+ // Keep the image entrypoint, or the yaml.command if specified
+ if len(opts.Container.Args) != 0 {
+ cmd = opts.Container.Args
}
+
+ s.Command = append(entrypoint, cmd...)
// FIXME,
// we are currently ignoring imageData.Config.ExposedPorts
- if containerYAML.WorkingDir != "" {
- s.WorkDir = containerYAML.WorkingDir
+ if opts.Container.WorkingDir != "" {
+ s.WorkDir = opts.Container.WorkingDir
}
annotations := make(map[string]string)
- if infraID != "" {
- annotations[ann.SandboxID] = infraID
+ if opts.PodInfraID != "" {
+ annotations[ann.SandboxID] = opts.PodInfraID
annotations[ann.ContainerType] = ann.ContainerTypeContainer
}
s.Annotations = annotations
@@ -153,13 +188,13 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
envs[keyval[0]] = keyval[1]
}
- for _, env := range containerYAML.Env {
- value := envVarValue(env, configMaps)
+ for _, env := range opts.Container.Env {
+ value := envVarValue(env, opts.ConfigMaps)
envs[env.Name] = value
}
- for _, envFrom := range containerYAML.EnvFrom {
- cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
+ for _, envFrom := range opts.Container.EnvFrom {
+ cmEnvs := envVarsFromConfigMap(envFrom, opts.ConfigMaps)
for k, v := range cmEnvs {
envs[k] = v
@@ -167,8 +202,8 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
}
s.Env = envs
- for _, volume := range containerYAML.VolumeMounts {
- volumeSource, exists := volumes[volume.Name]
+ for _, volume := range opts.Container.VolumeMounts {
+ volumeSource, exists := opts.Volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
}
@@ -200,7 +235,11 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
}
}
- s.RestartPolicy = restartPolicy
+ s.RestartPolicy = opts.RestartPolicy
+
+ if opts.NetNSIsHost {
+ s.NetNS.NSMode = specgen.Host
+ }
return s, nil
}
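
For context, a minimal sketch of how a caller might now invoke ToSpecGen, building the options struct instead of passing a long positional argument list. The variable names on the right-hand side are hypothetical and the real call sites are outside this diff; it assumes the generate/kube package is imported as kube:

    opts := kube.CtrSpecGenOptions{
        Container:     containerYAML,
        Image:         pulledImage,
        Volumes:       podVolumes,
        PodID:         pod.ID(),
        PodName:       podName,
        PodInfraID:    infraID,
        ConfigMaps:    configMaps,
        SeccompPaths:  seccompPaths,
        RestartPolicy: ctrRestartPolicy,
        NetNSIsHost:   podYAML.Spec.HostNetwork,
    }
    specGen, err := kube.ToSpecGen(ctx, &opts)
    if err != nil {
        return nil, err
    }

The entrypoint/cmd handling added above mirrors the Kubernetes rules linked in the comment: the image ENTRYPOINT and CMD are used only when the YAML omits command and args, yaml.Command replaces the entrypoint and discards the image CMD, and yaml.Args replaces only the CMD part while keeping whichever entrypoint is in effect.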
diff --git a/pkg/specgen/generate/oci.go b/pkg/specgen/generate/oci.go
index c24dcf4c0..ba68de6fd 100644
--- a/pkg/specgen/generate/oci.go
+++ b/pkg/specgen/generate/oci.go
@@ -319,7 +319,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
}
// BIND MOUNTS
- configSpec.Mounts = SupercedeUserMounts(mounts, configSpec.Mounts)
+ configSpec.Mounts = SupersedeUserMounts(mounts, configSpec.Mounts)
// Process mounts to ensure correct options
if err := InitFSMounts(configSpec.Mounts); err != nil {
return nil, err
diff --git a/pkg/specgen/generate/security.go b/pkg/specgen/generate/security.go
index b69bd9091..9fceec7b3 100644
--- a/pkg/specgen/generate/security.go
+++ b/pkg/specgen/generate/security.go
@@ -115,7 +115,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
if err != nil {
return errors.Wrapf(err, "capabilities requested by user or image are not valid: %q", strings.Join(capsRequired, ","))
} else {
- // Verify all capRequiered are in the capList
+ // Verify all capRequired are in the capList
for _, cap := range capsRequired {
if !util.StringInSlice(cap, caplist) {
privCapsRequired = append(privCapsRequired, cap)
@@ -141,7 +141,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
configSpec.Process.Capabilities.Effective = caplist
configSpec.Process.Capabilities.Permitted = caplist
} else {
- userCaps, err := capabilities.NormalizeCapabilities(s.CapAdd)
+ userCaps, err := capabilities.MergeCapabilities(nil, s.CapAdd, nil)
if err != nil {
return errors.Wrapf(err, "capabilities requested by user are not valid: %q", strings.Join(s.CapAdd, ","))
}
@@ -172,7 +172,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
// Clear default Seccomp profile from Generator for unconfined containers
// and privileged containers which do not specify a seccomp profile.
- if s.SeccompProfilePath == "unconfined" || (s.Privileged && (s.SeccompProfilePath == config.SeccompOverridePath || s.SeccompProfilePath == config.SeccompDefaultPath)) {
+ if s.SeccompProfilePath == "unconfined" || (s.Privileged && (s.SeccompProfilePath == "" || s.SeccompProfilePath == config.SeccompOverridePath || s.SeccompProfilePath == config.SeccompDefaultPath)) {
configSpec.Linux.Seccomp = nil
}
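
For reference, a standalone sketch of the new capabilities call, assuming the github.com/containers/common/pkg/capabilities API shown above: with a nil base and nil drops, only the user-requested additions are resolved, and (unlike the previous NormalizeCapabilities call) an "all" entry can be expanded into the full capability list, depending on the containers/common version in use:

    package main

    import (
        "fmt"

        "github.com/containers/common/pkg/capabilities"
    )

    func main() {
        // Resolve user-requested additions against an empty base; exact
        // normalization (CAP_ prefixing, "all" expansion) depends on the
        // containers/common version in use.
        userCaps, err := capabilities.MergeCapabilities(nil, []string{"net_admin", "sys_time"}, nil)
        if err != nil {
            fmt.Println("requested capabilities are not valid:", err)
            return
        }
        fmt.Println(userCaps) // e.g. [CAP_NET_ADMIN CAP_SYS_TIME]
    }

The seccomp change above treats an unset profile path the same as the default paths for privileged containers, so privileged containers without an explicit profile also get the default seccomp profile cleared.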
diff --git a/pkg/specgen/generate/storage.go b/pkg/specgen/generate/storage.go
index 331a5c5bf..f523ac5bf 100644
--- a/pkg/specgen/generate/storage.go
+++ b/pkg/specgen/generate/storage.go
@@ -366,7 +366,7 @@ func addContainerInitBinary(s *specgen.SpecGenerator, path string) (spec.Mount,
// TODO: Should we unmount subtree mounts? E.g., if /tmp/ is mounted by
// one mount, and we already have /tmp/a and /tmp/b, should we remove
// the /tmp/a and /tmp/b mounts in favor of the more general /tmp?
-func SupercedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
+func SupersedeUserMounts(mounts []spec.Mount, configMount []spec.Mount) []spec.Mount {
if len(mounts) > 0 {
// If we have overlapping mounts, remove them from the spec in favor of
// the user-added volume mounts
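
The rename keeps the semantics spelled out in the comment: user-specified mounts win over generator-created mounts at the same destination. A rough sketch of that idea, not the actual implementation (which this diff does not show), using the runtime-spec Mount type:

    import spec "github.com/opencontainers/runtime-spec/specs-go"

    // Sketch: drop config mounts whose destination is also targeted by a
    // user mount, then append the user mounts.
    func supersedeSketch(userMounts, configMounts []spec.Mount) []spec.Mount {
        keep := make([]spec.Mount, 0, len(configMounts)+len(userMounts))
        for _, cm := range configMounts {
            overridden := false
            for _, um := range userMounts {
                if um.Destination == cm.Destination {
                    overridden = true
                    break
                }
            }
            if !overridden {
                keep = append(keep, cm)
            }
        }
        return append(keep, userMounts...)
    }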
diff --git a/pkg/specgen/pod_validate.go b/pkg/specgen/pod_validate.go
index a6c61a203..7c81f3f9f 100644
--- a/pkg/specgen/pod_validate.go
+++ b/pkg/specgen/pod_validate.go
@@ -48,7 +48,7 @@ func (p *PodSpecGenerator) Validate() error {
}
if p.NoInfra {
if p.NetNS.NSMode != Default && p.NetNS.NSMode != "" {
- return errors.New("NoInfra and network modes cannot be used toegther")
+ return errors.New("NoInfra and network modes cannot be used together")
}
if p.StaticIP != nil {
return exclusivePodOptions("NoInfra", "StaticIP")
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index 964b89fa4..a6cc0a730 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -19,7 +19,7 @@ type LogConfig struct {
// Only available if LogDriver is set to "json-file" or "k8s-file".
// Optional.
Path string `json:"path,omitempty"`
- // Size is the maximimup size of the log file
+ // Size is the maximum size of the log file
// Optional.
Size int64 `json:"size,omitempty"`
// A set of options to accompany the log driver.
@@ -302,7 +302,7 @@ type ContainerSecurityConfig struct {
IDMappings *storage.IDMappingOptions `json:"idmappings,omitempty"`
// ReadOnlyFilesystem indicates that everything will be mounted
// as read-only
- ReadOnlyFilesystem bool `json:"read_only_filesystem,omittempty"`
+ ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"`
// Umask is the umask the init process of the container will be run with.
Umask string `json:"umask,omitempty"`
// ProcOpts are the options used for the proc mount.
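
The ReadOnlyFilesystem change is more than a spelling fix: encoding/json ignores struct tag options it does not recognize, so the misspelled "omittempty" had no effect and the field was always serialized, even when false. A small standalone illustration:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type withTypo struct {
        ReadOnlyFilesystem bool `json:"read_only_filesystem,omittempty"` // unknown option, silently ignored
    }

    type fixed struct {
        ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"`
    }

    func main() {
        a, _ := json.Marshal(withTypo{}) // {"read_only_filesystem":false}
        b, _ := json.Marshal(fixed{})    // {}
        fmt.Println(string(a), string(b))
    }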