Diffstat (limited to 'pkg/adapter')
-rw-r--r--  pkg/adapter/containers.go          |  36
-rw-r--r--  pkg/adapter/containers_remote.go   |   6
-rw-r--r--  pkg/adapter/pods.go                | 176
-rw-r--r--  pkg/adapter/pods_remote.go         |   2
-rw-r--r--  pkg/adapter/runtime.go             |   6
-rw-r--r--  pkg/adapter/runtime_remote.go      |   6
-rw-r--r--  pkg/adapter/shortcuts/shortcuts.go |  32
7 files changed, 204 insertions, 60 deletions
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 287bd8474..bc9554193 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -79,8 +79,18 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
 	}
 	logrus.Debugf("Setting maximum stop workers to %d", maxWorkers)

-	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
-	if err != nil {
+	names := cli.InputArgs
+	for _, cidFile := range cli.CIDFiles {
+		content, err := ioutil.ReadFile(cidFile)
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "error reading CIDFile")
+		}
+		id := strings.Split(string(content), "\n")[0]
+		names = append(names, id)
+	}
+
+	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime)
+	if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
 		return nil, nil, err
 	}

@@ -203,8 +213,18 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
 		return ok, failures, nil
 	}

-	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
-	if err != nil {
+	names := cli.InputArgs
+	for _, cidFile := range cli.CIDFiles {
+		content, err := ioutil.ReadFile(cidFile)
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "error reading CIDFile")
+		}
+		id := strings.Split(string(content), "\n")[0]
+		names = append(names, id)
+	}
+
+	ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, names, r.Runtime)
+	if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr) {
 		// Failed to get containers. If force is specified, get the containers ID
 		// and evict them
 		if !cli.Force {
@@ -215,6 +235,10 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
 			logrus.Debugf("Evicting container %q", ctr)
 			id, err := r.EvictContainer(ctx, ctr, cli.Volumes)
 			if err != nil {
+				if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+					logrus.Debugf("Ignoring error (--allow-missing): %v", err)
+					continue
+				}
 				failures[ctr] = errors.Wrapf(err, "Failed to evict container: %q", id)
 				continue
 			}
@@ -232,6 +256,10 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
 			Fn: func() error {
 				err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
 				if err != nil {
+					if cli.Ignore && errors.Cause(err) == define.ErrNoSuchCtr {
+						logrus.Debugf("Ignoring error (--allow-missing): %v", err)
+						return nil
+					}
 					logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
 				}
 				return err
diff --git a/pkg/adapter/containers_remote.go b/pkg/adapter/containers_remote.go
index 20471d895..e34b8ffd9 100644
--- a/pkg/adapter/containers_remote.go
+++ b/pkg/adapter/containers_remote.go
@@ -178,7 +178,7 @@ func (r *LocalRuntime) LookupContainersWithStatus(filters []string) ([]*Containe
 	if err != nil {
 		return nil, err
 	}
-	// This is not performance savy; if this turns out to be a problematic series of lookups, we need to
+	// This is not performance savvy; if this turns out to be a problematic series of lookups, we need to
 	// create a new endpoint to speed things up
 	for _, ctr := range ctrs {
 		container, err := r.LookupContainer(ctr.Id)
@@ -617,7 +617,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
 		return err
 	}
 	if c.All {
-		// We dont have a great way to get all the running containers, so need to get all and then
+		// We don't have a great way to get all the running containers, so need to get all and then
 		// check status on them bc checkpoint considers checkpointing a stopped container an error
 		var runningIds []string
 		for _, id := range ids {
@@ -660,7 +660,7 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues)
 		return err
 	}
 	if c.All {
-		// We dont have a great way to get all the exited containers, so need to get all and then
+		// We don't have a great way to get all the exited containers, so need to get all and then
 		// check status on them bc checkpoint considers restoring a running container an error
 		var exitedIDs []string
 		for _, id := range ids {
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index 6648edc82..e9f3d41a9 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -15,8 +15,10 @@ import (
 	"github.com/containers/libpod/cmd/podman/cliconfig"
 	"github.com/containers/libpod/cmd/podman/shared"
 	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/libpod/define"
 	"github.com/containers/libpod/libpod/image"
 	"github.com/containers/libpod/pkg/adapter/shortcuts"
+	ann "github.com/containers/libpod/pkg/annotations"
 	ns "github.com/containers/libpod/pkg/namespaces"
 	createconfig "github.com/containers/libpod/pkg/spec"
 	"github.com/containers/libpod/pkg/util"
@@ -36,7 +38,7 @@ const (
 )

 // PodContainerStats is struct containing an adapter Pod and a libpod
-// ContainerStats and is used primarily for outputing pod stats.
+// ContainerStats and is used primarily for outputting pod stats.
 type PodContainerStats struct {
 	Pod            *Pod
 	ContainerStats map[string]*libpod.ContainerStats
@@ -93,7 +95,7 @@ func (r *LocalRuntime) RemovePods(ctx context.Context, cli *cliconfig.PodRmValue
 		podids []string
 	)
 	pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
-	if err != nil {
+	if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
 		errs = append(errs, err)
 		return nil, errs
 	}
@@ -150,7 +152,7 @@ func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValue
 		podids []string
 	)
 	pods, err := shortcuts.GetPodsByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
-	if err != nil {
+	if err != nil && !(cli.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
 		errs = append(errs, err)
 		return nil, errs
 	}
@@ -595,12 +597,17 @@ func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayVa
 		volumes[volume.Name] = hostPath.Path
 	}

+	seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations)
+	if err != nil {
+		return nil, err
+	}
+
 	for _, container := range podYAML.Spec.Containers {
 		newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageMissing)
 		if err != nil {
 			return nil, err
 		}
-		createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID())
+		createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths)
 		if err != nil {
 			return nil, err
 		}
@@ -666,62 +673,69 @@ func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
 	return infraPorts
 }

-func setupSecurityContext(containerConfig *createconfig.CreateConfig, containerYAML v1.Container) {
+func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) {
 	if containerYAML.SecurityContext == nil {
 		return
 	}
 	if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
-		containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
+		securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
 	}
 	if containerYAML.SecurityContext.Privileged != nil {
-		containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
+		securityConfig.Privileged = *containerYAML.SecurityContext.Privileged
 	}
 	if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
-		containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
+		securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
 	}
 	if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
 		if seopt.User != "" {
-			containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
-			containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
+			securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
+			securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
 		}
 		if seopt.Role != "" {
-			containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
-			containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
+			securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
+			securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
 		}
 		if seopt.Type != "" {
-			containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
-			containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
+			securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
+			securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
 		}
 		if seopt.Level != "" {
-			containerConfig.SecurityOpts = append(containerConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
-			containerConfig.LabelOpts = append(containerConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
+			securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
+			securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
 		}
 	}
 	if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
 		for _, capability := range caps.Add {
-			containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability))
+			securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability))
 		}
 		for _, capability := range caps.Drop {
-			containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability))
+			securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability))
 		}
 	}
 	if containerYAML.SecurityContext.RunAsUser != nil {
-		containerConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
+		userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
 	}
 	if containerYAML.SecurityContext.RunAsGroup != nil {
-		if containerConfig.User == "" {
-			containerConfig.User = "0"
+		if userConfig.User == "" {
+			userConfig.User = "0"
 		}
-		containerConfig.User = fmt.Sprintf("%s:%d", containerConfig.User, *containerYAML.SecurityContext.RunAsGroup)
+		userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup)
 	}
 }

 // kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
 	var (
 		containerConfig createconfig.CreateConfig
+		pidConfig       createconfig.PidConfig
+		networkConfig   createconfig.NetworkConfig
+		cgroupConfig    createconfig.CgroupConfig
+		utsConfig       createconfig.UtsConfig
+		ipcConfig       createconfig.IpcConfig
+		userConfig      createconfig.UserConfig
+		securityConfig  createconfig.SecurityConfig
 	)

 	// The default for MemorySwappiness is -1, not 0
@@ -737,18 +751,14 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container

 	imageData, _ := newImage.Inspect(ctx)

-	containerConfig.User = "0"
+	userConfig.User = "0"
 	if imageData != nil {
-		containerConfig.User = imageData.Config.User
+		userConfig.User = imageData.Config.User
 	}

-	setupSecurityContext(&containerConfig, containerYAML)
+	setupSecurityContext(&securityConfig, &userConfig, containerYAML)

-	var err error
-	containerConfig.SeccompProfilePath, err = libpod.DefaultSeccompPath()
-	if err != nil {
-		return nil, err
-	}
+	securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name)

 	containerConfig.Command = []string{}
 	if imageData != nil && imageData.Config != nil {
@@ -768,23 +778,38 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
 	containerConfig.StopSignal = 15

 	// If the user does not pass in ID mappings, just set to basics
-	if containerConfig.IDMappings == nil {
-		containerConfig.IDMappings = &storage.IDMappingOptions{}
+	if userConfig.IDMappings == nil {
+		userConfig.IDMappings = &storage.IDMappingOptions{}
 	}

-	containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
-	containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
-	containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
+	networkConfig.NetMode = ns.NetworkMode(namespaces["net"])
+	ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
+	utsConfig.UtsMode = ns.UTSMode(namespaces["uts"])
 	// disabled in code review per mheon
 	//containerConfig.PidMode = ns.PidMode(namespaces["pid"])
-	containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
+	userConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
 	if len(containerConfig.WorkDir) == 0 {
 		containerConfig.WorkDir = "/"
 	}

+	containerConfig.Pid = pidConfig
+	containerConfig.Network = networkConfig
+	containerConfig.Uts = utsConfig
+	containerConfig.Ipc = ipcConfig
+	containerConfig.Cgroup = cgroupConfig
+	containerConfig.User = userConfig
+	containerConfig.Security = securityConfig
+
 	// Set default environment variables and incorporate data from image, if necessary
 	envs := shared.EnvVariablesFromData(imageData)

+	annotations := make(map[string]string)
+	if infraID != "" {
+		annotations[ann.SandboxID] = infraID
+		annotations[ann.ContainerType] = ann.ContainerTypeContainer
+	}
+	containerConfig.Annotations = annotations
+
 	// Environment Variables
 	for _, e := range containerYAML.Env {
 		envs[e.Name] = e.Value
@@ -803,3 +828,80 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
 	}
 	return &containerConfig, nil
 }
+
+// kubeSeccompPaths holds information about a pod YAML's seccomp configuration
+// it holds both container and pod seccomp paths
+type kubeSeccompPaths struct {
+	containerPaths map[string]string
+	podPath        string
+}
+
+// findForContainer checks whether a container has a seccomp path configured for it
+// if not, it returns the podPath, which should always have a value
+func (k *kubeSeccompPaths) findForContainer(ctrName string) string {
+	if path, ok := k.containerPaths[ctrName]; ok {
+		return path
+	}
+	return k.podPath
+}
+
+// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp
+// it parses both pod and container level
+func initializeSeccompPaths(annotations map[string]string) (*kubeSeccompPaths, error) {
+	seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)}
+	var err error
+	if annotations != nil {
+		for annKeyValue, seccomp := range annotations {
+			// check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/
+			prefixAndCtr := strings.Split(annKeyValue, "/")
+			if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix {
+				continue
+			} else if len(prefixAndCtr) != 2 {
+				// this could be caused by a user inputting either of
+				// container.seccomp.security.alpha.kubernetes.io{,/}
+				// both of which are invalid
+				return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0])
+			}
+
+			path, err := verifySeccompPath(seccomp)
+			if err != nil {
+				return nil, err
+			}
+			seccompPaths.containerPaths[prefixAndCtr[1]] = path
+		}
+
+		podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey]
+		if ok {
+			seccompPaths.podPath, err = verifySeccompPath(podSeccomp)
+		} else {
+			seccompPaths.podPath, err = libpod.DefaultSeccompPath()
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return seccompPaths, nil
+}
+
+// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path
+// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+func verifySeccompPath(path string) (string, error) {
+	switch path {
+	case v1.DeprecatedSeccompProfileDockerDefault:
+		fallthrough
+	case v1.SeccompProfileRuntimeDefault:
+		return libpod.DefaultSeccompPath()
+	case "unconfined":
+		return path, nil
+	default:
+		// TODO we have an inconsistency here
+		// k8s parses `localhost/<path>` which is found at `<seccomp_root>`
+		// we currently parse `localhost:<seccomp_root>/<path>
+		// to fully conform, we need to find a good location for the seccomp root
+		parts := strings.Split(path, ":")
+		if parts[0] == "localhost" {
+			return parts[1], nil
+		}
+		return "", errors.Errorf("invalid seccomp path: %s", path)
+	}
+}
diff --git a/pkg/adapter/pods_remote.go b/pkg/adapter/pods_remote.go
index 0c62ac923..16d34769e 100644
--- a/pkg/adapter/pods_remote.go
+++ b/pkg/adapter/pods_remote.go
@@ -19,7 +19,7 @@ import (
 )

 // PodContainerStats is struct containing an adapter Pod and a libpod
-// ContainerStats and is used primarily for outputing pod stats.
+// ContainerStats and is used primarily for outputting pod stats.
 type PodContainerStats struct {
 	Pod            *Pod
 	ContainerStats map[string]*libpod.ContainerStats
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 81a43853c..069283bde 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -27,7 +27,7 @@ import (
 	"github.com/containers/libpod/pkg/util"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/pkg/errors"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 )

 // LocalRuntime describes a typical libpod runtime
@@ -147,8 +147,8 @@ func (r *LocalRuntime) RemoveImage(ctx context.Context, img *ContainerImage, for
 }

 // PruneImages is wrapper into PruneImages within the image pkg
-func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
-	return r.ImageRuntime().PruneImages(ctx, all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
+	return r.ImageRuntime().PruneImages(ctx, all, filter)
 }

 // Export is a wrapper to container export to a tarfile
diff --git a/pkg/adapter/runtime_remote.go b/pkg/adapter/runtime_remote.go
index 12bf550f2..ddd4b5271 100644
--- a/pkg/adapter/runtime_remote.go
+++ b/pkg/adapter/runtime_remote.go
@@ -415,8 +415,8 @@ func (ci *ContainerImage) History(ctx context.Context) ([]*image.History, error)
 }

 // PruneImages is the wrapper call for a remote-client to prune images
-func (r *LocalRuntime) PruneImages(ctx context.Context, all bool) ([]string, error) {
-	return iopodman.ImagesPrune().Call(r.Conn, all)
+func (r *LocalRuntime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
+	return iopodman.ImagesPrune().Call(r.Conn, all, filter)
 }

 // Export is a wrapper to container export to a tarfile
@@ -450,7 +450,7 @@ func (r *LocalRuntime) GetFileFromRemoteHost(remoteFilePath, outputPath string,
 	reader := r.Conn.Reader

 	if _, err := io.CopyN(writer, reader, length); err != nil {
-		return errors.Wrap(err, "file transer failed")
+		return errors.Wrap(err, "file transfer failed")
 	}
 	return nil
 }
diff --git a/pkg/adapter/shortcuts/shortcuts.go b/pkg/adapter/shortcuts/shortcuts.go
index 3e4eff555..4f6cfd6a3 100644
--- a/pkg/adapter/shortcuts/shortcuts.go
+++ b/pkg/adapter/shortcuts/shortcuts.go
@@ -2,9 +2,11 @@ package shortcuts

 import (
 	"github.com/containers/libpod/libpod"
+	"github.com/sirupsen/logrus"
 )

-// GetPodsByContext gets pods whether all, latest, or a slice of names/ids
+// GetPodsByContext returns a slice of pods. Note that all, latest and pods are
+// mutually exclusive arguments.
 func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
 	var outpods []*libpod.Pod
 	if all {
@@ -18,17 +20,24 @@ func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
 		outpods = append(outpods, p)
 		return outpods, nil
 	}
+	var err error
 	for _, p := range pods {
-		pod, err := runtime.LookupPod(p)
-		if err != nil {
-			return nil, err
+		pod, e := runtime.LookupPod(p)
+		if e != nil {
+			// Log all errors here, so callers don't need to.
+			logrus.Debugf("Error looking up pod %q: %v", p, e)
+			if err == nil {
+				err = e
+			}
+		} else {
+			outpods = append(outpods, pod)
 		}
-		outpods = append(outpods, pod)
 	}
-	return outpods, nil
+	return outpods, err
 }

 // GetContainersByContext gets pods whether all, latest, or a slice of names/ids
+// is specified.
 func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
 	var ctr *libpod.Container
 	ctrs = []*libpod.Container{}
@@ -41,10 +50,15 @@ func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Ru
 	} else {
 		for _, n := range names {
 			ctr, e := runtime.LookupContainer(n)
-			if e != nil && err == nil {
-				err = e
+			if e != nil {
+				// Log all errors here, so callers don't need to.
+				logrus.Debugf("Error looking up container %q: %v", n, e)
+				if err == nil {
+					err = e
+				}
+			} else {
+				ctrs = append(ctrs, ctr)
 			}
-			ctrs = append(ctrs, ctr)
 		}
 	}
 	return
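
The seccomp handling added to pkg/adapter/pods.go above resolves a per-container profile from Kubernetes pod annotations, falling back to the pod-level annotation and finally to libpod's default profile. The following standalone Go sketch mirrors that resolution logic (initializeSeccompPaths / findForContainer / verifySeccompPath) so it can be read and run outside libpod; the function names resolveSeccompPaths and verifyPath, the inlined annotation-key constants, the placeholder default profile path, and the simplified prefix matching are assumptions made for illustration and are not part of the patch, which uses the k8s.io/api/core/v1 constants and libpod.DefaultSeccompPath().

package main

import (
	"fmt"
	"strings"
)

const (
	// Mirrors v1.SeccompContainerAnnotationKeyPrefix and v1.SeccompPodAnnotationKey;
	// inlined here so the sketch does not need the Kubernetes API package.
	seccompContainerAnnotationKeyPrefix = "container.seccomp.security.alpha.kubernetes.io/"
	seccompPodAnnotationKey             = "seccomp.security.alpha.kubernetes.io/pod"
	// Placeholder for libpod.DefaultSeccompPath(); the real path is resolved by libpod at runtime.
	defaultSeccompPath = "/usr/share/containers/seccomp.json"
)

// resolveSeccompPaths mimics initializeSeccompPaths: container-level annotations
// win, then the pod-level annotation, then the default profile.
func resolveSeccompPaths(annotations map[string]string) (map[string]string, string, error) {
	containerPaths := make(map[string]string)
	podPath := defaultSeccompPath

	for key, value := range annotations {
		if !strings.HasPrefix(key, seccompContainerAnnotationKeyPrefix) {
			continue
		}
		ctrName := strings.TrimPrefix(key, seccompContainerAnnotationKeyPrefix)
		path, err := verifyPath(value)
		if err != nil {
			return nil, "", err
		}
		containerPaths[ctrName] = path
	}
	if podSeccomp, ok := annotations[seccompPodAnnotationKey]; ok {
		path, err := verifyPath(podSeccomp)
		if err != nil {
			return nil, "", err
		}
		podPath = path
	}
	return containerPaths, podPath, nil
}

// verifyPath accepts runtime/default, docker/default, unconfined, or
// localhost:<path>, matching the switch in verifySeccompPath.
func verifyPath(path string) (string, error) {
	switch {
	case path == "runtime/default" || path == "docker/default":
		return defaultSeccompPath, nil
	case path == "unconfined":
		return path, nil
	case strings.HasPrefix(path, "localhost:"):
		return strings.TrimPrefix(path, "localhost:"), nil
	default:
		return "", fmt.Errorf("invalid seccomp path: %s", path)
	}
}

func main() {
	// Hypothetical pod annotations: one container-specific profile plus a pod default.
	annotations := map[string]string{
		"container.seccomp.security.alpha.kubernetes.io/web": "localhost:/etc/crio/seccomp/web.json",
		"seccomp.security.alpha.kubernetes.io/pod":           "runtime/default",
	}
	ctrPaths, podPath, err := resolveSeccompPaths(annotations)
	if err != nil {
		panic(err)
	}
	// "web" gets its own profile; any other container in the pod falls back to podPath,
	// which is what findForContainer does in the patch.
	fmt.Println(ctrPaths["web"]) // /etc/crio/seccomp/web.json
	fmt.Println(podPath)         // /usr/share/containers/seccomp.json
}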