Diffstat (limited to 'pkg/domain/infra')
37 files changed, 2658 insertions, 208 deletions
diff --git a/pkg/domain/infra/abi/auto-update.go b/pkg/domain/infra/abi/auto-update.go new file mode 100644 index 000000000..9fcc451fd --- /dev/null +++ b/pkg/domain/infra/abi/auto-update.go @@ -0,0 +1,17 @@ +package abi + +import ( + "context" + + "github.com/containers/libpod/pkg/autoupdate" + "github.com/containers/libpod/pkg/domain/entities" +) + +func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) { + // Convert the entities options to the autoupdate ones. We can't use + // them in the entities package as low-level packages must not leak + // into the remote client. + autoOpts := autoupdate.Options{Authfile: options.Authfile} + units, failures := autoupdate.AutoUpdate(ic.Libpod, autoOpts) + return &entities.AutoUpdateReport{Units: units}, failures +} diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index 4c3389418..e982c7c11 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -8,8 +8,7 @@ import ( "strconv" "strings" "sync" - - lpfilters "github.com/containers/libpod/libpod/filters" + "time" "github.com/containers/buildah" "github.com/containers/common/pkg/config" @@ -17,8 +16,10 @@ import ( "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + lpfilters "github.com/containers/libpod/libpod/filters" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/libpod/logs" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/checkpoint" "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/domain/infra/abi/terminal" @@ -32,9 +33,9 @@ import ( "github.com/sirupsen/logrus" ) -// getContainersByContext gets pods whether all, latest, or a slice of names/ids -// is specified. -func getContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) { +// getContainersAndInputByContext gets containers whether all, latest, or a slice of names/ids +// is specified. It also returns a list of the corresponding input name used to lookup each container. +func getContainersAndInputByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, rawInput []string, err error) { var ctr *libpod.Container ctrs = []*libpod.Container{} @@ -43,6 +44,7 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru ctrs, err = runtime.GetAllContainers() case latest: ctr, err = runtime.GetLatestContainer() + rawInput = append(rawInput, ctr.ID()) ctrs = append(ctrs, ctr) default: for _, n := range names { @@ -54,6 +56,7 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru err = e } } else { + rawInput = append(rawInput, n) ctrs = append(ctrs, ctr) } } @@ -61,6 +64,13 @@ func getContainersByContext(all, latest bool, names []string, runtime *libpod.Ru return } +// getContainersByContext gets containers whether all, latest, or a slice of names/ids +// is specified. 
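The refactored helper returns two index-aligned slices, the resolved containers and the raw names or IDs used to look each one up, so callers can report results in terms of the user's original input. A minimal caller-side sketch (assumes an initialized *libpod.Runtime named rt; reportContainers itself is hypothetical):

    func reportContainers(rt *libpod.Runtime, names []string) error {
        ctrs, rawInputs, err := getContainersAndInputByContext(false, false, names, rt)
        if err != nil {
            return err
        }
        // The slices are index-aligned: rawInputs[i] is the name or ID
        // that resolved to ctrs[i].
        for i := range ctrs {
            fmt.Printf("%s (requested as %q)\n", ctrs[i].ID(), rawInputs[i])
        }
        return nil
    }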
+func getContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) { + ctrs, _, err = getContainersAndInputByContext(all, latest, names, runtime) + return +} + // TODO: Should return *entities.ContainerExistsReport, error func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) { _, err := ic.Libpod.LookupContainer(nameOrId) @@ -184,6 +194,10 @@ func (ic *ContainerEngine) ContainerPrune(ctx context.Context, options entities. filterFuncs = append(filterFuncs, generatedFunc) } } + return ic.pruneContainersHelper(ctx, filterFuncs) +} + +func (ic *ContainerEngine) pruneContainersHelper(ctx context.Context, filterFuncs []libpod.ContainerFilter) (*entities.ContainerPruneReport, error) { prunedContainers, pruneErrors, err := ic.Libpod.PruneContainers(filterFuncs) if err != nil { return nil, err @@ -420,6 +434,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ TCPEstablished: options.TCPEstablished, TargetFile: options.Export, IgnoreRootfs: options.IgnoreRootFS, + KeepRunning: options.LeaveRunning, } if options.All { @@ -514,13 +529,29 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, } // If the container is in a pod, also set to recursively start dependencies - if err := terminal.StartAttachCtr(ctx, ctr, options.Stdin, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, false, ctr.PodID() != ""); err != nil && errors.Cause(err) != define.ErrDetach { + err = terminal.StartAttachCtr(ctx, ctr, options.Stdout, options.Stderr, options.Stdin, options.DetachKeys, options.SigProxy, false, ctr.PodID() != "") + if err != nil && errors.Cause(err) != define.ErrDetach { return errors.Wrapf(err, "error attaching to container %s", ctr.ID()) } return nil } -func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) { +func makeExecConfig(options entities.ExecOptions) *libpod.ExecConfig { + execConfig := new(libpod.ExecConfig) + execConfig.Command = options.Cmd + execConfig.Terminal = options.Tty + execConfig.Privileged = options.Privileged + execConfig.Environment = options.Envs + execConfig.User = options.User + execConfig.WorkDir = options.WorkDir + execConfig.DetachKeys = &options.DetachKeys + execConfig.PreserveFDs = options.PreserveFDs + execConfig.AttachStdin = options.Interactive + + return execConfig +} + +func checkExecPreserveFDs(options entities.ExecOptions) (int, error) { ec := define.ExecErrorCodeGeneric if options.PreserveFDs > 0 { entries, err := ioutil.ReadDir("/proc/self/fd") @@ -543,24 +574,77 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, o } } } + return ec, nil +} + +func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) { + ec, err := checkExecPreserveFDs(options) + if err != nil { + return ec, err + } ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod) if err != nil { return ec, err } ctr := ctrs[0] - ec, err = terminal.ExecAttachCtr(ctx, ctr, options.Tty, options.Privileged, options.Envs, options.Cmd, options.User, options.WorkDir, &options.Streams, options.PreserveFDs, options.DetachKeys) + + execConfig := makeExecConfig(options) + + ec, err = terminal.ExecAttachCtr(ctx, ctr, execConfig, &streams) return define.TranslateExecErrorToExitCode(ec, err), err } +func 
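checkExecPreserveFDs confirms that every file descriptor the caller asked to preserve is actually open by listing /proc/self/fd. The same idea as a standalone, standard-library-only sketch (Linux-only, since it depends on procfs):

    // openFDs returns the set of file descriptors currently open in
    // this process, read from /proc/self/fd.
    func openFDs() (map[int]bool, error) {
        entries, err := ioutil.ReadDir("/proc/self/fd")
        if err != nil {
            return nil, err
        }
        fds := make(map[int]bool, len(entries))
        for _, e := range entries {
            if n, err := strconv.Atoi(e.Name()); err == nil {
                fds[n] = true
            }
        }
        return fds, nil
    }

    // preserveFDsOpen reports whether FDs 3 through 3+n-1 are all open,
    // which is what preserving n descriptors requires.
    func preserveFDsOpen(n int) (bool, error) {
        fds, err := openFDs()
        if err != nil {
            return false, err
        }
        for fd := 3; fd < 3+n; fd++ {
            if !fds[fd] {
                return false, nil
            }
        }
        return true, nil
    }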
(ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId string, options entities.ExecOptions) (string, error) { + _, err := checkExecPreserveFDs(options) + if err != nil { + return "", err + } + ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod) + if err != nil { + return "", err + } + ctr := ctrs[0] + + execConfig := makeExecConfig(options) + + // Make an exit command + storageConfig := ic.Libpod.StorageConfig() + runtimeConfig, err := ic.Libpod.GetConfig() + if err != nil { + return "", errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command") + } + podmanPath, err := os.Executable() + if err != nil { + return "", errors.Wrapf(err, "error retrieving executable to build exec exit command") + } + // TODO: Add some ability to toggle syslog + exitCommandArgs := generate.CreateExitCommandArgs(storageConfig, runtimeConfig, podmanPath, false, true, true) + execConfig.ExitCommand = exitCommandArgs + + // Create and start the exec session + id, err := ctr.ExecCreate(execConfig) + if err != nil { + return "", err + } + + // TODO: we should try and retrieve exit code if this fails. + if err := ctr.ExecStart(id); err != nil { + return "", err + } + return id, nil +} + func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { var reports []*entities.ContainerStartReport var exitCode = define.ExecErrorCodeGeneric - ctrs, err := getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod) + ctrs, rawInputs, err := getContainersAndInputByContext(false, options.Latest, namesOrIds, ic.Libpod) if err != nil { return nil, err } // There can only be one container if attach was used - for _, ctr := range ctrs { + for i := range ctrs { + ctr := ctrs[i] + rawInput := rawInputs[i] ctrState, err := ctr.State() if err != nil { return nil, err @@ -574,6 +658,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri // Exit cleanly immediately reports = append(reports, &entities.ContainerStartReport{ Id: ctr.ID(), + RawInput: rawInput, Err: nil, ExitCode: 0, }) @@ -584,6 +669,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri logrus.Debugf("Deadlock error: %v", err) reports = append(reports, &entities.ContainerStartReport{ Id: ctr.ID(), + RawInput: rawInput, Err: err, ExitCode: define.ExitCode(err), }) @@ -593,6 +679,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri if ctrRunning { reports = append(reports, &entities.ContainerStartReport{ Id: ctr.ID(), + RawInput: rawInput, Err: nil, ExitCode: 0, }) @@ -602,6 +689,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri if err != nil { reports = append(reports, &entities.ContainerStartReport{ Id: ctr.ID(), + RawInput: rawInput, Err: err, ExitCode: exitCode, }) @@ -624,6 +712,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri } reports = append(reports, &entities.ContainerStartReport{ Id: ctr.ID(), + RawInput: rawInput, Err: err, ExitCode: exitCode, }) @@ -636,6 +725,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri // If the container is in a pod, also set to recursively start dependencies report := &entities.ContainerStartReport{ Id: ctr.ID(), + RawInput: rawInput, ExitCode: 125, } if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil { @@ -812,6 +902,20 @@ func (ic 
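A detached exec session outlives the CLI process, so cleanup cannot happen on function return; instead an exit command, an argv stored on the session, is run by the runtime when the session ends. Conceptually it re-invokes the current binary (the exact argv comes from generate.CreateExitCommandArgs, which is not shown here, so treat these arguments as an illustration only):

    podmanPath, err := os.Executable()
    if err != nil {
        return "", err
    }
    // Hypothetical argv: re-invoke ourselves to clean the session up.
    // ContainerCleanup gains an options.Exec path elsewhere in this diff
    // for exactly this purpose.
    execConfig.ExitCommand = []string{podmanPath, "container", "cleanup", "--exec", sessionID}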
*ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st for _, ctr := range ctrs { var err error report := entities.ContainerCleanupReport{Id: ctr.ID()} + + if options.Exec != "" { + if options.Remove { + if err := ctr.ExecRemove(options.Exec, false); err != nil { + return nil, err + } + } else { + if err := ctr.ExecCleanup(options.Exec); err != nil { + return nil, err + } + } + return []*entities.ContainerCleanupReport{}, nil + } + if options.Remove { err = ic.Libpod.RemoveContainer(ctx, ctr, false, true) if err != nil { @@ -981,3 +1085,77 @@ func (ic *ContainerEngine) Shutdown(_ context.Context) { _ = ic.Libpod.Shutdown(false) }) } + +func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) error { + defer close(options.StatChan) + containerFunc := ic.Libpod.GetRunningContainers + switch { + case len(namesOrIds) > 0: + containerFunc = func() ([]*libpod.Container, error) { return ic.Libpod.GetContainersByList(namesOrIds) } + case options.Latest: + containerFunc = func() ([]*libpod.Container, error) { + lastCtr, err := ic.Libpod.GetLatestContainer() + if err != nil { + return nil, err + } + return []*libpod.Container{lastCtr}, nil + } + case options.All: + containerFunc = ic.Libpod.GetAllContainers + } + + ctrs, err := containerFunc() + if err != nil { + return errors.Wrapf(err, "unable to get list of containers") + } + containerStats := map[string]*define.ContainerStats{} + for _, ctr := range ctrs { + initialStats, err := ctr.GetContainerStats(&define.ContainerStats{}) + if err != nil { + // when doing "all", don't worry about containers that are not running + cause := errors.Cause(err) + if options.All && (cause == define.ErrCtrRemoved || cause == define.ErrNoSuchCtr || cause == define.ErrCtrStateInvalid) { + continue + } + if cause == cgroups.ErrCgroupV1Rootless { + err = cause + } + return err + } + containerStats[ctr.ID()] = initialStats + } + for { + reportStats := []*define.ContainerStats{} + for _, ctr := range ctrs { + id := ctr.ID() + if _, ok := containerStats[ctr.ID()]; !ok { + initialStats, err := ctr.GetContainerStats(&define.ContainerStats{}) + if errors.Cause(err) == define.ErrCtrRemoved || errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrStateInvalid { + // skip dealing with a container that is gone + continue + } + if err != nil { + return err + } + containerStats[id] = initialStats + } + stats, err := ctr.GetContainerStats(containerStats[id]) + if err != nil && errors.Cause(err) != define.ErrNoSuchCtr { + return err + } + // replace the previous measurement with the current one + containerStats[id] = stats + reportStats = append(reportStats, stats) + } + ctrs, err = containerFunc() + if err != nil { + return err + } + options.StatChan <- reportStats + if options.NoStream { + break + } + time.Sleep(time.Second) + } + return nil +} diff --git a/pkg/domain/infra/abi/containers_runlabel.go b/pkg/domain/infra/abi/containers_runlabel.go new file mode 100644 index 000000000..41f4444d5 --- /dev/null +++ b/pkg/domain/infra/abi/containers_runlabel.go @@ -0,0 +1,280 @@ +package abi + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/types" + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/image" + "github.com/containers/libpod/pkg/domain/entities" + envLib "github.com/containers/libpod/pkg/env" + "github.com/containers/libpod/pkg/util" + "github.com/containers/libpod/utils" + 
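ContainerStats streams one snapshot per second on options.StatChan and closes the channel before returning, so a consumer can simply range over it. A consumer sketch (runs in the same package; the ContainerID and CPU field names on define.ContainerStats are assumptions here):

    statChan := make(chan []*define.ContainerStats)
    go func() {
        opts := entities.ContainerStatsOptions{All: true, StatChan: statChan}
        if err := ic.ContainerStats(ctx, nil, opts); err != nil {
            logrus.Error(err)
        }
    }()
    // The loop ends when the engine closes StatChan (NoStream, error, or shutdown).
    for snapshot := range statChan {
        for _, s := range snapshot {
            fmt.Printf("%s: %.2f%% CPU\n", s.ContainerID, s.CPU)
        }
    }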
"github.com/google/shlex" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string, imageRef string, args []string, options entities.ContainerRunlabelOptions) error { + // First, get the image and pull it if needed. + img, err := ic.runlabelImage(ctx, label, imageRef, options) + if err != nil { + return err + } + // Extract the runlabel from the image. + runlabel, err := img.GetLabel(ctx, label) + if err != nil { + return err + } + + cmd, env, err := generateRunlabelCommand(runlabel, img, args, options) + if err != nil { + return err + } + + stdErr := os.Stderr + stdOut := os.Stdout + stdIn := os.Stdin + if options.Quiet { + stdErr = nil + stdOut = nil + stdIn = nil + } + + // If container already exists && --replace given -- Nuke it + if options.Replace { + for i, entry := range cmd { + if entry == "--name" { + name := cmd[i+1] + ctr, err := ic.Libpod.LookupContainer(name) + if err != nil { + if errors.Cause(err) != define.ErrNoSuchCtr { + logrus.Debugf("Error occurred searching for container %s: %s", name, err.Error()) + return err + } + } else { + logrus.Debugf("Runlabel --replace option given. Container %s will be deleted. The new container will be named %s", ctr.ID(), name) + if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false); err != nil { + return err + } + } + break + } + } + } + + return utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...) +} + +// runlabelImage returns an image based on the specified image AND options. +func (ic *ContainerEngine) runlabelImage(ctx context.Context, label string, imageRef string, options entities.ContainerRunlabelOptions) (*image.Image, error) { + // First, look up the image locally. If we get an error and requested + // to pull, fallthrough and pull it. + img, err := ic.Libpod.ImageRuntime().NewFromLocal(imageRef) + switch { + case err == nil: + return img, nil + case !options.Pull: + return nil, err + default: + // Fallthrough and pull! + } + + // Parse credentials if specified. + var credentials *types.DockerAuthConfig + if options.Credentials != "" { + credentials, err = util.ParseRegistryCreds(options.Credentials) + if err != nil { + return nil, err + } + } + + // Suppress pull progress bars if requested. + pullOutput := os.Stdout + if options.Quiet { + pullOutput = nil // c/image/copy takes care of the rest + } + + // Pull the image. + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerCertPath: options.CertDir, + DockerInsecureSkipTLSVerify: options.SkipTLSVerify, + DockerRegistryCreds: credentials, + } + + return ic.Libpod.ImageRuntime().New(ctx, imageRef, options.SignaturePolicy, options.Authfile, pullOutput, &dockerRegistryOptions, image.SigningOptions{}, &label, util.PullImageMissing) +} + +// generateRunlabelCommand generates the to-be-executed command as a string +// slice along with a base environment. +func generateRunlabelCommand(runlabel string, img *image.Image, args []string, options entities.ContainerRunlabelOptions) ([]string, []string, error) { + var ( + err error + name, imageName string + globalOpts string + cmd, env []string + ) + + // TODO: How do we get global opts as done in v1? + + // Extract the imageName (or ID). + imgNames := img.Names() + if len(imgNames) == 0 { + imageName = img.ID() + } else { + imageName = imgNames[0] + } + + // Use the user-specified name or extract one from the image. 
+ if options.Name != "" { + name = options.Name + } else { + name, err = image.GetImageBaseName(imageName) + if err != nil { + return nil, nil, err + } + } + + // Append the user-specified arguments to the runlabel (command). + if len(args) > 0 { + runlabel = fmt.Sprintf("%s %s", runlabel, strings.Join(args, " ")) + } + + cmd, err = generateCommand(runlabel, imageName, name, globalOpts) + if err != nil { + return nil, nil, err + } + + env = generateRunEnvironment(name, imageName, options) + env = append(env, "PODMAN_RUNLABEL_NESTED=1") + envmap, err := envLib.ParseSlice(env) + if err != nil { + return nil, nil, err + } + + envmapper := func(k string) string { + switch k { + case "OPT1": + return envmap["OPT1"] + case "OPT2": + return envmap["OPT2"] + case "OPT3": + return envmap["OPT3"] + case "PWD": + // I would prefer to use os.getenv but it appears PWD is not in the os env list. + d, err := os.Getwd() + if err != nil { + logrus.Error("unable to determine current working directory") + return "" + } + return d + } + return "" + } + newS := os.Expand(strings.Join(cmd, " "), envmapper) + cmd, err = shlex.Split(newS) + if err != nil { + return nil, nil, err + } + return cmd, env, nil +} + +// generateCommand takes a label (string) and converts it to an executable command +func generateCommand(command, imageName, name, globalOpts string) ([]string, error) { + var ( + newCommand []string + ) + if name == "" { + name = imageName + } + + cmd, err := shlex.Split(command) + if err != nil { + return nil, err + } + + prog, err := substituteCommand(cmd[0]) + if err != nil { + return nil, err + } + newCommand = append(newCommand, prog) + + for _, arg := range cmd[1:] { + var newArg string + switch arg { + case "IMAGE": + newArg = imageName + case "$IMAGE": + newArg = imageName + case "IMAGE=IMAGE": + newArg = fmt.Sprintf("IMAGE=%s", imageName) + case "IMAGE=$IMAGE": + newArg = fmt.Sprintf("IMAGE=%s", imageName) + case "NAME": + newArg = name + case "NAME=NAME": + newArg = fmt.Sprintf("NAME=%s", name) + case "NAME=$NAME": + newArg = fmt.Sprintf("NAME=%s", name) + case "$NAME": + newArg = name + case "$GLOBAL_OPTS": + newArg = globalOpts + default: + newArg = arg + } + newCommand = append(newCommand, newArg) + } + return newCommand, nil +} + +// GenerateRunEnvironment merges the current environment variables with optional +// environment variables provided by the user +func generateRunEnvironment(name, imageName string, options entities.ContainerRunlabelOptions) []string { + newEnv := os.Environ() + if options.Optional1 != "" { + newEnv = append(newEnv, fmt.Sprintf("OPT1=%s", options.Optional1)) + } + if options.Optional2 != "" { + newEnv = append(newEnv, fmt.Sprintf("OPT2=%s", options.Optional2)) + } + if options.Optional3 != "" { + newEnv = append(newEnv, fmt.Sprintf("OPT3=%s", options.Optional3)) + } + return newEnv +} + +func substituteCommand(cmd string) (string, error) { + var ( + newCommand string + ) + + // Replace cmd with "/proc/self/exe" if "podman" or "docker" is being + // used. If "/usr/bin/docker" is provided, we also sub in podman. + // Otherwise, leave the command unchanged. + if cmd == "podman" || filepath.Base(cmd) == "docker" { + newCommand = "/proc/self/exe" + } else { + newCommand = cmd + } + + // If cmd is an absolute or relative path, check if the file exists. + // Throw an error if it doesn't exist. 
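The runlabel string is expanded with os.Expand against a small mapper and then re-tokenized with shlex, which honors shell-style quoting that a plain strings.Split would break. A self-contained illustration:

    package main

    import (
        "fmt"
        "os"

        "github.com/google/shlex"
    )

    func main() {
        env := map[string]string{"OPT1": "--cap-add=SYS_ADMIN", "NAME": "web"}
        expanded := os.Expand("run --name $NAME $OPT1 'a quoted arg'", func(k string) string {
            return env[k] // unknown keys expand to the empty string
        })
        args, err := shlex.Split(expanded)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", args)
        // ["run" "--name" "web" "--cap-add=SYS_ADMIN" "a quoted arg"]
    }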
+ if strings.Contains(newCommand, "/") || strings.HasPrefix(newCommand, ".") { + res, err := filepath.Abs(newCommand) + if err != nil { + return "", err + } + if _, err := os.Stat(res); !os.IsNotExist(err) { + return res, nil + } else if err != nil { + return "", err + } + } + + return newCommand, nil +} diff --git a/pkg/domain/infra/abi/events.go b/pkg/domain/infra/abi/events.go index 20773cdce..7ec9db369 100644 --- a/pkg/domain/infra/abi/events.go +++ b/pkg/domain/infra/abi/events.go @@ -5,12 +5,9 @@ import ( "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/domain/entities" - "github.com/sirupsen/logrus" ) func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptions) error { readOpts := events.ReadOptions{FromStart: opts.FromStart, Stream: opts.Stream, Filters: opts.Filter, EventChannel: opts.EventChan, Since: opts.Since, Until: opts.Until} - err := ic.Libpod.Events(readOpts) - logrus.Error(err) - return err + return ic.Libpod.Events(readOpts) } diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go index f69ba560e..abb5e2911 100644 --- a/pkg/domain/infra/abi/generate.go +++ b/pkg/domain/infra/abi/generate.go @@ -1,14 +1,18 @@ package abi import ( + "bytes" "context" "fmt" "strings" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/systemd/generate" + "github.com/ghodss/yaml" "github.com/pkg/errors" + k8sAPI "k8s.io/api/core/v1" ) func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) { @@ -155,14 +159,14 @@ func (ic *ContainerEngine) generateSystemdgenContainerInfo(nameOrID string, pod func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, string) { var kind, name, ctrName string if pod == nil { - kind = "container" + kind = options.ContainerPrefix //defaults to container name = ctr.ID() if options.Name { name = ctr.Name() } ctrName = name } else { - kind = "pod" + kind = options.PodPrefix //defaults to pod name = pod.ID() ctrName = ctr.ID() if options.Name { @@ -170,5 +174,86 @@ func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entitie ctrName = ctr.Name() } } - return ctrName, fmt.Sprintf("%s-%s", kind, name) + return ctrName, fmt.Sprintf("%s%s%s", kind, options.Separator, name) +} + +func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) { + var ( + pod *libpod.Pod + podYAML *k8sAPI.Pod + err error + ctr *libpod.Container + servicePorts []k8sAPI.ServicePort + serviceYAML k8sAPI.Service + ) + // Get the container in question. 
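With the new prefix and separator options, a systemd unit name is simply prefix + separator + identifier; the old hard-coded "container-<id>" and "pod-<id>" forms become the defaults rather than the only possibility. For example:

    // Defaults: ContainerPrefix="container", PodPrefix="pod", Separator="-".
    fmt.Sprintf("%s%s%s", "container", "-", "3f5a8b219cde") // "container-3f5a8b219cde"
    // With ContainerPrefix="ctr" and Separator="_", plus --name:
    fmt.Sprintf("%s%s%s", "ctr", "_", "web") // "ctr_web"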
+ ctr, err = ic.Libpod.LookupContainer(nameOrID) + if err != nil { + pod, err = ic.Libpod.LookupPod(nameOrID) + if err != nil { + return nil, err + } + podYAML, servicePorts, err = pod.GenerateForKube() + } else { + if len(ctr.Dependencies()) > 0 { + return nil, errors.Wrapf(define.ErrNotImplemented, "containers with dependencies") + } + podYAML, err = ctr.GenerateForKube() + } + if err != nil { + return nil, err + } + + if options.Service { + serviceYAML = libpod.GenerateKubeServiceFromV1Pod(podYAML, servicePorts) + } + + content, err := generateKubeOutput(podYAML, &serviceYAML) + if err != nil { + return nil, err + } + + return &entities.GenerateKubeReport{Reader: bytes.NewReader(content)}, nil +} + +func generateKubeOutput(podYAML *k8sAPI.Pod, serviceYAML *k8sAPI.Service) ([]byte, error) { + var ( + output []byte + marshalledPod []byte + marshalledService []byte + err error + ) + + marshalledPod, err = yaml.Marshal(podYAML) + if err != nil { + return nil, err + } + + if serviceYAML != nil { + marshalledService, err = yaml.Marshal(serviceYAML) + if err != nil { + return nil, err + } + } + + header := `# Generation of Kubernetes YAML is still under development! +# +# Save the output of this file and use kubectl create -f to import +# it into Kubernetes. +# +# Created with podman-%s +` + podmanVersion, err := define.GetVersion() + if err != nil { + return nil, err + } + + output = append(output, []byte(fmt.Sprintf(header, podmanVersion.Version))...) + output = append(output, marshalledPod...) + if serviceYAML != nil { + output = append(output, []byte("---\n")...) + output = append(output, marshalledService...) + } + + return output, nil } diff --git a/pkg/domain/infra/abi/healthcheck.go b/pkg/domain/infra/abi/healthcheck.go index 351bf4f7e..4e925ef56 100644 --- a/pkg/domain/infra/abi/healthcheck.go +++ b/pkg/domain/infra/abi/healthcheck.go @@ -3,7 +3,6 @@ package abi import ( "context" - "github.com/containers/libpod/libpod" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/domain/entities" ) @@ -13,9 +12,9 @@ func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrId string, if err != nil { return nil, err } - hcStatus := "unhealthy" - if status == libpod.HealthCheckSuccess { - hcStatus = "healthy" + hcStatus := define.HealthCheckUnhealthy + if status == define.HealthCheckSuccess { + hcStatus = define.HealthCheckHealthy } report := define.HealthCheckResults{ Status: hcStatus, diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 7c63276e5..d8af4d339 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -4,14 +4,22 @@ import ( "context" "fmt" "io" + "io/ioutil" + "net/url" "os" + "path/filepath" + "strconv" "strings" + "github.com/containers/libpod/pkg/rootless" + "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker" dockerarchive "github.com/containers/image/v5/docker/archive" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/libpod/libpod/define" @@ -19,14 +27,17 @@ import ( libpodImage "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/domain/entities" domainUtils "github.com/containers/libpod/pkg/domain/utils" + "github.com/containers/libpod/pkg/trust" 
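generateKubeOutput joins the pod and service documents with a "---" separator, the standard way to carry multiple objects in one Kubernetes YAML file. The marshaling itself is a single call; ghodss/yaml round-trips through JSON, which is why the JSON tags on the k8s.io/api structs are honored. A minimal sketch:

    import (
        "github.com/ghodss/yaml"
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func demoPodYAML() ([]byte, error) {
        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
        return yaml.Marshal(pod) // "metadata:\n  name: demo\n..." plus empty spec/status
    }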
"github.com/containers/libpod/pkg/util" "github.com/containers/storage" - "github.com/hashicorp/go-multierror" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +// SignatureStoreDir defines default directory to store signatures +const SignatureStoreDir = "/var/lib/containers/sigstore" + func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.BoolReport, error) { _, err := ir.Libpod.ImageRuntime().NewFromLocal(nameOrId) if err != nil && errors.Cause(err) != define.ErrNoSuchImage { @@ -36,7 +47,11 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.Boo } func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) { - results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, opts.All, opts.Filter) + return ir.pruneImagesHelper(ctx, opts.All, opts.Filter) +} + +func (ir *ImageEngine) pruneImagesHelper(ctx context.Context, all bool, filters []string) (*entities.ImagePruneReport, error) { + results, err := ir.Libpod.ImageRuntime().PruneImages(ctx, all, filters) if err != nil { return nil, err } @@ -106,19 +121,18 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti } var registryCreds *types.DockerAuthConfig - if options.Credentials != "" { - creds, err := util.ParseRegistryCreds(options.Credentials) - if err != nil { - return nil, err + if len(options.Username) > 0 && len(options.Password) > 0 { + registryCreds = &types.DockerAuthConfig{ + Username: options.Username, + Password: options.Password, } - registryCreds = creds } dockerRegistryOptions := image.DockerRegistryOptions{ DockerRegistryCreds: registryCreds, DockerCertPath: options.CertDir, OSChoice: options.OverrideOS, ArchitectureChoice: options.OverrideArch, - DockerInsecureSkipTLSVerify: options.TLSVerify, + DockerInsecureSkipTLSVerify: options.SkipTLSVerify, } if !options.AllTags { @@ -211,17 +225,16 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri } var registryCreds *types.DockerAuthConfig - if options.Credentials != "" { - creds, err := util.ParseRegistryCreds(options.Credentials) - if err != nil { - return err + if len(options.Username) > 0 && len(options.Password) > 0 { + registryCreds = &types.DockerAuthConfig{ + Username: options.Username, + Password: options.Password, } - registryCreds = creds } dockerRegistryOptions := image.DockerRegistryOptions{ DockerRegistryCreds: registryCreds, DockerCertPath: options.CertDir, - DockerInsecureSkipTLSVerify: options.TLSVerify, + DockerInsecureSkipTLSVerify: options.SkipTLSVerify, } signOptions := image.SigningOptions{ @@ -370,7 +383,7 @@ func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.Im Filter: *filter, Limit: opts.Limit, NoTrunc: opts.NoTrunc, - InsecureSkipTLSVerify: opts.TLSVerify, + InsecureSkipTLSVerify: opts.SkipTLSVerify, } searchResults, err := image.SearchImages(term, searchOpts) @@ -419,8 +432,10 @@ func (ir *ImageEngine) Tree(ctx context.Context, nameOrId string, opts entities. return &entities.ImageTreeReport{Tree: results}, nil } -// Remove removes one or more images from local storage. -func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entities.ImageRemoveOptions) (report *entities.ImageRemoveReport, finalError error) { +// removeErrorsToExitCode returns an exit code for the specified slice of +// image-removal errors. 
The error codes are set according to the documented +// behaviour in the Podman man pages. +func removeErrorsToExitCode(rmErrors []error) int { var ( // noSuchImageErrors indicates that at least one image was not found. noSuchImageErrors bool @@ -430,57 +445,53 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie // otherErrors indicates that at least one error other than the two // above occured. otherErrors bool - // deleteError is a multierror to conveniently collect errors during - // removal. We really want to delete as many images as possible and not - // error out immediately. - deleteError *multierror.Error ) - report = &entities.ImageRemoveReport{} + if len(rmErrors) == 0 { + return 0 + } - // Set the removalCode and the error after all work is done. - defer func() { - switch { - // 2 - case inUseErrors: - // One of the specified images has child images or is - // being used by a container. - report.ExitCode = 2 - // 1 - case noSuchImageErrors && !(otherErrors || inUseErrors): - // One of the specified images did not exist, and no other - // failures. - report.ExitCode = 1 - // 0 + for _, e := range rmErrors { + switch errors.Cause(e) { + case define.ErrNoSuchImage: + noSuchImageErrors = true + case define.ErrImageInUse, storage.ErrImageUsedByContainer: + inUseErrors = true default: - // Nothing to do. - } - if deleteError != nil { - // go-multierror has a trailing new line which we need to remove to normalize the string. - finalError = deleteError.ErrorOrNil() - finalError = errors.New(strings.TrimSpace(finalError.Error())) + otherErrors = true } + } + + switch { + case inUseErrors: + // One of the specified images has child images or is + // being used by a container. + return 2 + case noSuchImageErrors && !(otherErrors || inUseErrors): + // One of the specified images did not exist, and no other + // failures. + return 1 + default: + return 125 + } +} + +// Remove removes one or more images from local storage. +func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entities.ImageRemoveOptions) (report *entities.ImageRemoveReport, rmErrors []error) { + report = &entities.ImageRemoveReport{} + + // Set the exit code at very end. + defer func() { + report.ExitCode = removeErrorsToExitCode(rmErrors) }() // deleteImage is an anonymous function to conveniently delete an image // without having to pass all local data around. deleteImage := func(img *image.Image) error { results, err := ir.Libpod.RemoveImage(ctx, img, opts.Force) - switch errors.Cause(err) { - case nil: - break - case storage.ErrImageUsedByContainer: - inUseErrors = true // Important for exit codes in Podman. - return errors.New( - fmt.Sprintf("A container associated with containers/storage, i.e. via Buildah, CRI-O, etc., may be associated with this image: %-12.12s\n", img.ID())) - case define.ErrImageInUse: - inUseErrors = true - return err - default: - otherErrors = true // Important for exit codes in Podman. + if err != nil { return err } - report.Deleted = append(report.Deleted, results.Deleted) report.Untagged = append(report.Untagged, results.Untagged...) return nil @@ -493,9 +504,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie for { storageImages, err := ir.Libpod.ImageRuntime().GetRWImages() if err != nil { - deleteError = multierror.Append(deleteError, - errors.Wrapf(err, "unable to query local images")) - otherErrors = true // Important for exit codes in Podman. 
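Read as a whole: an empty error slice maps to 0, any in-use error wins with 2, a pure not-found failure maps to 1, and anything else yields 125. Checking the function above against those cases:

    removeErrorsToExitCode(nil)                                // 0
    removeErrorsToExitCode([]error{define.ErrNoSuchImage})     // 1
    removeErrorsToExitCode([]error{define.ErrImageInUse})      // 2
    removeErrorsToExitCode([]error{errors.New("i/o timeout")}) // 125
    // Mixed case: in-use takes precedence over not-found.
    removeErrorsToExitCode([]error{define.ErrNoSuchImage, define.ErrImageInUse}) // 2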
+ rmErrors = append(rmErrors, err) return } // No images (left) to remove, so we're done. @@ -504,9 +513,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie } // Prevent infinity loops by making a delete-progress check. if previousImages == len(storageImages) { - otherErrors = true // Important for exit codes in Podman. - deleteError = multierror.Append(deleteError, - errors.New("unable to delete all images, check errors and re-run image removal if needed")) + rmErrors = append(rmErrors, errors.New("unable to delete all images, check errors and re-run image removal if needed")) break } previousImages = len(storageImages) @@ -514,15 +521,15 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie for _, img := range storageImages { isParent, err := img.IsParent(ctx) if err != nil { - otherErrors = true // Important for exit codes in Podman. - deleteError = multierror.Append(deleteError, err) + rmErrors = append(rmErrors, err) + continue } // Skip parent images. if isParent { continue } if err := deleteImage(img); err != nil { - deleteError = multierror.Append(deleteError, err) + rmErrors = append(rmErrors, err) } } } @@ -533,21 +540,13 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie // Delete only the specified images. for _, id := range images { img, err := ir.Libpod.ImageRuntime().NewFromLocal(id) - switch errors.Cause(err) { - case nil: - break - case image.ErrNoSuchImage: - noSuchImageErrors = true // Important for exit codes in Podman. - fallthrough - default: - deleteError = multierror.Append(deleteError, err) + if err != nil { + rmErrors = append(rmErrors, err) continue } - err = deleteImage(img) if err != nil { - otherErrors = true // Important for exit codes in Podman. 
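The all-images path relies on a progress guard: if a full pass deletes nothing, the count of remaining read-write images stops shrinking, and the loop records an error and stops instead of spinning forever. The pattern in isolation (list and deleteOne are hypothetical stand-ins):

    previous := -1
    for {
        items, err := list()
        if err != nil {
            return err
        }
        if len(items) == 0 {
            break // everything removed
        }
        if previous == len(items) {
            return errors.New("no progress in last pass; aborting")
        }
        previous = len(items)
        for _, it := range items {
            _ = deleteOne(it) // collect failures and keep going
        }
    }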
- deleteError = multierror.Append(deleteError, err) + rmErrors = append(rmErrors, err) } } @@ -560,3 +559,145 @@ func (ir *ImageEngine) Shutdown(_ context.Context) { _ = ir.Libpod.Shutdown(false) }) } + +func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) { + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerCertPath: options.CertDir, + } + + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return nil, errors.Wrap(err, "error initializing GPG") + } + defer mech.Close() + if err := mech.SupportsSigning(); err != nil { + return nil, errors.Wrap(err, "signing is not supported") + } + sc := ir.Libpod.SystemContext() + sc.DockerCertPath = options.CertDir + + systemRegistriesDirPath := trust.RegistriesDirPath(sc) + registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath) + if err != nil { + return nil, errors.Wrapf(err, "error reading registry configuration") + } + + for _, signimage := range names { + var sigStoreDir string + srcRef, err := alltransports.ParseImageName(signimage) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image name") + } + rawSource, err := srcRef.NewImageSource(ctx, sc) + if err != nil { + return nil, errors.Wrapf(err, "error getting image source") + } + err = rawSource.Close() + if err != nil { + logrus.Errorf("unable to close new image source %q", err) + } + getManifest, _, err := rawSource.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error getting getManifest") + } + dockerReference := rawSource.Reference().DockerReference() + if dockerReference == nil { + return nil, errors.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference())) + } + + // create the signstore file + rtc, err := ir.Libpod.GetConfig() + if err != nil { + return nil, err + } + newImage, err := ir.Libpod.ImageRuntime().New(ctx, signimage, rtc.Engine.SignaturePolicyPath, "", os.Stderr, &dockerRegistryOptions, image.SigningOptions{SignBy: options.SignBy}, nil, util.PullImageMissing) + if err != nil { + return nil, errors.Wrapf(err, "error pulling image %s", signimage) + } + if sigStoreDir == "" { + if rootless.IsRootless() { + sigStoreDir = filepath.Join(filepath.Dir(ir.Libpod.StorageConfig().GraphRoot), "sigstore") + } else { + registryInfo := trust.HaveMatchRegistry(rawSource.Reference().DockerReference().String(), registryConfigs) + if registryInfo != nil { + if sigStoreDir = registryInfo.SigStoreStaging; sigStoreDir == "" { + sigStoreDir = registryInfo.SigStore + + } + } + } + } + sigStoreDir, err = isValidSigStoreDir(sigStoreDir) + if err != nil { + return nil, errors.Wrapf(err, "invalid signature storage %s", sigStoreDir) + } + repos, err := newImage.RepoDigests() + if err != nil { + return nil, errors.Wrapf(err, "error calculating repo digests for %s", signimage) + } + if len(repos) == 0 { + logrus.Errorf("no repodigests associated with the image %s", signimage) + continue + } + + // create signature + newSig, err := signature.SignDockerManifest(getManifest, dockerReference.String(), mech, options.SignBy) + if err != nil { + return nil, errors.Wrapf(err, "error creating new signature") + } + + trimmedDigest := strings.TrimPrefix(repos[0], strings.Split(repos[0], "/")[0]) + sigStoreDir = filepath.Join(sigStoreDir, strings.Replace(trimmedDigest, ":", "=", 1)) + if err := os.MkdirAll(sigStoreDir, 0751); err != nil { + // The directory is allowed to exist + if !os.IsExist(err) { 
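Stripped of image pulling and sigstore-directory resolution, the signing core is: open a GPG mechanism, confirm it can sign, fetch the raw manifest, and produce a detached signature bound to the canonical Docker reference. In essence (the c/image/v5 calls are the ones used above; error handling elided):

    mech, err := signature.NewGPGSigningMechanism()
    if err != nil { /* no GPG available */ }
    defer mech.Close()
    if err := mech.SupportsSigning(); err != nil { /* e.g. built without gpgme */ }

    raw, _, err := rawSource.GetManifest(ctx, nil) // the exact bytes being signed
    newSig, err := signature.SignDockerManifest(raw, dockerReference.String(), mech, options.SignBy)
    // newSig is then written below the per-repository sigstore directory.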
+ logrus.Errorf("error creating directory %s: %s", sigStoreDir, err) + continue + } + } + sigFilename, err := getSigFilename(sigStoreDir) + if err != nil { + logrus.Errorf("error creating sigstore file: %v", err) + continue + } + err = ioutil.WriteFile(filepath.Join(sigStoreDir, sigFilename), newSig, 0644) + if err != nil { + logrus.Errorf("error storing signature for %s", rawSource.Reference().DockerReference().String()) + continue + } + } + return nil, nil +} + +func getSigFilename(sigStoreDirPath string) (string, error) { + sigFileSuffix := 1 + sigFiles, err := ioutil.ReadDir(sigStoreDirPath) + if err != nil { + return "", err + } + sigFilenames := make(map[string]bool) + for _, file := range sigFiles { + sigFilenames[file.Name()] = true + } + for { + sigFilename := "signature-" + strconv.Itoa(sigFileSuffix) + if _, exists := sigFilenames[sigFilename]; !exists { + return sigFilename, nil + } + sigFileSuffix++ + } +} + +func isValidSigStoreDir(sigStoreDir string) (string, error) { + writeURIs := map[string]bool{"file": true} + url, err := url.Parse(sigStoreDir) + if err != nil { + return sigStoreDir, errors.Wrapf(err, "invalid directory %s", sigStoreDir) + } + _, exists := writeURIs[url.Scheme] + if !exists { + return sigStoreDir, errors.Errorf("writing to %s is not supported. Use a supported scheme", sigStoreDir) + } + sigStoreDir = url.Path + return sigStoreDir, nil +} diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go index 9add915ea..3034e36ec 100644 --- a/pkg/domain/infra/abi/images_list.go +++ b/pkg/domain/infra/abi/images_list.go @@ -13,20 +13,13 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) err error ) - // TODO: Future work support for domain.Filters - // filters := utils.ToLibpodFilters(opts.Filters) - - if len(opts.Filter) > 0 { - images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter) - } else { - images, err = ir.Libpod.ImageRuntime().GetImages() - } + images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter) if err != nil { return nil, err } - summaries := make([]*entities.ImageSummary, len(images)) - for i, img := range images { + var summaries []*entities.ImageSummary + for _, img := range images { var repoTags []string if opts.All { pairs, err := libpodImage.ReposToMap(img.Names()) @@ -40,7 +33,19 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) } } } else { - repoTags, _ = img.RepoTags() + repoTags, err = img.RepoTags() + if err != nil { + return nil, err + } + if len(img.Names()) == 0 { + parent, err := img.IsParent(ctx) + if err != nil { + return nil, err + } + if parent { + continue + } + } } digests := make([]string, len(img.Digests())) @@ -72,7 +77,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) sz, _ := img.Size(context.TODO()) e.Size = int64(*sz) - summaries[i] = &e + summaries = append(summaries, &e) } return summaries, nil } diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go index 88331f96c..6e311dec7 100644 --- a/pkg/domain/infra/abi/manifest.go +++ b/pkg/domain/infra/abi/manifest.go @@ -6,14 +6,22 @@ import ( "context" "encoding/json" "fmt" + "io/ioutil" + "os" "strings" + "github.com/containers/buildah/manifests" buildahUtil "github.com/containers/buildah/util" + cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/transports/alltransports" + 
"github.com/containers/image/v5/types" libpodImage "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/util" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -71,7 +79,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd } listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(listImageSpec) if err != nil { - return "", errors.Wrapf(err, "error retriving local image from image name %s", listImageSpec) + return "", errors.Wrapf(err, "error retrieving local image from image name %s", listImageSpec) } manifestAddOpts := libpodImage.ManifestAddOpts{ @@ -100,3 +108,120 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd } return listID, nil } + +// ManifestAnnotate updates an entry of the manifest list +func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, names []string, opts entities.ManifestAnnotateOptions) (string, error) { + listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(names[0]) + if err != nil { + return "", errors.Wrapf(err, "error retreiving local image from image name %s", names[0]) + } + digest, err := digest.Parse(names[1]) + if err != nil { + return "", errors.Errorf(`invalid image digest "%s": %v`, names[1], err) + } + manifestAnnotateOpts := libpodImage.ManifestAnnotateOpts{ + Arch: opts.Arch, + Features: opts.Features, + OS: opts.OS, + OSFeatures: opts.OSFeatures, + OSVersion: opts.OSVersion, + Variant: opts.Variant, + } + if len(opts.Annotation) > 0 { + annotations := make(map[string]string) + for _, annotationSpec := range opts.Annotation { + spec := strings.SplitN(annotationSpec, "=", 2) + if len(spec) != 2 { + return "", errors.Errorf("no value given for annotation %q", spec[0]) + } + annotations[spec[0]] = spec[1] + } + manifestAnnotateOpts.Annotation = annotations + } + updatedListID, err := listImage.AnnotateManifest(*ir.Libpod.SystemContext(), digest, manifestAnnotateOpts) + if err == nil { + return fmt.Sprintf("%s: %s", updatedListID, digest.String()), nil + } + return "", err +} + +// ManifestRemove removes specified digest from the specified manifest list +func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (string, error) { + instanceDigest, err := digest.Parse(names[1]) + if err != nil { + return "", errors.Errorf(`invalid image digest "%s": %v`, names[1], err) + } + listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(names[0]) + if err != nil { + return "", errors.Wrapf(err, "error retriving local image from image name %s", names[0]) + } + updatedListID, err := listImage.RemoveManifest(instanceDigest) + if err == nil { + return fmt.Sprintf("%s :%s\n", updatedListID, instanceDigest.String()), nil + } + return "", err +} + +// ManifestPush pushes a manifest list or image index to the destination +func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts entities.ManifestPushOptions) error { + listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(names[0]) + if err != nil { + return errors.Wrapf(err, "error retriving local image from image name %s", names[0]) + } + dest, err := alltransports.ParseImageName(names[1]) + if err != nil { + return err + } + var manifestType string + if opts.Format != "" { + switch opts.Format { + case "oci": + manifestType = imgspecv1.MediaTypeImageManifest + case "v2s2", "docker": + manifestType = manifest.DockerV2Schema2MediaType + default: + return 
errors.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format) + } + } + + // Set the system context. + sys := ir.Libpod.SystemContext() + if sys != nil { + sys = &types.SystemContext{} + } + sys.AuthFilePath = opts.Authfile + sys.DockerInsecureSkipTLSVerify = opts.SkipTLSVerify + + if opts.Username != "" && opts.Password != "" { + sys.DockerAuthConfig = &types.DockerAuthConfig{ + Username: opts.Username, + Password: opts.Password, + } + } + + options := manifests.PushOptions{ + Store: ir.Libpod.GetStore(), + SystemContext: sys, + ImageListSelection: cp.CopySpecificImages, + Instances: nil, + RemoveSignatures: opts.RemoveSignatures, + SignBy: opts.SignBy, + ManifestType: manifestType, + } + if opts.All { + options.ImageListSelection = cp.CopyAllImages + } + if !opts.Quiet { + options.ReportWriter = os.Stderr + } + digest, err := listImage.PushManifest(dest, options) + if err == nil && opts.Purge { + _, err = ir.Libpod.GetStore().DeleteImage(listImage.ID(), true) + } + if opts.DigestFile != "" { + if err = ioutil.WriteFile(opts.DigestFile, []byte(digest.String()), 0644); err != nil { + return buildahUtil.GetFailureCause(err, errors.Wrapf(err, "failed to write digest to file %q", opts.DigestFile)) + } + } + return err +} diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go new file mode 100644 index 000000000..8e3515824 --- /dev/null +++ b/pkg/domain/infra/abi/network.go @@ -0,0 +1,292 @@ +package abi + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/containernetworking/cni/libcni" + cniversion "github.com/containernetworking/cni/pkg/version" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/network" + "github.com/containers/libpod/pkg/util" + "github.com/pkg/errors" +) + +func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) { + var reports []*entities.NetworkListReport + + config, err := ic.Libpod.GetConfig() + if err != nil { + return nil, err + } + + networks, err := network.LoadCNIConfsFromDir(network.GetCNIConfDir(config)) + if err != nil { + return nil, err + } + + var tokens []string + // tokenize the networkListOptions.Filter in key=value. 
+ if len(options.Filter) > 0 { + tokens = strings.Split(options.Filter, "=") + if len(tokens) != 2 { + return nil, fmt.Errorf("invalid filter syntax : %s", options.Filter) + } + } + + for _, n := range networks { + if ifPassesFilterTest(n, tokens) { + reports = append(reports, &entities.NetworkListReport{NetworkConfigList: n}) + } + } + return reports, nil +} + +func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) { + var ( + rawCNINetworks []entities.NetworkInspectReport + ) + + config, err := ic.Libpod.GetConfig() + if err != nil { + return nil, err + } + + for _, name := range namesOrIds { + rawList, err := network.InspectNetwork(config, name) + if err != nil { + return nil, err + } + rawCNINetworks = append(rawCNINetworks, rawList) + } + return rawCNINetworks, nil +} + +func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) { + var reports []*entities.NetworkRmReport + + config, err := ic.Libpod.GetConfig() + if err != nil { + return nil, err + } + + for _, name := range namesOrIds { + report := entities.NetworkRmReport{Name: name} + containers, err := ic.Libpod.GetAllContainers() + if err != nil { + return reports, err + } + // We need to iterate containers looking to see if they belong to the given network + for _, c := range containers { + if util.StringInSlice(name, c.Config().Networks) { + // if user passes force, we nuke containers + if !options.Force { + // Without the force option, we return an error + return reports, errors.Errorf("%q has associated containers with it. Use -f to forcibly delete containers", name) + } + if err := ic.Libpod.RemoveContainer(ctx, c, true, true); err != nil { + return reports, err + } + } + } + if err := network.RemoveNetwork(config, name); err != nil { + report.Err = err + } + reports = append(reports, &report) + } + return reports, nil +} + +func (ic *ContainerEngine) NetworkCreate(ctx context.Context, name string, options entities.NetworkCreateOptions) (*entities.NetworkCreateReport, error) { + var ( + err error + fileName string + ) + if len(options.MacVLAN) > 0 { + fileName, err = createMacVLAN(ic.Libpod, name, options) + } else { + fileName, err = createBridge(ic.Libpod, name, options) + } + if err != nil { + return nil, err + } + return &entities.NetworkCreateReport{Filename: fileName}, nil +} + +// createBridge creates a CNI network +func createBridge(r *libpod.Runtime, name string, options entities.NetworkCreateOptions) (string, error) { + isGateway := true + ipMasq := true + subnet := &options.Subnet + ipRange := options.Range + runtimeConfig, err := r.GetConfig() + if err != nil { + return "", err + } + // if range is provided, make sure it is "in" network + if subnet.IP != nil { + // if network is provided, does it conflict with existing CNI or live networks + err = network.ValidateUserNetworkIsAvailable(runtimeConfig, subnet) + } else { + // if no network is provided, figure out network + subnet, err = network.GetFreeNetwork(runtimeConfig) + } + if err != nil { + return "", err + } + gateway := options.Gateway + if gateway == nil { + // if no gateway is provided, provide it as first ip of network + gateway = network.CalcGatewayIP(subnet) + } + // if network is provided and if gateway is provided, make sure it is "in" network + if options.Subnet.IP != nil && options.Gateway != nil { + if !subnet.Contains(gateway) { + return "", 
errors.Errorf("gateway %s is not in valid for subnet %s", gateway.String(), subnet.String()) + } + } + if options.Internal { + isGateway = false + ipMasq = false + } + + // if a range is given, we need to ensure it is "in" the network range. + if options.Range.IP != nil { + if options.Subnet.IP == nil { + return "", errors.New("you must define a subnet range to define an ip-range") + } + firstIP, err := network.FirstIPInSubnet(&options.Range) + if err != nil { + return "", err + } + lastIP, err := network.LastIPInSubnet(&options.Range) + if err != nil { + return "", err + } + if !subnet.Contains(firstIP) || !subnet.Contains(lastIP) { + return "", errors.Errorf("the ip range %s does not fall within the subnet range %s", options.Range.String(), subnet.String()) + } + } + bridgeDeviceName, err := network.GetFreeDeviceName(runtimeConfig) + if err != nil { + return "", err + } + + if len(name) > 0 { + netNames, err := network.GetNetworkNamesFromFileSystem(runtimeConfig) + if err != nil { + return "", err + } + if util.StringInSlice(name, netNames) { + return "", errors.Errorf("the network name %s is already used", name) + } + } else { + // If no name is given, we give the name of the bridge device + name = bridgeDeviceName + } + + ncList := network.NewNcList(name, cniversion.Current()) + var plugins []network.CNIPlugins + var routes []network.IPAMRoute + + defaultRoute, err := network.NewIPAMDefaultRoute() + if err != nil { + return "", err + } + routes = append(routes, defaultRoute) + ipamConfig, err := network.NewIPAMHostLocalConf(subnet, routes, ipRange, gateway) + if err != nil { + return "", err + } + + // TODO need to iron out the role of isDefaultGW and IPMasq + bridge := network.NewHostLocalBridge(bridgeDeviceName, isGateway, false, ipMasq, ipamConfig) + plugins = append(plugins, bridge) + plugins = append(plugins, network.NewPortMapPlugin()) + plugins = append(plugins, network.NewFirewallPlugin()) + // if we find the dnsname plugin, we add configuration for it + if network.HasDNSNamePlugin(runtimeConfig.Network.CNIPluginDirs) && !options.DisableDNS { + // Note: in the future we might like to allow for dynamic domain names + plugins = append(plugins, network.NewDNSNamePlugin(network.DefaultPodmanDomainName)) + } + ncList["plugins"] = plugins + b, err := json.MarshalIndent(ncList, "", " ") + if err != nil { + return "", err + } + cniPathName := filepath.Join(network.GetCNIConfDir(runtimeConfig), fmt.Sprintf("%s.conflist", name)) + err = ioutil.WriteFile(cniPathName, b, 0644) + return cniPathName, err +} + +func createMacVLAN(r *libpod.Runtime, name string, options entities.NetworkCreateOptions) (string, error) { + var ( + plugins []network.CNIPlugins + ) + liveNetNames, err := network.GetLiveNetworkNames() + if err != nil { + return "", err + } + + config, err := r.GetConfig() + if err != nil { + return "", err + } + + // Make sure the host-device exists + if !util.StringInSlice(options.MacVLAN, liveNetNames) { + return "", errors.Errorf("failed to find network interface %q", options.MacVLAN) + } + if len(name) > 0 { + netNames, err := network.GetNetworkNamesFromFileSystem(config) + if err != nil { + return "", err + } + if util.StringInSlice(name, netNames) { + return "", errors.Errorf("the network name %s is already used", name) + } + } else { + name, err = network.GetFreeDeviceName(config) + if err != nil { + return "", err + } + } + ncList := network.NewNcList(name, cniversion.Current()) + macvlan := network.NewMacVLANPlugin(options.MacVLAN) + plugins = append(plugins, macvlan) + 
ncList["plugins"] = plugins + b, err := json.MarshalIndent(ncList, "", " ") + if err != nil { + return "", err + } + cniPathName := filepath.Join(network.GetCNIConfDir(config), fmt.Sprintf("%s.conflist", name)) + err = ioutil.WriteFile(cniPathName, b, 0644) + return cniPathName, err +} + +func ifPassesFilterTest(netconf *libcni.NetworkConfigList, filter []string) bool { + result := false + if len(filter) == 0 { + // No filter, so pass + return true + } + switch strings.ToLower(filter[0]) { + case "name": + if filter[1] == netconf.Name { + result = true + } + case "plugin": + plugins := network.GetCNIPlugins(netconf) + if strings.Contains(plugins, filter[1]) { + result = true + } + default: + result = false + } + return result +} diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go new file mode 100644 index 000000000..6d0919d2b --- /dev/null +++ b/pkg/domain/infra/abi/play.go @@ -0,0 +1,554 @@ +package abi + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containers/buildah/pkg/parse" + "github.com/containers/image/v5/types" + "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/image" + ann "github.com/containers/libpod/pkg/annotations" + "github.com/containers/libpod/pkg/domain/entities" + envLib "github.com/containers/libpod/pkg/env" + ns "github.com/containers/libpod/pkg/namespaces" + createconfig "github.com/containers/libpod/pkg/spec" + "github.com/containers/libpod/pkg/specgen/generate" + "github.com/containers/libpod/pkg/util" + "github.com/containers/storage" + "github.com/cri-o/ocicni/pkg/ocicni" + "github.com/docker/distribution/reference" + "github.com/ghodss/yaml" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" +) + +const ( + // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath + kubeDirectoryPermission = 0755 + // https://kubernetes.io/docs/concepts/storage/volumes/#hostpath + kubeFilePermission = 0644 +) + +func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) { + var ( + containers []*libpod.Container + pod *libpod.Pod + podOptions []libpod.PodCreateOption + podYAML v1.Pod + registryCreds *types.DockerAuthConfig + writer io.Writer + report entities.PlayKubeReport + ) + + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + if err := yaml.Unmarshal(content, &podYAML); err != nil { + return nil, errors.Wrapf(err, "unable to read %q as YAML", path) + } + + // NOTE: pkg/bindings/play is also parsing the file. + // A pkg/kube would be nice to refactor and abstract + // parts of the K8s-related code. + if podYAML.Kind != "Pod" { + return nil, errors.Errorf("invalid YAML kind: %q. 
Pod is the only supported Kubernetes YAML kind", podYAML.Kind) + } + + // check for name collision between pod and container + podName := podYAML.ObjectMeta.Name + if podName == "" { + return nil, errors.Errorf("pod does not have a name") + } + for _, n := range podYAML.Spec.Containers { + if n.Name == podName { + report.Logs = append(report.Logs, + fmt.Sprintf("a container exists with the same name (%q) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName)) + podName = fmt.Sprintf("%s_pod", podName) + } + } + + podOptions = append(podOptions, libpod.WithInfraContainer()) + podOptions = append(podOptions, libpod.WithPodName(podName)) + // TODO: for now we just use the default kernel namespaces; we need to add/subtract this from the yaml + + hostname := podYAML.Spec.Hostname + if hostname == "" { + hostname = podName + } + podOptions = append(podOptions, libpod.WithPodHostname(hostname)) + + if podYAML.Spec.HostNetwork { + podOptions = append(podOptions, libpod.WithPodHostNetwork()) + } + + nsOptions, err := generate.GetNamespaceOptions(strings.Split(createconfig.DefaultKernelNamespaces, ",")) + if err != nil { + return nil, err + } + podOptions = append(podOptions, nsOptions...) + podPorts := getPodPorts(podYAML.Spec.Containers) + podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts)) + + if options.Network != "" { + switch strings.ToLower(options.Network) { + case "bridge", "host": + return nil, errors.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML") + case "": + return nil, errors.Errorf("invalid value passed to --network: must provide a comma-separated list of CNI networks") + default: + // We'll assume this is a comma-separated list of CNI + // networks. + networks := strings.Split(options.Network, ",") + logrus.Debugf("Pod joining CNI networks: %v", networks) + podOptions = append(podOptions, libpod.WithPodNetworks(networks)) + } + } + + // Create the Pod + pod, err = ic.Libpod.NewPod(ctx, podOptions...) 
+ if err != nil { + return nil, err + } + + podInfraID, err := pod.InfraContainerID() + if err != nil { + return nil, err + } + hasUserns := false + if podInfraID != "" { + podCtr, err := ic.Libpod.GetContainer(podInfraID) + if err != nil { + return nil, err + } + mappings, err := podCtr.IDMappings() + if err != nil { + return nil, err + } + hasUserns = len(mappings.UIDMap) > 0 + } + + namespaces := map[string]string{ + // Disabled during code review per mheon + //"pid": fmt.Sprintf("container:%s", podInfraID), + "net": fmt.Sprintf("container:%s", podInfraID), + "ipc": fmt.Sprintf("container:%s", podInfraID), + "uts": fmt.Sprintf("container:%s", podInfraID), + } + if hasUserns { + namespaces["user"] = fmt.Sprintf("container:%s", podInfraID) + } + if !options.Quiet { + writer = os.Stderr + } + + if len(options.Username) > 0 && len(options.Password) > 0 { + registryCreds = &types.DockerAuthConfig{ + Username: options.Username, + Password: options.Password, + } + } + + dockerRegistryOptions := image.DockerRegistryOptions{ + DockerRegistryCreds: registryCreds, + DockerCertPath: options.CertDir, + DockerInsecureSkipTLSVerify: options.SkipTLSVerify, + } + + // map from name to mount point + volumes := make(map[string]string) + for _, volume := range podYAML.Spec.Volumes { + hostPath := volume.VolumeSource.HostPath + if hostPath == nil { + return nil, errors.Errorf("HostPath is currently the only supported VolumeSource") + } + if hostPath.Type != nil { + switch *hostPath.Type { + case v1.HostPathDirectoryOrCreate: + if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { + if err := os.Mkdir(hostPath.Path, kubeDirectoryPermission); err != nil { + return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path) + } + } + // Label a newly created volume + if err := libpod.LabelVolumePath(hostPath.Path); err != nil { + return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path) + } + case v1.HostPathFileOrCreate: + if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) { + f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, kubeFilePermission) + if err != nil { + return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path) + } + if err := f.Close(); err != nil { + logrus.Warnf("Error in closing newly created HostPath file: %v", err) + } + } + // unconditionally label a newly created volume + if err := libpod.LabelVolumePath(hostPath.Path); err != nil { + return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path) + } + case v1.HostPathDirectory: + case v1.HostPathFile: + case v1.HostPathUnset: + // do nothing here because we will verify the path exists in validateVolumeHostDir + break + default: + return nil, errors.Errorf("Unsupported HostPath type %v", *hostPath.Type) + } + } + + if err := parse.ValidateVolumeHostDir(hostPath.Path); err != nil { + return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML") + } + volumes[volume.Name] = hostPath.Path + } + + seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations, options.SeccompProfileRoot) + if err != nil { + return nil, err + } + + for _, container := range podYAML.Spec.Containers { + pullPolicy := util.PullImageMissing + if len(container.ImagePullPolicy) > 0 { + pullPolicy, err = util.ValidatePullType(string(container.ImagePullPolicy)) + if err != nil { + return nil, err + } + } + named, err := reference.ParseNormalizedNamed(container.Image) + if err != nil { + return nil, err + } + // In kube, if the image is tagged with 
latest, it should always pull + if tagged, isTagged := named.(reference.NamedTagged); isTagged { + if tagged.Tag() == image.LatestTag { + pullPolicy = util.PullImageAlways + } + } + newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy) + if err != nil { + return nil, err + } + conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podInfraID, seccompPaths) + if err != nil { + return nil, err + } + ctr, err := createconfig.CreateContainerFromCreateConfig(ic.Libpod, conf, ctx, pod) + if err != nil { + return nil, err + } + containers = append(containers, ctr) + } + + // start the containers + for _, ctr := range containers { + if err := ctr.Start(ctx, true); err != nil { + // Making this a hard failure here to avoid a mess + // the other containers are in created status + return nil, err + } + } + + report.Pod = pod.ID() + for _, ctr := range containers { + report.Containers = append(report.Containers, ctr.ID()) + } + + return &report, nil +} + +// getPodPorts converts a slice of kube container descriptions to an +// array of ocicni portmapping descriptions usable in libpod +func getPodPorts(containers []v1.Container) []ocicni.PortMapping { + var infraPorts []ocicni.PortMapping + for _, container := range containers { + for _, p := range container.Ports { + if p.HostPort != 0 && p.ContainerPort == 0 { + p.ContainerPort = p.HostPort + } + if p.Protocol == "" { + p.Protocol = "tcp" + } + portBinding := ocicni.PortMapping{ + HostPort: p.HostPort, + ContainerPort: p.ContainerPort, + Protocol: strings.ToLower(string(p.Protocol)), + } + if p.HostIP != "" { + logrus.Debug("HostIP on port bindings is not supported") + } + // only hostPort is utilized in podman context, all container ports + // are accessible inside the shared network namespace + if p.HostPort != 0 { + infraPorts = append(infraPorts, portBinding) + } + + } + } + return infraPorts +} + +func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) { + if containerYAML.SecurityContext == nil { + return + } + if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil { + securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem + } + if containerYAML.SecurityContext.Privileged != nil { + securityConfig.Privileged = *containerYAML.SecurityContext.Privileged + } + + if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil { + securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation + } + + if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil { + if seopt.User != "" { + securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User)) + securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User)) + } + if seopt.Role != "" { + securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role)) + securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role)) + } + if seopt.Type != "" { + securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type)) + securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type)) + } + if seopt.Level != "" { + securityConfig.SecurityOpts = 
append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level)) + securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level)) + } + } + if caps := containerYAML.SecurityContext.Capabilities; caps != nil { + for _, capability := range caps.Add { + securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability)) + } + for _, capability := range caps.Drop { + securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability)) + } + } + if containerYAML.SecurityContext.RunAsUser != nil { + userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser) + } + if containerYAML.SecurityContext.RunAsGroup != nil { + if userConfig.User == "" { + userConfig.User = "0" + } + userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup) + } +} + +// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container +func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) { + var ( + containerConfig createconfig.CreateConfig + pidConfig createconfig.PidConfig + networkConfig createconfig.NetworkConfig + cgroupConfig createconfig.CgroupConfig + utsConfig createconfig.UtsConfig + ipcConfig createconfig.IpcConfig + userConfig createconfig.UserConfig + securityConfig createconfig.SecurityConfig + ) + + // The default for MemorySwappiness is -1, not 0 + containerConfig.Resources.MemorySwappiness = -1 + + containerConfig.Image = containerYAML.Image + containerConfig.ImageID = newImage.ID() + containerConfig.Name = containerYAML.Name + containerConfig.Tty = containerYAML.TTY + + containerConfig.Pod = podID + + imageData, _ := newImage.Inspect(ctx) + + userConfig.User = "0" + if imageData != nil { + userConfig.User = imageData.Config.User + } + + setupSecurityContext(&securityConfig, &userConfig, containerYAML) + + securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerConfig.Name) + + containerConfig.Command = []string{} + if imageData != nil && imageData.Config != nil { + containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...) + } + if len(containerYAML.Command) != 0 { + containerConfig.Command = append(containerConfig.Command, containerYAML.Command...) + } else if imageData != nil && imageData.Config != nil { + containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...) 
+ } + if imageData != nil && len(containerConfig.Command) == 0 { + return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name) + } + + containerConfig.UserCommand = containerConfig.Command + + containerConfig.StopSignal = 15 + + containerConfig.WorkDir = "/" + if imageData != nil { + // FIXME, + // we are currently ignoring imageData.Config.ExposedPorts + containerConfig.BuiltinImgVolumes = imageData.Config.Volumes + if imageData.Config.WorkingDir != "" { + containerConfig.WorkDir = imageData.Config.WorkingDir + } + containerConfig.Labels = imageData.Config.Labels + if imageData.Config.StopSignal != "" { + stopSignal, err := util.ParseSignal(imageData.Config.StopSignal) + if err != nil { + return nil, err + } + containerConfig.StopSignal = stopSignal + } + } + + if containerYAML.WorkingDir != "" { + containerConfig.WorkDir = containerYAML.WorkingDir + } + // If the user does not pass in ID mappings, just set to basics + if userConfig.IDMappings == nil { + userConfig.IDMappings = &storage.IDMappingOptions{} + } + + networkConfig.NetMode = ns.NetworkMode(namespaces["net"]) + ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"]) + utsConfig.UtsMode = ns.UTSMode(namespaces["uts"]) + // disabled in code review per mheon + //containerConfig.PidMode = ns.PidMode(namespaces["pid"]) + userConfig.UsernsMode = ns.UsernsMode(namespaces["user"]) + if len(containerConfig.WorkDir) == 0 { + containerConfig.WorkDir = "/" + } + + containerConfig.Pid = pidConfig + containerConfig.Network = networkConfig + containerConfig.Uts = utsConfig + containerConfig.Ipc = ipcConfig + containerConfig.Cgroup = cgroupConfig + containerConfig.User = userConfig + containerConfig.Security = securityConfig + + annotations := make(map[string]string) + if infraID != "" { + annotations[ann.SandboxID] = infraID + annotations[ann.ContainerType] = ann.ContainerTypeContainer + } + containerConfig.Annotations = annotations + + // Environment Variables + envs := map[string]string{} + if imageData != nil { + imageEnv, err := envLib.ParseSlice(imageData.Config.Env) + if err != nil { + return nil, errors.Wrap(err, "error parsing image environment variables") + } + envs = imageEnv + } + for _, e := range containerYAML.Env { + envs[e.Name] = e.Value + } + containerConfig.Env = envs + + for _, volume := range containerYAML.VolumeMounts { + hostPath, exists := volumes[volume.Name] + if !exists { + return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name) + } + if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil { + return nil, errors.Wrapf(err, "error in parsing MountPath") + } + containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath)) + } + return &containerConfig, nil +} + +// kubeSeccompPaths holds information about a pod YAML's seccomp configuration +// it holds both container and pod seccomp paths +type kubeSeccompPaths struct { + containerPaths map[string]string + podPath string +} + +// findForContainer checks whether a container has a seccomp path configured for it +// if not, it returns the podPath, which should always have a value +func (k *kubeSeccompPaths) findForContainer(ctrName string) string { + if path, ok := k.containerPaths[ctrName]; ok { + return path + } + return k.podPath +} + +// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp +// it parses both pod and container 
level +// if the annotation is of the form "localhost/%s", the seccomp profile will be set to profileRoot/%s +func initializeSeccompPaths(annotations map[string]string, profileRoot string) (*kubeSeccompPaths, error) { + seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)} + var err error + if annotations != nil { + for annKeyValue, seccomp := range annotations { + // check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/ + prefixAndCtr := strings.Split(annKeyValue, "/") + if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix { + continue + } else if len(prefixAndCtr) != 2 { + // this could be caused by a user inputting either of + // container.seccomp.security.alpha.kubernetes.io{,/} + // both of which are invalid + return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0]) + } + + path, err := verifySeccompPath(seccomp, profileRoot) + if err != nil { + return nil, err + } + seccompPaths.containerPaths[prefixAndCtr[1]] = path + } + + podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey] + if ok { + seccompPaths.podPath, err = verifySeccompPath(podSeccomp, profileRoot) + } else { + seccompPaths.podPath, err = libpod.DefaultSeccompPath() + } + if err != nil { + return nil, err + } + } + return seccompPaths, nil +} + +// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path +// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp +func verifySeccompPath(path string, profileRoot string) (string, error) { + switch path { + case v1.DeprecatedSeccompProfileDockerDefault: + fallthrough + case v1.SeccompProfileRuntimeDefault: + return libpod.DefaultSeccompPath() + case "unconfined": + return path, nil + default: + parts := strings.Split(path, "/") + if parts[0] == "localhost" { + return filepath.Join(profileRoot, parts[1]), nil + } + return "", errors.Errorf("invalid seccomp path: %s", path) + } +} diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go index b286bcf0d..16c222cbd 100644 --- a/pkg/domain/infra/abi/pods.go +++ b/pkg/domain/infra/abi/pods.go @@ -243,6 +243,10 @@ func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, optio } func (ic *ContainerEngine) PodPrune(ctx context.Context, options entities.PodPruneOptions) ([]*entities.PodPruneReport, error) { + return ic.prunePodHelper(ctx) +} + +func (ic *ContainerEngine) prunePodHelper(ctx context.Context) ([]*entities.PodPruneReport, error) { var ( reports []*entities.PodPruneReport ) diff --git a/pkg/domain/infra/abi/pods_stats.go b/pkg/domain/infra/abi/pods_stats.go index a41c01da0..c6befcf95 100644 --- a/pkg/domain/infra/abi/pods_stats.go +++ b/pkg/domain/infra/abi/pods_stats.go @@ -8,6 +8,7 @@ import ( "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/utils" "github.com/docker/go-units" "github.com/pkg/errors" ) @@ -68,7 +69,7 @@ func combineHumanValues(a, b uint64) string { } func floatToPercentString(f float64) string { - strippedFloat, err := libpod.RemoveScientificNotationFromFloat(f) + strippedFloat, err := utils.RemoveScientificNotationFromFloat(f) if err != nil || strippedFloat == 0 { // If things go bazinga, return a safe value return "--" diff --git a/pkg/domain/infra/abi/runtime.go b/pkg/domain/infra/abi/runtime.go index fba422d8e..b9020e9a5 100644 --- a/pkg/domain/infra/abi/runtime.go +++ 
b/pkg/domain/infra/abi/runtime.go @@ -16,4 +16,9 @@ type ContainerEngine struct { Libpod *libpod.Runtime } +// System-related runtime linked against libpod library +type SystemEngine struct { + Libpod *libpod.Runtime +} + var shutdownSync sync.Once diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go index e5c109ee6..52dfaba7d 100644 --- a/pkg/domain/infra/abi/system.go +++ b/pkg/domain/infra/abi/system.go @@ -5,6 +5,8 @@ import ( "fmt" "io/ioutil" "os" + "os/exec" + "path/filepath" "strconv" "syscall" @@ -14,54 +16,18 @@ import ( "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" - iopodman "github.com/containers/libpod/pkg/varlink" - iopodmanAPI "github.com/containers/libpod/pkg/varlinkapi" "github.com/containers/libpod/utils" - "github.com/containers/libpod/version" + "github.com/docker/distribution/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/varlink/go/varlink" + "github.com/spf13/pflag" ) func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { return ic.Libpod.Info() } -func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.ServiceOptions) error { - var varlinkInterfaces = []*iopodman.VarlinkInterface{ - iopodmanAPI.New(opts.Command, ic.Libpod), - } - - service, err := varlink.NewService( - "Atomic", - "podman", - version.Version, - "https://github.com/containers/libpod", - ) - if err != nil { - return errors.Wrapf(err, "unable to create new varlink service") - } - - for _, i := range varlinkInterfaces { - if err := service.RegisterInterface(i); err != nil { - return errors.Errorf("unable to register varlink interface %v", i) - } - } - - // Run the varlink server at the given address - if err = service.Listen(opts.URI, opts.Timeout); err != nil { - switch err.(type) { - case varlink.ServiceTimeoutError: - logrus.Infof("varlink service expired (use --timeout to increase session time beyond %s ms, 0 means never timeout)", opts.Timeout.String()) - return nil - default: - return errors.Wrapf(err, "unable to start varlink service") - } - } - return nil -} - func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error { // do it only after podman has already re-execed and running with uid==0. if os.Geteuid() == 0 { @@ -83,6 +49,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) } } } + return nil } pausePidPath, err := util.GetRootlessPauseProcessPidPath() @@ -175,3 +142,240 @@ func setUMask() { // nolint:deadcode,unused func checkInput() error { // nolint:deadcode,unused return nil } + +// SystemPrune removes unused data from the system by pruning pods, containers, volumes and images. 
+func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) { + var systemPruneReport = new(entities.SystemPruneReport) + podPruneReport, err := ic.prunePodHelper(ctx) + if err != nil { + return nil, err + } + systemPruneReport.PodPruneReport = podPruneReport + + containerPruneReport, err := ic.pruneContainersHelper(ctx, nil) + if err != nil { + return nil, err + } + systemPruneReport.ContainerPruneReport = containerPruneReport + + results, err := ic.Libpod.ImageRuntime().PruneImages(ctx, options.All, nil) + if err != nil { + return nil, err + } + report := entities.ImagePruneReport{ + Report: entities.Report{ + Id: results, + Err: nil, + }, + } + + systemPruneReport.ImagePruneReport = &report + + if options.Volume { + volumePruneReport, err := ic.pruneVolumesHelper(ctx) + if err != nil { + return nil, err + } + systemPruneReport.VolumePruneReport = volumePruneReport + } + return systemPruneReport, nil +} + +func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.SystemDfOptions) (*entities.SystemDfReport, error) { + var ( + dfImages []*entities.SystemDfImageReport + dfContainers []*entities.SystemDfContainerReport + dfVolumes []*entities.SystemDfVolumeReport + runningContainers []string + ) + + // Get Images and iterate them + imgs, err := ic.Libpod.ImageRuntime().GetImages() + if err != nil { + return nil, err + } + for _, i := range imgs { + var sharedSize uint64 + cons, err := i.Containers() + if err != nil { + return nil, err + } + imageSize, err := i.Size(ctx) + if err != nil { + return nil, err + } + uniqueSize := *imageSize + + parent, err := i.GetParent(ctx) + if err != nil { + return nil, err + } + if parent != nil { + parentSize, err := parent.Size(ctx) + if err != nil { + return nil, err + } + uniqueSize = *parentSize - *imageSize + sharedSize = *imageSize - uniqueSize + } + var name, repository, tag string + for _, n := range i.Names() { + if len(n) > 0 { + name = n + break + } + } + + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + repository = named.Name() + if tagged, isTagged := named.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + + report := entities.SystemDfImageReport{ + Repository: repository, + Tag: tag, + ImageID: i.ID(), + Created: i.Created(), + Size: int64(*imageSize), + SharedSize: int64(sharedSize), + UniqueSize: int64(uniqueSize), + Containers: len(cons), + } + dfImages = append(dfImages, &report) + } + + // GetContainers and iterate them + cons, err := ic.Libpod.GetAllContainers() + if err != nil { + return nil, err + } + for _, c := range cons { + iid, _ := c.Image() + conSize, err := c.RootFsSize() + if err != nil { + return nil, err + } + state, err := c.State() + if err != nil { + return nil, err + } + rwsize, err := c.RWSize() + if err != nil { + return nil, err + } + report := entities.SystemDfContainerReport{ + ContainerID: c.ID(), + Image: iid, + Command: c.Command(), + LocalVolumes: len(c.UserVolumes()), + RWSize: rwsize, + Size: conSize, + Created: c.CreatedTime(), + Status: state.String(), + Names: c.Name(), + } + dfContainers = append(dfContainers, &report) + } + + // Get volumes and iterate them + vols, err := ic.Libpod.GetAllVolumes() + if err != nil { + return nil, err + } + + running, err := ic.Libpod.GetRunningContainers() + if err != nil { + return nil, err + } + for _, c := range running { + runningContainers = append(runningContainers, c.ID()) + } + + for _, v := range vols { + var 
consInUse int + volSize, err := sizeOfPath(v.MountPoint()) + if err != nil { + return nil, err + } + inUse, err := v.VolumesInUse() + if err != nil { + return nil, err + } + for _, viu := range inUse { + if util.StringInSlice(viu, runningContainers) { + consInUse += 1 + } + } + report := entities.SystemDfVolumeReport{ + VolumeName: v.Name(), + Links: consInUse, + Size: volSize, + } + dfVolumes = append(dfVolumes, &report) + } + return &entities.SystemDfReport{ + Images: dfImages, + Containers: dfContainers, + Volumes: dfVolumes, + }, nil +} + +// sizeOfPath determines the disk usage of a given path. It was called volumeSize in v1 +// and is now generic, taking a path instead of a libpod volume +func sizeOfPath(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if err == nil && !info.IsDir() { + size += info.Size() + } + return err + }) + return size, err +} + +func (se *SystemEngine) Reset(ctx context.Context) error { + return se.Libpod.Reset(ctx) +} + +func (se *SystemEngine) Renumber(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig) error { + return nil +} + +func (s SystemEngine) Migrate(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig, options entities.SystemMigrateOptions) error { + return nil +} + +func (s SystemEngine) Shutdown(ctx context.Context) { + if err := s.Libpod.Shutdown(false); err != nil { + logrus.Error(err) + } +} + +func unshareEnv(graphroot, runroot string) []string { + return append(os.Environ(), "_CONTAINERS_USERNS_CONFIGURED=done", + fmt.Sprintf("CONTAINERS_GRAPHROOT=%s", graphroot), + fmt.Sprintf("CONTAINERS_RUNROOT=%s", runroot)) +} + +func (ic *ContainerEngine) Unshare(ctx context.Context, args []string) error { + cmd := exec.Command(args[0], args[1:]...) 
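+ // unshareEnv marks the user namespace as already configured and points the + // child at podman's graph and run roots, so tools run via unshare see the same storage.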
+ cmd.Env = unshareEnv(ic.Libpod.StorageConfig().GraphRoot, ic.Libpod.StorageConfig().RunRoot) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func (ic ContainerEngine) Version(ctx context.Context) (*entities.SystemVersionReport, error) { + var report entities.SystemVersionReport + v, err := define.GetVersion() + if err != nil { + return nil, err + } + report.Client = &v + return &report, err +} diff --git a/pkg/domain/infra/abi/system_novalink.go b/pkg/domain/infra/abi/system_novalink.go new file mode 100644 index 000000000..a71b0170a --- /dev/null +++ b/pkg/domain/infra/abi/system_novalink.go @@ -0,0 +1,14 @@ +// +build !varlink + +package abi + +import ( + "context" + + "github.com/containers/libpod/pkg/domain/entities" + "github.com/pkg/errors" +) + +func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.ServiceOptions) error { + return errors.Errorf("varlink is not supported") +} diff --git a/pkg/domain/infra/abi/system_varlink.go b/pkg/domain/infra/abi/system_varlink.go new file mode 100644 index 000000000..4dc766f52 --- /dev/null +++ b/pkg/domain/infra/abi/system_varlink.go @@ -0,0 +1,49 @@ +// +build varlink + +package abi + +import ( + "context" + + "github.com/containers/libpod/pkg/domain/entities" + iopodman "github.com/containers/libpod/pkg/varlink" + iopodmanAPI "github.com/containers/libpod/pkg/varlinkapi" + "github.com/containers/libpod/version" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/varlink/go/varlink" +) + +func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.ServiceOptions) error { + var varlinkInterfaces = []*iopodman.VarlinkInterface{ + iopodmanAPI.New(opts.Command, ic.Libpod), + } + + service, err := varlink.NewService( + "Atomic", + "podman", + version.Version, + "https://github.com/containers/libpod", + ) + if err != nil { + return errors.Wrapf(err, "unable to create new varlink service") + } + + for _, i := range varlinkInterfaces { + if err := service.RegisterInterface(i); err != nil { + return errors.Errorf("unable to register varlink interface %v", i) + } + } + + // Run the varlink server at the given address + if err = service.Listen(opts.URI, opts.Timeout); err != nil { + switch err.(type) { + case varlink.ServiceTimeoutError: + logrus.Infof("varlink service expired (use --time to increase session time beyond %s ms, 0 means never timeout)", opts.Timeout.String()) + return nil + default: + return errors.Wrapf(err, "unable to start varlink service") + } + } + return nil +} diff --git a/pkg/domain/infra/abi/terminal/terminal_linux.go b/pkg/domain/infra/abi/terminal/terminal_linux.go index 15701342f..8d9cdde03 100644 --- a/pkg/domain/infra/abi/terminal/terminal_linux.go +++ b/pkg/domain/infra/abi/terminal/terminal_linux.go @@ -15,13 +15,13 @@ import ( ) // ExecAttachCtr execs and attaches to a container -func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) { +func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpod.ExecConfig, streams *define.AttachStreams) (int, error) { resize := make(chan remotecommand.TerminalSize) haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd())) // Check if we are attached to a terminal. 
If we are, generate resize // events, and set the terminal to raw mode - if haveTerminal && tty { + if haveTerminal && execConfig.Terminal { cancel, oldTermState, err := handleTerminalAttach(ctx, resize) if err != nil { return -1, err @@ -34,16 +34,6 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged b }() } - execConfig := new(libpod.ExecConfig) - execConfig.Command = cmd - execConfig.Terminal = tty - execConfig.Privileged = privileged - execConfig.Environment = env - execConfig.User = user - execConfig.WorkDir = workDir - execConfig.DetachKeys = &detachKeys - execConfig.PreserveFDs = preserveFDs - return ctr.Exec(execConfig, streams, resize) } diff --git a/pkg/domain/infra/abi/trust.go b/pkg/domain/infra/abi/trust.go new file mode 100644 index 000000000..5b89c91d9 --- /dev/null +++ b/pkg/domain/infra/abi/trust.go @@ -0,0 +1,171 @@ +package abi + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + "strings" + + "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/trust" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options entities.ShowTrustOptions) (*entities.ShowTrustReport, error) { + var ( + err error + report entities.ShowTrustReport + ) + policyPath := trust.DefaultPolicyPath(ir.Libpod.SystemContext()) + if len(options.PolicyPath) > 0 { + policyPath = options.PolicyPath + } + report.Raw, err = ioutil.ReadFile(policyPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to read %s", policyPath) + } + if options.Raw { + return &report, nil + } + report.SystemRegistriesDirPath = trust.RegistriesDirPath(ir.Libpod.SystemContext()) + if len(options.RegistryPath) > 0 { + report.SystemRegistriesDirPath = options.RegistryPath + } + policyContentStruct, err := trust.GetPolicy(policyPath) + if err != nil { + return nil, errors.Wrapf(err, "could not read trust policies") + } + report.Policies, err = getPolicyShowOutput(policyContentStruct, report.SystemRegistriesDirPath) + if err != nil { + return nil, errors.Wrapf(err, "could not show trust policies") + } + return &report, nil +} + +func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options entities.SetTrustOptions) error { + var ( + policyContentStruct trust.PolicyContent + newReposContent []trust.RepoContent + ) + trustType := options.Type + if trustType == "accept" { + trustType = "insecureAcceptAnything" + } + + pubkeysfile := options.PubKeysFile + if len(pubkeysfile) == 0 && trustType == "signedBy" { + return errors.Errorf("At least one public key must be defined for type 'signedBy'") + } + + policyPath := trust.DefaultPolicyPath(ir.Libpod.SystemContext()) + if len(options.PolicyPath) > 0 { + policyPath = options.PolicyPath + } + _, err := os.Stat(policyPath) + if !os.IsNotExist(err) { + policyContent, err := ioutil.ReadFile(policyPath) + if err != nil { + return errors.Wrapf(err, "unable to read %s", policyPath) + } + if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil { + return errors.Errorf("could not read trust policies") + } + } + if len(pubkeysfile) != 0 { + for _, filepath := range pubkeysfile { + newReposContent = append(newReposContent, trust.RepoContent{Type: trustType, KeyType: "GPGKeys", KeyPath: filepath}) + } + } else { + newReposContent = append(newReposContent, trust.RepoContent{Type: trustType}) + } + if args[0] == "default" { + policyContentStruct.Default = newReposContent + } else { + if 
len(policyContentStruct.Default) == 0 { + return errors.Errorf("Default trust policy must be set.") + } + registryExists := false + for transport, transportval := range policyContentStruct.Transports { + _, registryExists = transportval[args[0]] + if registryExists { + policyContentStruct.Transports[transport][args[0]] = newReposContent + break + } + } + if !registryExists { + if policyContentStruct.Transports == nil { + policyContentStruct.Transports = make(map[string]trust.RepoMap) + } + if policyContentStruct.Transports["docker"] == nil { + policyContentStruct.Transports["docker"] = make(map[string][]trust.RepoContent) + } + policyContentStruct.Transports["docker"][args[0]] = append(policyContentStruct.Transports["docker"][args[0]], newReposContent...) + } + } + + data, err := json.MarshalIndent(policyContentStruct, "", " ") + if err != nil { + return errors.Wrapf(err, "error setting trust policy") + } + return ioutil.WriteFile(policyPath, data, 0644) +} + +func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) ([]*trust.TrustPolicy, error) { + var output []*trust.TrustPolicy + + registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath) + if err != nil { + return nil, err + } + + if len(policyContentStruct.Default) > 0 { + defaultPolicyStruct := trust.TrustPolicy{ + Name: "* (default)", + RepoName: "default", + Type: trustTypeDescription(policyContentStruct.Default[0].Type), + } + output = append(output, &defaultPolicyStruct) + } + for _, transval := range policyContentStruct.Transports { + for repo, repoval := range transval { + tempTrustShowOutput := trust.TrustPolicy{ + Name: repo, + RepoName: repo, + Type: repoval[0].Type, + } + // TODO - keyarr is not used and I don't know its intent; commenting out for now for someone to fix later + //keyarr := []string{} + uids := []string{} + for _, repoele := range repoval { + if len(repoele.KeyPath) > 0 { + //keyarr = append(keyarr, repoele.KeyPath) + uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...) + } + if len(repoele.KeyData) > 0 { + //keyarr = append(keyarr, string(repoele.KeyData)) + uids = append(uids, trust.GetGPGIdFromKeyData(repoele.KeyData)...) 
+ } + } + tempTrustShowOutput.GPGId = strings.Join(uids, ", ") + + registryNamespace := trust.HaveMatchRegistry(repo, registryConfigs) + if registryNamespace != nil { + tempTrustShowOutput.SignatureStore = registryNamespace.SigStore + } + output = append(output, &tempTrustShowOutput) + } + } + return output, nil +} + +var typeDescription = map[string]string{"insecureAcceptAnything": "accept", "signedBy": "signed", "reject": "reject"} + +func trustTypeDescription(trustType string) string { + trustDescription, exist := typeDescription[trustType] + if !exist { + logrus.Warnf("invalid trust type %s", trustType) + } + return trustDescription +} diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go index bdae4359d..91b2440df 100644 --- a/pkg/domain/infra/abi/volumes.go +++ b/pkg/domain/infra/abi/volumes.go @@ -1,5 +1,3 @@ -// +build ABISupport - package abi import ( @@ -113,6 +111,10 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin } func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.VolumePruneOptions) ([]*entities.VolumePruneReport, error) { + return ic.pruneVolumesHelper(ctx) +} + +func (ic *ContainerEngine) pruneVolumesHelper(ctx context.Context) ([]*entities.VolumePruneReport, error) { var ( reports []*entities.VolumePruneReport ) diff --git a/pkg/domain/infra/runtime_abi.go b/pkg/domain/infra/runtime_abi.go index 7aa6986a7..67c1cd534 100644 --- a/pkg/domain/infra/runtime_abi.go +++ b/pkg/domain/infra/runtime_abi.go @@ -6,8 +6,10 @@ import ( "context" "fmt" + "github.com/containers/libpod/libpod" "github.com/containers/libpod/pkg/bindings" "github.com/containers/libpod/pkg/domain/entities" + "github.com/containers/libpod/pkg/domain/infra/abi" "github.com/containers/libpod/pkg/domain/infra/tunnel" ) @@ -36,3 +38,32 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error) } return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) } + +// NewSystemEngine factory provides a libpod runtime for specialized system operations +func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (entities.SystemEngine, error) { + switch facts.EngineMode { + case entities.ABIMode: + var r *libpod.Runtime + var err error + switch setup { + case entities.NormalMode: + r, err = GetRuntime(context.Background(), facts.FlagSet, facts) + case entities.RenumberMode: + r, err = GetRuntimeRenumber(context.Background(), facts.FlagSet, facts) + case entities.ResetMode: + r, err = GetRuntimeRenumber(context.Background(), facts.FlagSet, facts) + case entities.MigrateMode: + name, flagErr := facts.FlagSet.GetString("new-runtime") + if flagErr != nil { + return nil, flagErr + } + r, err = GetRuntimeMigrate(context.Background(), facts.FlagSet, facts, name) + case entities.NoFDsMode: + r, err = GetRuntimeDisableFDs(context.Background(), facts.FlagSet, facts) + } + return &abi.SystemEngine{Libpod: r}, err + case entities.TunnelMode: + return nil, fmt.Errorf("tunnel system runtime not supported") + } + return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode) +} diff --git a/pkg/domain/infra/runtime_abi_unsupported.go b/pkg/domain/infra/runtime_abi_unsupported.go new file mode 100644 index 000000000..c4e25e990 --- /dev/null +++ b/pkg/domain/infra/runtime_abi_unsupported.go @@ -0,0 +1,14 @@ +// +build !ABISupport + +package infra + +import ( + "errors" + + "github.com/containers/libpod/pkg/domain/entities" +) + +// NewSystemEngine factory provides a libpod 
runtime for specialized system operations +func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (entities.SystemEngine, error) { + return nil, errors.New("not implemented") +} diff --git a/pkg/domain/infra/runtime_image_proxy.go b/pkg/domain/infra/runtime_image_proxy.go deleted file mode 100644 index ea5d0e6f2..000000000 --- a/pkg/domain/infra/runtime_image_proxy.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build ABISupport - -package infra - -import ( - "context" - - "github.com/containers/libpod/pkg/domain/entities" - "github.com/containers/libpod/pkg/domain/infra/abi" - "github.com/spf13/pflag" -) - -// ContainerEngine Image Proxy will be EOL'ed after podman is separated from libpod repo - -func NewLibpodImageRuntime(flags *pflag.FlagSet, opts *entities.PodmanConfig) (entities.ImageEngine, error) { - r, err := GetRuntime(context.Background(), flags, opts) - if err != nil { - return nil, err - } - return &abi.ImageEngine{Libpod: r}, nil -} diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go index 7c9180d43..a57eadc63 100644 --- a/pkg/domain/infra/runtime_libpod.go +++ b/pkg/domain/infra/runtime_libpod.go @@ -213,6 +213,9 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo if fs.Changed("hooks-dir") { options = append(options, libpod.WithHooksDir(cfg.Engine.HooksDir...)) } + if fs.Changed("registries-conf") { + options = append(options, libpod.WithRegistriesConf(cfg.RegistriesConf)) + } // TODO flag to set CNI plugins dir? diff --git a/pkg/domain/infra/runtime_proxy.go b/pkg/domain/infra/runtime_proxy.go index 41193fd89..e7002e20f 100644 --- a/pkg/domain/infra/runtime_proxy.go +++ b/pkg/domain/infra/runtime_proxy.go @@ -19,3 +19,11 @@ func NewLibpodRuntime(flags *flag.FlagSet, opts *entities.PodmanConfig) (entitie } return &abi.ContainerEngine{Libpod: r}, nil } + +func NewLibpodImageRuntime(flags *flag.FlagSet, opts *entities.PodmanConfig) (entities.ImageEngine, error) { + r, err := GetRuntime(context.Background(), flags, opts) + if err != nil { + return nil, err + } + return &abi.ImageEngine{Libpod: r}, nil +} diff --git a/pkg/domain/infra/tunnel/auto-update.go b/pkg/domain/infra/tunnel/auto-update.go new file mode 100644 index 000000000..5c2dd360d --- /dev/null +++ b/pkg/domain/infra/tunnel/auto-update.go @@ -0,0 +1,12 @@ +package tunnel + +import ( + "context" + + "github.com/containers/libpod/pkg/domain/entities" + "github.com/pkg/errors" +) + +func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) { + return nil, []error{errors.New("not implemented")} +} diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index 32f9c4e36..beba55c2b 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -4,16 +4,25 @@ import ( "context" "io" "os" + "strconv" + "strings" + "time" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/bindings" "github.com/containers/libpod/pkg/bindings/containers" "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/specgen" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) +func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string, image string, args []string, options entities.ContainerRunlabelOptions) error { + return errors.New("not implemented") +} + 
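A minimal caller sketch for the tunnel engine whose methods follow: the bindings.NewConnection helper and the socket URI shape are assumptions about the surrounding API, while ContainerExists and BoolReport match this diff.

	package main

	import (
		"context"
		"fmt"

		"github.com/containers/libpod/pkg/bindings"
		"github.com/containers/libpod/pkg/domain/infra/tunnel"
	)

	func main() {
		// Dial a running `podman system service`; the returned context carries
		// the connection and is what the engine's ClientCxt field expects.
		connCtx, err := bindings.NewConnection(context.Background(), "unix:/run/podman/podman.sock")
		if err != nil {
			panic(err)
		}
		ic := tunnel.ContainerEngine{ClientCxt: connCtx}
		report, err := ic.ContainerExists(connCtx, "mycontainer")
		if err != nil {
			panic(err)
		}
		fmt.Println("exists:", report.Value)
	}
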
func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrId string) (*entities.BoolReport, error) { exists, err := containers.Exists(ic.ClientCxt, nameOrId) return &entities.BoolReport{Value: exists}, err @@ -80,10 +89,25 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin } for _, c := range ctrs { report := entities.StopReport{Id: c.ID} - report.Err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout) - // TODO we need to associate errors returned by http with common - // define.errors so that we can equity tests. this will allow output - // to be the same as the native client + if err = containers.Stop(ic.ClientCxt, c.ID, &options.Timeout); err != nil { + // These first two are considered non-fatal under the right conditions + if errors.Cause(err).Error() == define.ErrCtrStopped.Error() { + logrus.Debugf("Container %s is already stopped", c.ID) + reports = append(reports, &report) + continue + } else if options.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() { + logrus.Debugf("Container %s is not running, could not stop", c.ID) + reports = append(reports, &report) + continue + } + + // TODO we need to associate errors returned by http with common + // define.errors so that we can compare them in tests. this will allow output + // to be the same as the native client + report.Err = err + reports = append(reports, &report) + continue + } reports = append(reports, &report) } return reports, nil @@ -263,7 +287,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [ } } for _, c := range ctrs { - report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRuninng, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export) + report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRunning, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export) if err != nil { reports = append(reports, &entities.CheckpointReport{Id: c.ID, Err: err}) } @@ -314,21 +338,92 @@ func (ic *ContainerEngine) ContainerCreate(ctx context.Context, s *specgen.SpecG return &entities.ContainerCreateReport{Id: response.ID}, nil } -func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []string, options entities.ContainerLogsOptions) error { - // The endpoint is not ready yet and requires some more work. 
- return errors.New("not implemented yet") +func (ic *ContainerEngine) ContainerLogs(_ context.Context, nameOrIds []string, options entities.ContainerLogsOptions) error { + since := options.Since.Format(time.RFC3339) + tail := strconv.FormatInt(options.Tail, 10) + stdout := options.Writer != nil + opts := containers.LogOptions{ + Follow: &options.Follow, + Since: &since, + Stderr: &stdout, + Stdout: &stdout, + Tail: &tail, + Timestamps: &options.Timestamps, + Until: nil, + } + + var err error + outCh := make(chan string) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + err = containers.Logs(ic.ClientCxt, nameOrIds[0], opts, outCh, outCh) + cancel() + }() + + for { + select { + case <-ctx.Done(): + return err + case line := <-outCh: + _, _ = io.WriteString(options.Writer, line) + } + } } func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string, options entities.AttachOptions) error { - return errors.New("not implemented") + return containers.Attach(ic.ClientCxt, nameOrId, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil) } -func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) { +func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) { return 125, errors.New("not implemented") } +func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID string, options entities.ExecOptions) (string, error) { + return "", errors.New("not implemented") +} + +func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error { //nolint + attachErr := make(chan error) + attachReady := make(chan bool) + go func() { + err := containers.Attach(ic.ClientCxt, name, detachKeys, bindings.PFalse, bindings.PTrue, input, output, errput, attachReady) + attachErr <- err + }() + // Wait for the attach to actually happen before starting + // the container. 
+ <-attachReady + if err := containers.Start(ic.ClientCxt, name, detachKeys); err != nil { + return err + } + return <-attachErr +} + func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { - return nil, errors.New("not implemented") + var reports []*entities.ContainerStartReport + for _, name := range namesOrIds { + report := entities.ContainerStartReport{ + Id: name, + RawInput: name, + ExitCode: 125, + } + if options.Attach { + report.Err = startAndAttach(ic, name, &options.DetachKeys, options.Stdin, options.Stdout, options.Stderr) + if report.Err == nil { + exitCode, err := containers.Wait(ic.ClientCxt, name, nil) + if err == nil { + report.ExitCode = int(exitCode) + } + } else { + report.ExitCode = define.ExitCode(report.Err) + } + reports = append(reports, &report) + return reports, nil + } + report.Err = containers.Start(ic.ClientCxt, name, &options.DetachKeys) + report.ExitCode = define.ExitCode(report.Err) + reports = append(reports, &report) + } + return reports, nil } func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.ContainerListOptions) ([]entities.ListContainer, error) { @@ -336,7 +431,30 @@ func (ic *ContainerEngine) ContainerList(ctx context.Context, options entities.C } func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.ContainerRunOptions) (*entities.ContainerRunReport, error) { - return nil, errors.New("not implemented") + if opts.Rm { + logrus.Info("the remote client does not support --rm yet") + } + con, err := containers.CreateWithSpec(ic.ClientCxt, opts.Spec) + if err != nil { + return nil, err + } + report := entities.ContainerRunReport{Id: con.ID} + // Attach + if !opts.Detach { + err = startAndAttach(ic, con.ID, &opts.DetachKeys, opts.InputStream, opts.OutputStream, opts.ErrorStream) + if err == nil { + exitCode, err := containers.Wait(ic.ClientCxt, con.ID, nil) + if err == nil { + report.ExitCode = int(exitCode) + } + } + } else { + err = containers.Start(ic.ClientCxt, con.ID, nil) + } + if err != nil { + report.ExitCode = define.ExitCode(err) + } + return &report, err } func (ic *ContainerEngine) ContainerDiff(ctx context.Context, nameOrId string, _ entities.DiffOptions) (*entities.DiffReport, error) { @@ -356,6 +474,11 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin } for _, ctr := range ctrs { err := containers.ContainerInit(ic.ClientCxt, ctr.ID) + // When using all, it is NOT considered an error if a container + // has already been init'd. 
+ if err != nil && options.All && strings.Contains(errors.Cause(err).Error(), define.ErrCtrStateInvalid.Error()) { + err = nil + } reports = append(reports, &entities.ContainerInitReport{ Err: err, Id: ctr.ID, @@ -377,7 +500,29 @@ func (ic *ContainerEngine) Config(_ context.Context) (*config.Config, error) { } func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrId string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) { - return nil, errors.New("not implemented") + var ( + reports []*entities.ContainerPortReport + namesOrIds []string + ) + if len(nameOrId) > 0 { + namesOrIds = append(namesOrIds, nameOrId) + } + ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds) + if err != nil { + return nil, err + } + for _, con := range ctrs { + if con.State != define.ContainerStateRunning.String() { + continue + } + if len(con.Ports) > 0 { + reports = append(reports, &entities.ContainerPortReport{ + Id: con.ID, + Ports: con.Ports, + }) + } + } + return reports, nil } func (ic *ContainerEngine) ContainerCp(ctx context.Context, source, dest string, options entities.ContainerCpOptions) (*entities.ContainerCpReport, error) { @@ -387,3 +532,7 @@ func (ic *ContainerEngine) ContainerCp(ctx context.Context, source, dest string, // Shutdown Libpod engine func (ic *ContainerEngine) Shutdown(_ context.Context) { } + +func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) error { + return errors.New("not implemented") +} diff --git a/pkg/domain/infra/tunnel/events.go b/pkg/domain/infra/tunnel/events.go index 93da3aeb4..6a08a1f85 100644 --- a/pkg/domain/infra/tunnel/events.go +++ b/pkg/domain/infra/tunnel/events.go @@ -25,6 +25,7 @@ func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptio for e := range binChan { opts.EventChan <- entities.ConvertToLibpodEvent(e) } + close(opts.EventChan) }() - return system.Events(ic.ClientCxt, binChan, nil, &opts.Since, &opts.Until, filters) + return system.Events(ic.ClientCxt, binChan, nil, &opts.Since, &opts.Until, filters, &opts.Stream) } diff --git a/pkg/domain/infra/tunnel/generate.go b/pkg/domain/infra/tunnel/generate.go index 3cd483053..eb5587f89 100644 --- a/pkg/domain/infra/tunnel/generate.go +++ b/pkg/domain/infra/tunnel/generate.go @@ -3,6 +3,7 @@ package tunnel import ( "context" + "github.com/containers/libpod/pkg/bindings/generate" "github.com/containers/libpod/pkg/domain/entities" "github.com/pkg/errors" ) @@ -10,3 +11,7 @@ import ( func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, options entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) { return nil, errors.New("not implemented for tunnel") } + +func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) { + return generate.GenerateKube(ic.ClientCxt, nameOrID, options) +} diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go index 682d60d6a..862c7a5d6 100644 --- a/pkg/domain/infra/tunnel/helpers.go +++ b/pkg/domain/infra/tunnel/helpers.go @@ -20,7 +20,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam if all && len(namesOrIds) > 0 { return nil, errors.New("cannot lookup containers and all") } - c, err := containers.List(contextWithConnection, nil, &bindings.PTrue, nil, nil, nil, &bindings.PTrue) + c, err := 
containers.List(contextWithConnection, nil, bindings.PTrue, nil, nil, nil, bindings.PTrue) if err != nil { return nil, err } @@ -37,7 +37,7 @@ func getContainersByContext(contextWithConnection context.Context, all bool, nam } } if !found { - return nil, errors.Errorf("unable to find container %q", id) + return nil, errors.Wrapf(define.ErrNoSuchCtr, "unable to find container %q", id) } } return cons, nil diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index dcc5fc3e7..c300e74d0 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -4,6 +4,8 @@ import ( "context" "io/ioutil" "os" + "path" + "strings" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" @@ -12,6 +14,7 @@ import ( "github.com/containers/libpod/pkg/domain/entities" "github.com/containers/libpod/pkg/domain/utils" utils2 "github.com/containers/libpod/utils" + "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" ) @@ -20,13 +23,18 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.Boo return &entities.BoolReport{Value: found}, err } -func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, error) { - return images.Remove(ir.ClientCxt, imagesArg, opts) +func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, []error) { + return images.BatchRemove(ir.ClientCxt, imagesArg, opts) } func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) { - images, err := images.List(ir.ClientCxt, &opts.All, opts.Filters) + filters := make(map[string][]string, len(opts.Filter)) + for _, filter := range opts.Filter { + f := strings.Split(filter, "=") + filters[f[0]] = f[1:] + } + images, err := images.List(ir.ClientCxt, &opts.All, filters) if err != nil { return nil, err } @@ -61,7 +69,13 @@ func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entiti } func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) { - results, err := images.Prune(ir.ClientCxt, &opts.All, opts.Filters) + filters := make(map[string][]string, len(opts.Filter)) + for _, filter := range opts.Filter { + f := strings.Split(filter, "=") + filters[f[0]] = f[1:] + } + + results, err := images.Prune(ir.ClientCxt, &opts.All, filters) if err != nil { return nil, err } @@ -112,7 +126,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string, func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error { // Remove all tags if none are provided if len(tags) == 0 { - newImage, err := images.GetImage(ir.ClientCxt, nameOrId, &bindings.PFalse) + newImage, err := images.GetImage(ir.ClientCxt, nameOrId, bindings.PFalse) if err != nil { return err } @@ -190,7 +204,6 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string, f *os.File err error ) - switch options.Format { case "oci-dir", "docker-dir": f, err = ioutil.TempFile("", "podman_save") @@ -254,13 +267,24 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) { } func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { - return nil, errors.New("not implemented yet") + if 
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index dcc5fc3e7..c300e74d0 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -4,6 +4,8 @@ import (
 	"context"
 	"io/ioutil"
 	"os"
+	"path"
+	"strings"
 
 	"github.com/containers/common/pkg/config"
 	"github.com/containers/image/v5/docker/reference"
@@ -12,6 +14,7 @@ import (
 	"github.com/containers/libpod/pkg/domain/entities"
 	"github.com/containers/libpod/pkg/domain/utils"
 	utils2 "github.com/containers/libpod/utils"
+	"github.com/containers/storage/pkg/archive"
 	"github.com/pkg/errors"
 )
 
@@ -20,13 +23,18 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrId string) (*entities.Boo
 	return &entities.BoolReport{Value: found}, err
 }
 
-func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, error) {
-	return images.Remove(ir.ClientCxt, imagesArg, opts)
+func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, []error) {
+	return images.BatchRemove(ir.ClientCxt, imagesArg, opts)
 }
 
 func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
-	images, err := images.List(ir.ClientCxt, &opts.All, opts.Filters)
+	filters := make(map[string][]string, len(opts.Filter))
+	for _, filter := range opts.Filter {
+		f := strings.Split(filter, "=")
+		filters[f[0]] = f[1:]
+	}
+	images, err := images.List(ir.ClientCxt, &opts.All, filters)
 	if err != nil {
 		return nil, err
 	}
@@ -61,7 +69,13 @@ func (ir *ImageEngine) History(ctx context.Context, nameOrId string, opts entiti
 }
 
 func (ir *ImageEngine) Prune(ctx context.Context, opts entities.ImagePruneOptions) (*entities.ImagePruneReport, error) {
-	results, err := images.Prune(ir.ClientCxt, &opts.All, opts.Filters)
+	filters := make(map[string][]string, len(opts.Filter))
+	for _, filter := range opts.Filter {
+		f := strings.Split(filter, "=")
+		filters[f[0]] = f[1:]
+	}
+
+	results, err := images.Prune(ir.ClientCxt, &opts.All, filters)
 	if err != nil {
 		return nil, err
 	}
@@ -112,7 +126,7 @@ func (ir *ImageEngine) Tag(ctx context.Context, nameOrId string, tags []string,
 func (ir *ImageEngine) Untag(ctx context.Context, nameOrId string, tags []string, options entities.ImageUntagOptions) error {
 	// Remove all tags if none are provided
 	if len(tags) == 0 {
-		newImage, err := images.GetImage(ir.ClientCxt, nameOrId, &bindings.PFalse)
+		newImage, err := images.GetImage(ir.ClientCxt, nameOrId, bindings.PFalse)
 		if err != nil {
 			return err
 		}
@@ -190,7 +204,6 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrId string, tags []string,
 		f   *os.File
 		err error
 	)
-
 	switch options.Format {
 	case "oci-dir", "docker-dir":
 		f, err = ioutil.TempFile("", "podman_save")
@@ -254,13 +267,24 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) {
 }
 
 func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) {
-	return nil, errors.New("not implemented yet")
+	if len(containerFiles) > 1 {
+		return nil, errors.New("remote build supports only one Containerfile")
+	}
+	tarfile, err := archive.Tar(path.Base(containerFiles[0]), 0)
+	if err != nil {
+		return nil, err
+	}
+	return images.Build(ir.ClientCxt, containerFiles, opts, tarfile)
 }
 
 func (ir *ImageEngine) Tree(ctx context.Context, nameOrId string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
-	return nil, errors.New("not implemented yet")
+	return images.Tree(ir.ClientCxt, nameOrId, &opts.WhatRequires)
 }
 
 // Shutdown Libpod engine
 func (ir *ImageEngine) Shutdown(_ context.Context) {
 }
+
+func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
+	return nil, errors.New("not implemented yet")
+}
diff --git a/pkg/domain/infra/tunnel/manifest.go b/pkg/domain/infra/tunnel/manifest.go
index 18b400533..beac378fe 100644
--- a/pkg/domain/infra/tunnel/manifest.go
+++ b/pkg/domain/infra/tunnel/manifest.go
@@ -3,6 +3,7 @@ package tunnel
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"strings"
 
 	"github.com/containers/libpod/libpod/image"
@@ -56,9 +57,29 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, opts entities.ManifestAd
 		}
 		manifestAddOpts.Annotation = annotations
 	}
-	listID, err := manifests.Add(ctx, opts.Images[1], manifestAddOpts)
+	listID, err := manifests.Add(ir.ClientCxt, opts.Images[1], manifestAddOpts)
 	if err != nil {
 		return listID, errors.Wrapf(err, "error adding to manifest list %s", opts.Images[1])
 	}
 	return listID, nil
 }
+
+// ManifestAnnotate updates an entry of the manifest list
+func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, names []string, opts entities.ManifestAnnotateOptions) (string, error) {
+	return "", errors.New("not implemented")
+}
+
+// ManifestRemove removes the digest from manifest list
+func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (string, error) {
+	updatedListID, err := manifests.Remove(ir.ClientCxt, names[0], names[1])
+	if err != nil {
+		return updatedListID, errors.Wrapf(err, "error removing from manifest %s", names[0])
+	}
+	return fmt.Sprintf("%s :%s\n", updatedListID, names[1]), nil
+}
+
+// ManifestPush pushes a manifest list or image index to the destination
+func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts entities.ManifestPushOptions) error {
+	_, err := manifests.Push(ir.ClientCxt, names[0], &names[1], &opts.All)
+	return err
+}
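The List and Prune hunks in images.go above both inline the same conversion from CLI "key=value" filter strings into the map[string][]string shape the image bindings expect. A sketch of that conversion as a hypothetical helper (the diff inlines it instead); note the sketch uses strings.SplitN so a '=' inside the value survives intact, a detail worth checking in the inlined strings.Split version:

package main

import (
	"fmt"
	"strings"
)

// parseFilters is a hypothetical helper mirroring the inlined logic in
// List and Prune: CLI "key=value" strings become map[string][]string.
func parseFilters(raw []string) map[string][]string {
	filters := make(map[string][]string, len(raw))
	for _, filter := range raw {
		// SplitN keeps '=' inside the value intact, e.g. "label=app=web".
		f := strings.SplitN(filter, "=", 2)
		if len(f) < 2 {
			continue // skip malformed entries in this sketch
		}
		filters[f[0]] = append(filters[f[0]], f[1])
	}
	return filters
}

func main() {
	fmt.Println(parseFilters([]string{"dangling=true", "label=app=web"}))
	// Output: map[dangling:[true] label:[app=web]]
}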
diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go
new file mode 100644
index 000000000..7725d8257
--- /dev/null
+++ b/pkg/domain/infra/tunnel/network.go
@@ -0,0 +1,42 @@
+package tunnel
+
+import (
+	"context"
+
+	"github.com/containers/libpod/pkg/bindings/network"
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]*entities.NetworkListReport, error) {
+	return network.List(ic.ClientCxt)
+}
+
+func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) {
+	var reports []entities.NetworkInspectReport
+	for _, name := range namesOrIds {
+		report, err := network.Inspect(ic.ClientCxt, name)
+		if err != nil {
+			return nil, err
+		}
+		reports = append(reports, report...)
+	}
+	return reports, nil
+}
+
+func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) {
+	var reports []*entities.NetworkRmReport
+	for _, name := range namesOrIds {
+		report, err := network.Remove(ic.ClientCxt, name, &options.Force)
+		if err != nil {
+			// Record the failure instead of indexing into a possibly nil report.
+			reports = append(reports, &entities.NetworkRmReport{Name: name, Err: err})
+			continue
+		}
+		reports = append(reports, report...)
+	}
+	return reports, nil
+}
+
+func (ic *ContainerEngine) NetworkCreate(ctx context.Context, name string, options entities.NetworkCreateOptions) (*entities.NetworkCreateReport, error) {
+	return network.Create(ic.ClientCxt, options, &name)
+}
diff --git a/pkg/domain/infra/tunnel/play.go b/pkg/domain/infra/tunnel/play.go
new file mode 100644
index 000000000..15383a703
--- /dev/null
+++ b/pkg/domain/infra/tunnel/play.go
@@ -0,0 +1,12 @@
+package tunnel
+
+import (
+	"context"
+
+	"github.com/containers/libpod/pkg/bindings/play"
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
+	return play.PlayKube(ic.ClientCxt, path, options)
+}
diff --git a/pkg/domain/infra/tunnel/runtime.go b/pkg/domain/infra/tunnel/runtime.go
index c111f99e9..357e2c390 100644
--- a/pkg/domain/infra/tunnel/runtime.go
+++ b/pkg/domain/infra/tunnel/runtime.go
@@ -13,3 +13,8 @@ type ImageEngine struct {
 type ContainerEngine struct {
 	ClientCxt context.Context
 }
+
+// System-related runtime using an ssh-tunnel to utilize Podman service
+type SystemEngine struct {
+	ClientCxt context.Context
+}
diff --git a/pkg/domain/infra/tunnel/system.go b/pkg/domain/infra/tunnel/system.go
index 97bf885e7..109e6c1d7 100644
--- a/pkg/domain/infra/tunnel/system.go
+++ b/pkg/domain/infra/tunnel/system.go
@@ -21,3 +21,20 @@ func (ic *ContainerEngine) VarlinkService(_ context.Context, _ entities.ServiceO
 func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error {
 	panic(errors.New("rootless engine mode is not supported when tunneling"))
 }
+
+// SystemPrune prunes unused data from the system.
+func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) {
+	return system.Prune(ic.ClientCxt, &options.All, &options.Volume)
+}
+
+func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.SystemDfOptions) (*entities.SystemDfReport, error) {
+	return system.DiskUsage(ic.ClientCxt)
+}
+
+func (ic *ContainerEngine) Unshare(ctx context.Context, args []string) error {
+	return errors.New("unshare is not supported on remote clients")
+}
+
+func (ic *ContainerEngine) Version(ctx context.Context) (*entities.SystemVersionReport, error) {
+	return system.Version(ic.ClientCxt)
+}
diff --git a/pkg/domain/infra/tunnel/trust.go b/pkg/domain/infra/tunnel/trust.go
new file mode 100644
index 000000000..a976bfdc2
--- /dev/null
+++ b/pkg/domain/infra/tunnel/trust.go
@@ -0,0 +1,16 @@
+package tunnel
+
+import (
+	"context"
+	"errors"
+
+	"github.com/containers/libpod/pkg/domain/entities"
+)
+
+func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options entities.ShowTrustOptions) (*entities.ShowTrustReport, error) {
+	return nil, errors.New("not implemented")
+}
+
+func (ir *ImageEngine) SetTrust(ctx context.Context, args []string, options entities.SetTrustOptions) error {
+	return errors.New("not implemented")
+}
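Taken together, the tunnel files above follow one pattern: each engine struct holds only a ClientCxt (the context carrying the connection to the Podman service), and every entities-level method is a thin delegation to the matching bindings package. A minimal sketch of that shape; every name here is an illustrative stand-in, not the real bindings API:

package main

import (
	"context"
	"errors"
	"fmt"
)

// PlayKubeReport is an illustrative stand-in for the entities type.
type PlayKubeReport struct{ Pod string }

// bindingPlayKube stands in for pkg/bindings/play.PlayKube.
func bindingPlayKube(ctx context.Context, path string) (*PlayKubeReport, error) {
	if ctx == nil {
		return nil, errors.New("no connection in context")
	}
	return &PlayKubeReport{Pod: "demo-pod"}, nil
}

// ContainerEngine mirrors the tunnel engines: no local runtime state,
// just the context that carries the connection to the service.
type ContainerEngine struct {
	ClientCxt context.Context
}

func (ic *ContainerEngine) PlayKube(ctx context.Context, path string) (*PlayKubeReport, error) {
	// As in the tunnel code, the stored connection context is
	// forwarded to the binding, not the caller's ctx.
	return bindingPlayKube(ic.ClientCxt, path)
}

func main() {
	ic := &ContainerEngine{ClientCxt: context.Background()}
	report, err := ic.PlayKube(context.TODO(), "deploy.yaml")
	fmt.Println(report.Pod, err)
}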