Diffstat (limited to 'cmd/podman/shared')
-rw-r--r-- | cmd/podman/shared/container.go    | 257
-rw-r--r-- | cmd/podman/shared/create.go       | 109
-rw-r--r-- | cmd/podman/shared/create_cli.go   |   6
-rw-r--r-- | cmd/podman/shared/funcs.go        |  15
-rw-r--r-- | cmd/podman/shared/intermediate.go |  14
-rw-r--r-- | cmd/podman/shared/parse/parse.go  |  29
-rw-r--r-- | cmd/podman/shared/pod.go          |  13
7 files changed, 200 insertions, 243 deletions
diff --git a/cmd/podman/shared/container.go b/cmd/podman/shared/container.go index 55cc529e0..7f53f5ec9 100644 --- a/cmd/podman/shared/container.go +++ b/cmd/podman/shared/container.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - v1 "k8s.io/api/core/v1" "os" "path/filepath" "regexp" @@ -16,16 +15,15 @@ import ( "github.com/containers/image/types" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/image" - "github.com/containers/libpod/pkg/inspect" - cc "github.com/containers/libpod/pkg/spec" "github.com/containers/libpod/pkg/util" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/docker/go-units" "github.com/google/shlex" - "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" ) const ( @@ -34,7 +32,7 @@ const ( cmdTruncLength = 17 ) -// PsOptions describes the struct being formed for ps +// PsOptions describes the struct being formed for ps. type PsOptions struct { All bool Format string @@ -49,11 +47,11 @@ type PsOptions struct { Sync bool } -// BatchContainerStruct is the return obkect from BatchContainer and contains -// container related information +// BatchContainerStruct is the return object from BatchContainer and contains +// container related information. type BatchContainerStruct struct { ConConfig *libpod.ContainerConfig - ConState libpod.ContainerStatus + ConState define.ContainerStatus ExitCode int32 Exited bool Pid int @@ -63,7 +61,7 @@ type BatchContainerStruct struct { } // PsContainerOutput is the struct being returned from a parallel -// Batch operation +// batch operation. type PsContainerOutput struct { ID string Image string @@ -73,7 +71,7 @@ type PsContainerOutput struct { Names string IsInfra bool Status string - State libpod.ContainerStatus + State define.ContainerStatus Pid int Size *ContainerSize Pod string @@ -92,7 +90,7 @@ type PsContainerOutput struct { Mounts string } -// Namespace describes output for ps namespace +// Namespace describes output for ps namespace. type Namespace struct { PID string `json:"pid,omitempty"` Cgroup string `json:"cgroup,omitempty"` @@ -105,17 +103,17 @@ type Namespace struct { } // ContainerSize holds the size of the container's root filesystem and top -// read-write layer +// read-write layer. type ContainerSize struct { RootFsSize int64 `json:"rootFsSize"` RwSize int64 `json:"rwSize"` } // NewBatchContainer runs a batch process under one lock to get container information and only -// be called in PBatch +// be called in PBatch. 
func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) { var ( - conState libpod.ContainerStatus + conState define.ContainerStatus command string created string status string @@ -186,16 +184,16 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput } switch conState.String() { - case libpod.ContainerStateExited.String(): + case define.ContainerStateExited.String(): fallthrough - case libpod.ContainerStateStopped.String(): + case define.ContainerStateStopped.String(): exitedSince := units.HumanDuration(time.Since(exitedAt)) status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince) - case libpod.ContainerStateRunning.String(): + case define.ContainerStateRunning.String(): status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago" - case libpod.ContainerStatePaused.String(): + case define.ContainerStatePaused.String(): status = "Paused" - case libpod.ContainerStateCreated.String(), libpod.ContainerStateConfigured.String(): + case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String(): status = "Created" default: status = "Error" @@ -259,15 +257,15 @@ type workerInput struct { job int } -// worker is a "threaded" worker that takes jobs from the channel "queue" +// worker is a "threaded" worker that takes jobs from the channel "queue". func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errors chan<- error) { for j := range jobs { r, err := j.parallelFunc() - // If we find an error, we return just the error + // If we find an error, we return just the error. if err != nil { errors <- err } else { - // Return the result + // Return the result. results <- r } wg.Done() @@ -281,8 +279,8 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) return strings.Contains(c.ID(), filterValue) }, nil case "label": - var filterArray []string = strings.SplitN(filterValue, "=", 2) - var filterKey string = filterArray[0] + var filterArray = strings.SplitN(filterValue, "=", 2) + var filterKey = filterArray[0] if len(filterArray) > 1 { filterValue = filterArray[1] } else { @@ -298,7 +296,11 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) }, nil case "name": return func(c *libpod.Container) bool { - return strings.Contains(c.Name(), filterValue) + match, err := regexp.MatchString(filterValue, c.Name()) + if err != nil { + return false + } + return match }, nil case "exited": exitCode, err := strconv.ParseInt(filterValue, 10, 32) @@ -307,7 +309,7 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) } return func(c *libpod.Container) bool { ec, exited, err := c.ExitCode() - if ec == int32(exitCode) && err == nil && exited == true { + if ec == int32(exitCode) && err == nil && exited { return true } return false @@ -325,9 +327,9 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) filterValue = "exited" } state := status.String() - if status == libpod.ContainerStateConfigured { + if status == define.ContainerStateConfigured { state = "created" - } else if status == libpod.ContainerStateStopped { + } else if status == define.ContainerStateStopped { state = "exited" } return state == filterValue @@ -396,7 +398,7 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime) return nil, errors.Errorf("%s is an invalid filter", filter) } -// GetPsContainerOutput returns a slice of containers specifically for ps output +// 
GetPsContainerOutput returns a slice of containers specifically for ps output. func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) { var ( filterFuncs []libpod.ContainerFilter @@ -417,21 +419,21 @@ func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, m } } if !opts.Latest { - // Get all containers + // Get all containers. containers, err := r.GetContainers(filterFuncs...) if err != nil { return nil, err } - // We only want the last few containers + // We only want the last few containers. if opts.Last > 0 && opts.Last <= len(containers) { return nil, errors.Errorf("--last not yet supported") } else { outputContainers = containers } } else { - // Get just the latest container - // Ignore filters + // Get just the latest container. + // Ignore filters. latestCtr, err := r.GetLatestContainer() if err != nil { return nil, err @@ -444,8 +446,8 @@ func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, m return pss, nil } -// PBatch is performs batch operations on a container in parallel. It spawns the number of workers -// relative to the the number of parallel operations desired. +// PBatch performs batch operations on a container in parallel. It spawns the +// number of workers relative to the number of parallel operations desired. func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput { var ( wg sync.WaitGroup @@ -453,7 +455,7 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsCon ) // If the number of containers in question is less than the number of - // proposed parallel operations, we shouldnt spawn so many workers + // proposed parallel operations, we shouldnt spawn so many workers. if workers > len(containers) { workers = len(containers) } @@ -462,12 +464,12 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsCon results := make(chan PsContainerOutput, len(containers)) batchErrors := make(chan error, len(containers)) - // Create the workers + // Create the workers. for w := 1; w <= workers; w++ { go worker(&wg, jobs, results, batchErrors) } - // Add jobs to the workers + // Add jobs to the workers. for i, j := range containers { j := j wg.Add(1) @@ -492,7 +494,7 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsCon // We sort out running vs non-running here to save lots of copying // later. if !opts.All && !opts.Latest && opts.Last < 1 { - if !res.IsInfra && res.State == libpod.ContainerStateRunning { + if !res.IsInfra && res.State == define.ContainerStateRunning { psResults = append(psResults, res) } } else { @@ -502,12 +504,12 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsCon return psResults } -// BatchContainer is used in ps to reduce performance hits by "batching" +// BatchContainerOp is used in ps to reduce performance hits by "batching" // locks. func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) { var ( conConfig *libpod.ContainerConfig - conState libpod.ContainerStatus + conState define.ContainerStatus err error exitCode int32 exited bool @@ -580,7 +582,7 @@ func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStru }, nil } -// GetNamespaces returns a populated namespace struct +// GetNamespaces returns a populated namespace struct. 
func GetNamespaces(pid int) *Namespace { ctrPID := strconv.Itoa(pid) cgroup, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "cgroup")) @@ -611,9 +613,9 @@ func getNamespaceInfo(path string) (string, error) { return getStrFromSquareBrackets(val), nil } -// getStrFromSquareBrackets gets the string inside [] from a string +// getStrFromSquareBrackets gets the string inside [] from a string. func getStrFromSquareBrackets(cmd string) string { - reg, err := regexp.Compile(".*\\[|\\].*") + reg, err := regexp.Compile(`.*\[|\].*`) if err != nil { return "" } @@ -621,145 +623,6 @@ func getStrFromSquareBrackets(cmd string) string { return strings.Join(arr, ",") } -// GetCtrInspectInfo takes container inspect data and collects all its info into a ContainerData -// structure for inspection related methods -func GetCtrInspectInfo(config *libpod.ContainerConfig, ctrInspectData *inspect.ContainerInspectData, createArtifact *cc.CreateConfig) (*inspect.ContainerData, error) { - spec := config.Spec - - cpus, mems, period, quota, realtimePeriod, realtimeRuntime, shares := getCPUInfo(spec) - blkioWeight, blkioWeightDevice, blkioReadBps, blkioWriteBps, blkioReadIOPS, blkioeWriteIOPS := getBLKIOInfo(spec) - memKernel, memReservation, memSwap, memSwappiness, memDisableOOMKiller := getMemoryInfo(spec) - pidsLimit := getPidsInfo(spec) - cgroup := getCgroup(spec) - logConfig := inspect.LogConfig{ - config.LogDriver, - make(map[string]string), - } - - data := &inspect.ContainerData{ - ctrInspectData, - &inspect.HostConfig{ - ConsoleSize: spec.Process.ConsoleSize, - OomScoreAdj: spec.Process.OOMScoreAdj, - CPUShares: shares, - BlkioWeight: blkioWeight, - BlkioWeightDevice: blkioWeightDevice, - BlkioDeviceReadBps: blkioReadBps, - BlkioDeviceWriteBps: blkioWriteBps, - BlkioDeviceReadIOps: blkioReadIOPS, - BlkioDeviceWriteIOps: blkioeWriteIOPS, - CPUPeriod: period, - CPUQuota: quota, - CPURealtimePeriod: realtimePeriod, - CPURealtimeRuntime: realtimeRuntime, - CPUSetCPUs: cpus, - CPUSetMems: mems, - Devices: spec.Linux.Devices, - KernelMemory: memKernel, - MemoryReservation: memReservation, - MemorySwap: memSwap, - MemorySwappiness: memSwappiness, - OomKillDisable: memDisableOOMKiller, - PidsLimit: pidsLimit, - Privileged: config.Privileged, - ReadOnlyRootfs: spec.Root.Readonly, - ReadOnlyTmpfs: createArtifact.ReadOnlyTmpfs, - Runtime: config.OCIRuntime, - NetworkMode: string(createArtifact.NetMode), - IpcMode: string(createArtifact.IpcMode), - Cgroup: cgroup, - UTSMode: string(createArtifact.UtsMode), - UsernsMode: string(createArtifact.UsernsMode), - GroupAdd: spec.Process.User.AdditionalGids, - ContainerIDFile: createArtifact.CidFile, - AutoRemove: createArtifact.Rm, - CapAdd: createArtifact.CapAdd, - CapDrop: createArtifact.CapDrop, - DNS: createArtifact.DNSServers, - DNSOptions: createArtifact.DNSOpt, - DNSSearch: createArtifact.DNSSearch, - PidMode: string(createArtifact.PidMode), - CgroupParent: createArtifact.CgroupParent, - ShmSize: createArtifact.Resources.ShmSize, - Memory: createArtifact.Resources.Memory, - Ulimits: createArtifact.Resources.Ulimit, - SecurityOpt: createArtifact.SecurityOpts, - Tmpfs: createArtifact.Tmpfs, - LogConfig: &logConfig, - }, - &inspect.CtrConfig{ - Hostname: spec.Hostname, - User: spec.Process.User, - Env: spec.Process.Env, - Image: config.RootfsImageName, - WorkingDir: spec.Process.Cwd, - Labels: config.Labels, - Annotations: spec.Annotations, - Tty: spec.Process.Terminal, - OpenStdin: config.Stdin, - StopSignal: config.StopSignal, - Cmd: config.Spec.Process.Args, - 
Entrypoint: strings.Join(createArtifact.Entrypoint, " "), - Healthcheck: config.HealthCheckConfig, - }, - } - return data, nil -} - -func getCPUInfo(spec *specs.Spec) (string, string, *uint64, *int64, *uint64, *int64, *uint64) { - if spec.Linux.Resources == nil { - return "", "", nil, nil, nil, nil, nil - } - cpu := spec.Linux.Resources.CPU - if cpu == nil { - return "", "", nil, nil, nil, nil, nil - } - return cpu.Cpus, cpu.Mems, cpu.Period, cpu.Quota, cpu.RealtimePeriod, cpu.RealtimeRuntime, cpu.Shares -} - -func getBLKIOInfo(spec *specs.Spec) (*uint16, []specs.LinuxWeightDevice, []specs.LinuxThrottleDevice, []specs.LinuxThrottleDevice, []specs.LinuxThrottleDevice, []specs.LinuxThrottleDevice) { - if spec.Linux.Resources == nil { - return nil, nil, nil, nil, nil, nil - } - blkio := spec.Linux.Resources.BlockIO - if blkio == nil { - return nil, nil, nil, nil, nil, nil - } - return blkio.Weight, blkio.WeightDevice, blkio.ThrottleReadBpsDevice, blkio.ThrottleWriteBpsDevice, blkio.ThrottleReadIOPSDevice, blkio.ThrottleWriteIOPSDevice -} - -func getMemoryInfo(spec *specs.Spec) (*int64, *int64, *int64, *uint64, *bool) { - if spec.Linux.Resources == nil { - return nil, nil, nil, nil, nil - } - memory := spec.Linux.Resources.Memory - if memory == nil { - return nil, nil, nil, nil, nil - } - return memory.Kernel, memory.Reservation, memory.Swap, memory.Swappiness, memory.DisableOOMKiller -} - -func getPidsInfo(spec *specs.Spec) *int64 { - if spec.Linux.Resources == nil { - return nil - } - pids := spec.Linux.Resources.Pids - if pids == nil { - return nil - } - return &pids.Limit -} - -func getCgroup(spec *specs.Spec) string { - cgroup := "host" - for _, ns := range spec.Linux.Namespaces { - if ns.Type == specs.CgroupNamespace && ns.Path != "" { - cgroup = "container" - } - } - return cgroup -} - func comparePorts(i, j ocicni.PortMapping) bool { if i.ContainerPort != j.ContainerPort { return i.ContainerPort < j.ContainerPort @@ -776,8 +639,8 @@ func comparePorts(i, j ocicni.PortMapping) bool { return i.Protocol < j.Protocol } -// returns the group as <IP:startPort:lastPort->startPort:lastPort/Proto> -// e.g 0.0.0.0:1000-1006->1000-1006/tcp +// formatGroup returns the group as <IP:startPort:lastPort->startPort:lastPort/Proto> +// e.g 0.0.0.0:1000-1006->1000-1006/tcp. func formatGroup(key string, start, last int32) string { parts := strings.Split(key, "/") groupType := parts[0] @@ -797,7 +660,7 @@ func formatGroup(key string, start, last int32) string { } // portsToString converts the ports used to a string of the from "port1, port2" -// also groups continuous list of ports in readable format. +// and also groups continuous list of ports in readable format. func portsToString(ports []ocicni.PortMapping) string { type portGroup struct { first int32 @@ -812,7 +675,7 @@ func portsToString(ports []ocicni.PortMapping) string { return comparePorts(ports[i], ports[j]) }) - // portGroupMap is used for grouping continuous ports + // portGroupMap is used for grouping continuous ports. portGroupMap := make(map[string]*portGroup) var groupKeyList []string @@ -822,7 +685,7 @@ func portsToString(ports []ocicni.PortMapping) string { if hostIP == "" { hostIP = "0.0.0.0" } - // if hostPort and containerPort are not same, consider as individual port. + // If hostPort and containerPort are not same, consider as individual port. 
if v.ContainerPort != v.HostPort { portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol)) continue @@ -833,7 +696,7 @@ func portsToString(ports []ocicni.PortMapping) string { portgroup, ok := portGroupMap[portMapKey] if !ok { portGroupMap[portMapKey] = &portGroup{first: v.ContainerPort, last: v.ContainerPort} - // this list is required to travese portGroupMap + // This list is required to travese portGroupMap. groupKeyList = append(groupKeyList, portMapKey) continue } @@ -843,7 +706,7 @@ func portsToString(ports []ocicni.PortMapping) string { continue } } - // for each portMapKey, format group list and appned to output string + // For each portMapKey, format group list and appned to output string. for _, portKey := range groupKeyList { group := portGroupMap[portKey] portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last)) @@ -852,7 +715,7 @@ func portsToString(ports []ocicni.PortMapping) string { } // GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the -// contruction of the runlabel output and environment variables +// construction of the runlabel output and environment variables. func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) { var ( newImage *image.Image @@ -887,9 +750,9 @@ func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtim return runLabel, imageName, err } -// GenerateRunlabelCommand generates the command that will eventually be execucted by podman +// GenerateRunlabelCommand generates the command that will eventually be execucted by podman. func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) { - // If no name is provided, we use the image's basename instead + // If no name is provided, we use the image's basename instead. if name == "" { baseName, err := image.GetImageBaseName(imageName) if err != nil { @@ -897,7 +760,7 @@ func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]s } name = baseName } - // The user provided extra arguments that need to be tacked onto the label's command + // The user provided extra arguments that need to be tacked onto the label's command. if len(extraArgs) > 0 { runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " ")) } @@ -919,7 +782,7 @@ func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]s case "OPT3": return envmap["OPT3"] case "PWD": - // I would prefer to use os.getenv but it appears PWD is not in the os env list + // I would prefer to use os.getenv but it appears PWD is not in the os env list. d, err := os.Getwd() if err != nil { logrus.Error("unable to determine current working directory") @@ -956,7 +819,7 @@ func GenerateKube(name string, service bool, r *libpod.Runtime) (*v1.Pod, *v1.Se servicePorts []v1.ServicePort serviceYAML v1.Service ) - // Get the container in question + // Get the container in question. 
container, err = r.LookupContainer(name) if err != nil { pod, err = r.LookupPod(name) @@ -966,7 +829,7 @@ func GenerateKube(name string, service bool, r *libpod.Runtime) (*v1.Pod, *v1.Se podYAML, servicePorts, err = pod.GenerateForKube() } else { if len(container.Dependencies()) > 0 { - return nil, nil, errors.Wrapf(libpod.ErrNotImplemented, "containers with dependencies") + return nil, nil, errors.Wrapf(define.ErrNotImplemented, "containers with dependencies") } podYAML, err = container.GenerateForKube() } diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go index 7cf230605..9578eb17d 100644 --- a/cmd/podman/shared/create.go +++ b/cmd/podman/shared/create.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/containers/libpod/pkg/errorhandling" "io" "os" "path/filepath" @@ -25,7 +26,6 @@ import ( "github.com/docker/docker/pkg/signal" "github.com/docker/go-connections/nat" "github.com/docker/go-units" - "github.com/google/shlex" "github.com/opencontainers/selinux/go-selinux/label" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" @@ -56,15 +56,15 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. } if c.IsSet("cidfile") && os.Geteuid() == 0 { - cidFile, err = libpod.OpenExclusiveFile(c.String("cidfile")) + cidFile, err = util.OpenExclusiveFile(c.String("cidfile")) if err != nil && os.IsExist(err) { return nil, nil, errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", c.String("cidfile")) } if err != nil { return nil, nil, errors.Errorf("error opening cidfile %s", c.String("cidfile")) } - defer cidFile.Close() - defer cidFile.Sync() + defer errorhandling.CloseQuiet(cidFile) + defer errorhandling.SyncQuiet(cidFile) } imageName := "" @@ -77,11 +77,14 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. writer = os.Stderr } - newImage, err := runtime.ImageRuntime().New(ctx, c.InputArgs[0], rtc.SignaturePolicyPath, "", writer, nil, image.SigningOptions{}, false, nil) + newImage, err := runtime.ImageRuntime().New(ctx, c.InputArgs[0], rtc.SignaturePolicyPath, GetAuthFile(""), writer, nil, image.SigningOptions{}, false, nil) if err != nil { return nil, nil, err } data, err = newImage.Inspect(ctx) + if err != nil { + return nil, nil, err + } names := newImage.Names() if len(names) > 0 { imageName = names[0] @@ -89,9 +92,8 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. imageName = newImage.ID() } - var healthCheckCommandInput string // if the user disabled the healthcheck with "none", we skip adding it - healthCheckCommandInput = c.String("healthcheck-command") + healthCheckCommandInput := c.String("healthcheck-command") // the user didnt disable the healthcheck but did pass in a healthcheck command // now we need to make a healthcheck from the commandline input @@ -113,6 +115,30 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod. 
if err != nil { return nil, nil, errors.Wrapf(err, "unable to get healthcheck for %s", c.InputArgs[0]) } + + if healthCheck != nil { + hcCommand := healthCheck.Test + if len(hcCommand) < 1 || hcCommand[0] == "" || hcCommand[0] == "NONE" { + // disable health check + healthCheck = nil + } else { + // apply defaults if image doesn't override them + if healthCheck.Interval == 0 { + healthCheck.Interval = 30 * time.Second + } + if healthCheck.Timeout == 0 { + healthCheck.Timeout = 30 * time.Second + } + /* Docker default is 0s, so the following would be a no-op + if healthCheck.StartPeriod == 0 { + healthCheck.StartPeriod = 0 * time.Second + } + */ + if healthCheck.Retries == 0 { + healthCheck.Retries = 3 + } + } + } } } } @@ -191,7 +217,7 @@ func parseSecurityOpt(config *cc.CreateConfig, securityOpts []string, runtime *l } else { con := strings.SplitN(opt, "=", 2) if len(con) != 2 { - return fmt.Errorf("Invalid --security-opt 1: %q", opt) + return fmt.Errorf("invalid --security-opt 1: %q", opt) } switch con[0] { @@ -202,7 +228,7 @@ func parseSecurityOpt(config *cc.CreateConfig, securityOpts []string, runtime *l case "seccomp": config.SeccompProfilePath = con[1] default: - return fmt.Errorf("Invalid --security-opt 2: %q", opt) + return fmt.Errorf("invalid --security-opt 2: %q", opt) } } } @@ -256,13 +282,26 @@ func configurePod(c *GenericCLIResults, runtime *libpod.Runtime, namespaces map[ if err != nil { return namespaces, err } + hasUserns := false + if podInfraID != "" { + podCtr, err := runtime.GetContainer(podInfraID) + if err != nil { + return namespaces, err + } + mappings, err := podCtr.IDMappings() + if err != nil { + return namespaces, err + } + hasUserns = len(mappings.UIDMap) > 0 + } + if (namespaces["pid"] == cc.Pod) || (!c.IsSet("pid") && pod.SharesPID()) { namespaces["pid"] = fmt.Sprintf("container:%s", podInfraID) } if (namespaces["net"] == cc.Pod) || (!c.IsSet("net") && !c.IsSet("network") && pod.SharesNet()) { namespaces["net"] = fmt.Sprintf("container:%s", podInfraID) } - if (namespaces["user"] == cc.Pod) || (!c.IsSet("user") && pod.SharesUser()) { + if hasUserns && (namespaces["user"] == cc.Pod) || (!c.IsSet("user") && pod.SharesUser()) { namespaces["user"] = fmt.Sprintf("container:%s", podInfraID) } if (namespaces["ipc"] == cc.Pod) || (!c.IsSet("ipc") && pod.SharesIPC()) { @@ -374,11 +413,12 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. namespaceNet = c.String("net") } namespaces = map[string]string{ - "pid": c.String("pid"), - "net": namespaceNet, - "ipc": c.String("ipc"), - "user": c.String("userns"), - "uts": c.String("uts"), + "cgroup": c.String("cgroupns"), + "pid": c.String("pid"), + "net": namespaceNet, + "ipc": c.String("ipc"), + "user": c.String("userns"), + "uts": c.String("uts"), } originalPodName := c.String("pod") @@ -436,6 +476,11 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. return nil, errors.Errorf("--uts %q is not valid", namespaces["uts"]) } + cgroupMode := ns.CgroupMode(namespaces["cgroup"]) + if !cgroupMode.Valid() { + return nil, errors.Errorf("--cgroup %q is not valid", namespaces["cgroup"]) + } + ipcMode := ns.IpcMode(namespaces["ipc"]) if !cc.Valid(string(ipcMode), ipcMode) { return nil, errors.Errorf("--ipc %q is not valid", ipcMode) @@ -479,6 +524,16 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. 
// ENVIRONMENT VARIABLES env := EnvVariablesFromData(data) + if c.Bool("env-host") { + for _, e := range os.Environ() { + pair := strings.SplitN(e, "=", 2) + if _, ok := env[pair[0]]; !ok { + if len(pair) > 1 { + env[pair[0]] = pair[1] + } + } + } + } if err := parse.ReadKVStrings(env, c.StringSlice("env-file"), c.StringArray("env")); err != nil { return nil, errors.Wrapf(err, "unable to process environment variables") } @@ -615,6 +670,8 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. ImageVolumeType: c.String("image-volume"), CapAdd: c.StringSlice("cap-add"), CapDrop: c.StringSlice("cap-drop"), + CidFile: c.String("cidfile"), + Cgroupns: c.String("cgroupns"), CgroupParent: c.String("cgroup-parent"), Command: command, Detach: c.Bool("detach"), @@ -650,6 +707,7 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. NetMode: netMode, UtsMode: utsMode, PidMode: pidMode, + CgroupMode: cgroupMode, Pod: podName, Privileged: c.Bool("privileged"), Publish: c.StringSlice("publish"), @@ -690,7 +748,7 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod. StopTimeout: c.Uint("stop-timeout"), Sysctl: sysctl, Systemd: systemd, - Tmpfs: c.StringSlice("tmpfs"), + Tmpfs: c.StringArray("tmpfs"), Tty: tty, User: user, UsernsMode: usernsMode, @@ -730,14 +788,6 @@ func CreateContainerFromCreateConfig(r *libpod.Runtime, createConfig *cc.CreateC if err != nil { return nil, err } - - createConfigJSON, err := json.Marshal(createConfig) - if err != nil { - return nil, err - } - if err := ctr.AddArtifact("create-config", createConfigJSON); err != nil { - return nil, err - } return ctr, nil } @@ -775,9 +825,12 @@ func makeHealthCheckFromCli(c *GenericCLIResults) (*manifest.Schema2HealthConfig return nil, errors.New("Must define a healthcheck command for all healthchecks") } - cmd, err := shlex.Split(inCommand) + // first try to parse option value as JSON array of strings... 
+ cmd := []string{} + err := json.Unmarshal([]byte(inCommand), &cmd) if err != nil { - return nil, errors.Wrap(err, "failed to parse healthcheck command") + // ...otherwise pass it to "/bin/sh -c" inside the container + cmd = []string{"CMD-SHELL", inCommand} } hc := manifest.Schema2HealthConfig{ Test: cmd, @@ -801,7 +854,7 @@ func makeHealthCheckFromCli(c *GenericCLIResults) (*manifest.Schema2HealthConfig if err != nil { return nil, errors.Wrapf(err, "invalid healthcheck-timeout %s", inTimeout) } - if timeoutDuration < time.Duration(time.Second*1) { + if timeoutDuration < time.Duration(1) { return nil, errors.New("healthcheck-timeout must be at least 1 second") } hc.Timeout = timeoutDuration @@ -811,7 +864,7 @@ func makeHealthCheckFromCli(c *GenericCLIResults) (*manifest.Schema2HealthConfig return nil, errors.Wrapf(err, "invalid healthcheck-start-period %s", inStartPeriod) } if startPeriodDuration < time.Duration(0) { - return nil, errors.New("healthcheck-start-period must be a 0 seconds or greater") + return nil, errors.New("healthcheck-start-period must be 0 seconds or greater") } hc.StartPeriod = startPeriodDuration diff --git a/cmd/podman/shared/create_cli.go b/cmd/podman/shared/create_cli.go index 7f158b09a..08a40b206 100644 --- a/cmd/podman/shared/create_cli.go +++ b/cmd/podman/shared/create_cli.go @@ -5,9 +5,9 @@ import ( "strings" "github.com/containers/libpod/cmd/podman/shared/parse" + "github.com/containers/libpod/pkg/cgroups" cc "github.com/containers/libpod/pkg/spec" "github.com/containers/libpod/pkg/sysinfo" - "github.com/containers/libpod/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -78,7 +78,7 @@ func addWarning(warnings []string, msg string) []string { func verifyContainerResources(config *cc.CreateConfig, update bool) ([]string, error) { warnings := []string{} - cgroup2, err := util.IsCgroup2UnifiedMode() + cgroup2, err := cgroups.IsCgroup2UnifiedMode() if err != nil || cgroup2 { return warnings, err } @@ -133,7 +133,7 @@ func verifyContainerResources(config *cc.CreateConfig, update bool) ([]string, e if config.Resources.KernelMemory > 0 && config.Resources.KernelMemory < linuxMinMemory { return warnings, fmt.Errorf("minimum kernel memory limit allowed is 4MB") } - if config.Resources.DisableOomKiller == true && !sysInfo.OomKillDisable { + if config.Resources.DisableOomKiller && !sysInfo.OomKillDisable { // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point // warning the caller if they already wanted the feature to be off warnings = addWarning(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") diff --git a/cmd/podman/shared/funcs.go b/cmd/podman/shared/funcs.go index c189cceeb..2ceb9cdcb 100644 --- a/cmd/podman/shared/funcs.go +++ b/cmd/podman/shared/funcs.go @@ -9,6 +9,21 @@ import ( "github.com/google/shlex" ) +func GetAuthFile(authfile string) string { + if authfile != "" { + return authfile + } + authfile = os.Getenv("REGISTRY_AUTH_FILE") + if authfile != "" { + return authfile + } + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir != "" { + return filepath.Join(runtimeDir, "containers/auth.json") + } + return "" +} + func substituteCommand(cmd string) (string, error) { var ( newCommand string diff --git a/cmd/podman/shared/intermediate.go b/cmd/podman/shared/intermediate.go index a38e4d47a..4062ac48a 100644 --- a/cmd/podman/shared/intermediate.go +++ b/cmd/podman/shared/intermediate.go @@ -370,6 +370,7 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["blkio-weight-device"] = newCRStringSlice(c, "blkio-weight-device") m["cap-add"] = newCRStringSlice(c, "cap-add") m["cap-drop"] = newCRStringSlice(c, "cap-drop") + m["cgroupns"] = newCRString(c, "cgroupns") m["cgroup-parent"] = newCRString(c, "cgroup-parent") m["cidfile"] = newCRString(c, "cidfile") m["conmon-pidfile"] = newCRString(c, "conmon-pidfile") @@ -393,16 +394,17 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["dns-search"] = newCRStringSlice(c, "dns-search") m["entrypoint"] = newCRString(c, "entrypoint") m["env"] = newCRStringArray(c, "env") + m["env-host"] = newCRBool(c, "env-host") m["env-file"] = newCRStringSlice(c, "env-file") m["expose"] = newCRStringSlice(c, "expose") m["gidmap"] = newCRStringSlice(c, "gidmap") m["group-add"] = newCRStringSlice(c, "group-add") m["help"] = newCRBool(c, "help") - m["healthcheck-command"] = newCRString(c, "healthcheck-command") - m["healthcheck-interval"] = newCRString(c, "healthcheck-interval") - m["healthcheck-retries"] = newCRUint(c, "healthcheck-retries") - m["healthcheck-start-period"] = newCRString(c, "healthcheck-start-period") - m["healthcheck-timeout"] = newCRString(c, "healthcheck-timeout") + m["healthcheck-command"] = newCRString(c, "health-cmd") + m["healthcheck-interval"] = newCRString(c, "health-interval") + m["healthcheck-retries"] = newCRUint(c, "health-retries") + m["healthcheck-start-period"] = newCRString(c, "health-start-period") + m["healthcheck-timeout"] = newCRString(c, "health-timeout") m["hostname"] = newCRString(c, "hostname") m["http-proxy"] = newCRBool(c, "http-proxy") m["image-volume"] = newCRString(c, "image-volume") @@ -448,7 +450,7 @@ func NewIntermediateLayer(c *cliconfig.PodmanCommand, remote bool) GenericCLIRes m["subuidname"] = newCRString(c, "subuidname") m["sysctl"] = newCRStringSlice(c, "sysctl") m["systemd"] = newCRBool(c, "systemd") - m["tmpfs"] = newCRStringSlice(c, "tmpfs") + m["tmpfs"] = newCRStringArray(c, "tmpfs") m["tty"] = newCRBool(c, "tty") m["uidmap"] = newCRStringSlice(c, "uidmap") m["ulimit"] = newCRStringSlice(c, "ulimit") diff --git a/cmd/podman/shared/parse/parse.go b/cmd/podman/shared/parse/parse.go index 7bc2652cb..9fbc92fc3 100644 --- a/cmd/podman/shared/parse/parse.go +++ b/cmd/podman/shared/parse/parse.go @@ -7,6 +7,7 @@ import ( "bufio" "fmt" "net" + "net/url" "os" "regexp" "strings" @@ -112,9 +113,22 @@ func parseEnv(env map[string]string, line string) error { if len(data) > 1 { env[name] = data[1] } else { - // if only a pass-through variable is given, clean it up. 
- val, _ := os.LookupEnv(name) - env[name] = val + if strings.HasSuffix(name, "*") { + name = strings.TrimSuffix(name, "*") + for _, e := range os.Environ() { + part := strings.SplitN(e, "=", 2) + if len(part) < 2 { + continue + } + if strings.HasPrefix(part[0], name) { + env[part[0]] = part[1] + } + } + } else { + // if only a pass-through variable is given, clean it up. + val, _ := os.LookupEnv(name) + env[name] = val + } } return nil } @@ -149,3 +163,12 @@ func ValidateFileName(filename string) error { } return nil } + +// ValidURL checks a string urlStr is a url or not +func ValidURL(urlStr string) error { + _, err := url.ParseRequestURI(urlStr) + if err != nil { + return errors.Wrapf(err, "invalid url path: %q", urlStr) + } + return nil +} diff --git a/cmd/podman/shared/pod.go b/cmd/podman/shared/pod.go index 3f4cb0312..ab6d1f144 100644 --- a/cmd/podman/shared/pod.go +++ b/cmd/podman/shared/pod.go @@ -4,6 +4,7 @@ import ( "strconv" "github.com/containers/libpod/libpod" + "github.com/containers/libpod/libpod/define" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/docker/go-connections/nat" "github.com/pkg/errors" @@ -29,7 +30,7 @@ func GetPodStatus(pod *libpod.Pod) (string, error) { return CreatePodStatusResults(ctrStatuses) } -func CreatePodStatusResults(ctrStatuses map[string]libpod.ContainerStatus) (string, error) { +func CreatePodStatusResults(ctrStatuses map[string]define.ContainerStatus) (string, error) { ctrNum := len(ctrStatuses) if ctrNum == 0 { return PodStateCreated, nil @@ -43,15 +44,15 @@ func CreatePodStatusResults(ctrStatuses map[string]libpod.ContainerStatus) (stri } for _, ctrStatus := range ctrStatuses { switch ctrStatus { - case libpod.ContainerStateExited: + case define.ContainerStateExited: fallthrough - case libpod.ContainerStateStopped: + case define.ContainerStateStopped: statuses[PodStateStopped]++ - case libpod.ContainerStateRunning: + case define.ContainerStateRunning: statuses[PodStateRunning]++ - case libpod.ContainerStatePaused: + case define.ContainerStatePaused: statuses[PodStatePaused]++ - case libpod.ContainerStateCreated, libpod.ContainerStateConfigured: + case define.ContainerStateCreated, define.ContainerStateConfigured: statuses[PodStateCreated]++ default: statuses[PodStateErrored]++ |
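
The patch above adds a GetAuthFile helper to cmd/podman/shared/funcs.go that resolves the registry auth file in a fixed order: an explicitly supplied path, then the REGISTRY_AUTH_FILE environment variable, then $XDG_RUNTIME_DIR/containers/auth.json. A minimal standalone sketch of that same precedence follows; the package main wrapper and print statement are illustrative only and not part of the patch.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // getAuthFile mirrors the lookup order introduced in funcs.go:
    // an explicit path wins, then REGISTRY_AUTH_FILE, then the rootless
    // default under XDG_RUNTIME_DIR; otherwise an empty string is returned.
    func getAuthFile(authfile string) string {
        if authfile != "" {
            return authfile
        }
        if env := os.Getenv("REGISTRY_AUTH_FILE"); env != "" {
            return env
        }
        if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
            return filepath.Join(runtimeDir, "containers/auth.json")
        }
        return ""
    }

    func main() {
        // With no flag set, the result depends only on the environment.
        fmt.Println(getAuthFile(""))
    }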