Diffstat (limited to 'libpod')
34 files changed, 965 insertions, 396 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 608a279c3..4918bf57a 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -2235,7 +2235,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error { if ctr == nil { // This should never happen // State is inconsistent - return errors.Wrapf(define.ErrNoSuchCtr, "pod %s referenced nonexistant container %s", pod.ID(), string(id)) + return errors.Wrapf(define.ErrNoSuchCtr, "pod %s referenced nonexistent container %s", pod.ID(), string(id)) } ctrDeps := ctr.Bucket(dependenciesBkt) // This should never be nil, but if it is, we're diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go index 09a9be606..6ccda71bd 100644 --- a/libpod/boltdb_state_linux.go +++ b/libpod/boltdb_state_linux.go @@ -3,6 +3,8 @@ package libpod import ( + "github.com/containers/libpod/libpod/define" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -25,8 +27,12 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er if err == nil { newState.NetNS = ns } else { + if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { + return errors.Wrapf(err, "error joning network namespace of container %s", ctr.ID()) + } + logrus.Errorf("error joining network namespace for container %s: %v", ctr.ID(), err) - ctr.valid = false + ctr.state.NetNS = nil } } } else { diff --git a/libpod/config/config.go b/libpod/config/config.go index 5b4b57f3a..0e867a50e 100644 --- a/libpod/config/config.go +++ b/libpod/config/config.go @@ -12,6 +12,7 @@ import ( "github.com/BurntSushi/toml" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" "github.com/containers/storage" @@ -137,11 +138,11 @@ type Config struct { // VolumePath is the default location that named volumes will be created // under. This convention is followed by the default volume driver, but // may not be by other drivers. - VolumePath string `toml:"volume_path"` + VolumePath string `toml:"volume_path,omitempty"` // ImageDefaultTransport is the default transport method used to fetch // images. - ImageDefaultTransport string `toml:"image_default_transport"` + ImageDefaultTransport string `toml:"image_default_transport,omitempty"` // SignaturePolicyPath is the path to a signature policy to use for // validating images. If left empty, the containers/image default signature @@ -149,61 +150,61 @@ type Config struct { SignaturePolicyPath string `toml:"signature_policy_path,omitempty"` // OCIRuntime is the OCI runtime to use. - OCIRuntime string `toml:"runtime"` + OCIRuntime string `toml:"runtime,omitempty"` // OCIRuntimes are the set of configured OCI runtimes (default is runc). - OCIRuntimes map[string][]string `toml:"runtimes"` + OCIRuntimes map[string][]string `toml:"runtimes,omitempty"` // RuntimeSupportsJSON is the list of the OCI runtimes that support // --format=json. - RuntimeSupportsJSON []string `toml:"runtime_supports_json"` + RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"` // RuntimeSupportsNoCgroups is a list of OCI runtimes that support // running containers without CGroups. - RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups"` + RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups,omitempty"` // RuntimePath is the path to OCI runtime binary for launching containers. 
// The first path pointing to a valid file will be used This is used only // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be // backward compatible with older versions of Podman. - RuntimePath []string `toml:"runtime_path"` + RuntimePath []string `toml:"runtime_path,omitempty"` // ConmonPath is the path to the Conmon binary used for managing containers. // The first path pointing to a valid file will be used. - ConmonPath []string `toml:"conmon_path"` + ConmonPath []string `toml:"conmon_path,omitempty"` // ConmonEnvVars are environment variables to pass to the Conmon binary // when it is launched. - ConmonEnvVars []string `toml:"conmon_env_vars"` + ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"` // CGroupManager is the CGroup Manager to use Valid values are "cgroupfs" // and "systemd". - CgroupManager string `toml:"cgroup_manager"` + CgroupManager string `toml:"cgroup_manager,omitempty"` // InitPath is the path to the container-init binary. - InitPath string `toml:"init_path"` + InitPath string `toml:"init_path,omitempty"` // StaticDir is the path to a persistent directory to store container // files. - StaticDir string `toml:"static_dir"` + StaticDir string `toml:"static_dir,omitempty"` // TmpDir is the path to a temporary directory to store per-boot container // files. Must be stored in a tmpfs. - TmpDir string `toml:"tmp_dir"` + TmpDir string `toml:"tmp_dir,omitempty"` // MaxLogSize is the maximum size of container logfiles. MaxLogSize int64 `toml:"max_log_size,omitempty"` // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime. - NoPivotRoot bool `toml:"no_pivot_root"` + NoPivotRoot bool `toml:"no_pivot_root,omitempty"` // CNIConfigDir sets the directory where CNI configuration files are // stored. - CNIConfigDir string `toml:"cni_config_dir"` + CNIConfigDir string `toml:"cni_config_dir,omitempty"` // CNIPluginDir sets a number of directories where the CNI network // plugins can be located. - CNIPluginDir []string `toml:"cni_plugin_dir"` + CNIPluginDir []string `toml:"cni_plugin_dir,omitempty"` // CNIDefaultNetwork is the network name of the default CNI network // to attach pods to. @@ -213,7 +214,7 @@ type Config struct { // configuration files. When the same filename is present in in // multiple directories, the file in the directory listed last in // this slice takes precedence. - HooksDir []string `toml:"hooks_dir"` + HooksDir []string `toml:"hooks_dir,omitempty"` // DefaultMountsFile is the path to the default mounts file for testing // purposes only. @@ -229,10 +230,10 @@ type Config struct { // InfraImage is the image a pod infra container will use to manage // namespaces. - InfraImage string `toml:"infra_image"` + InfraImage string `toml:"infra_image,omitempty"` // InfraCommand is the command run to start up a pod infra container. - InfraCommand string `toml:"infra_command"` + InfraCommand string `toml:"infra_command,omitempty"` // EnablePortReservation determines whether libpod will reserve ports on the // host when they are forwarded to containers. When enabled, when ports are @@ -241,13 +242,13 @@ type Config struct { // programs on the host. However, this can cause significant memory usage if // a container has many ports forwarded to it. Disabling this can save // memory. - EnablePortReservation bool `toml:"enable_port_reservation"` + EnablePortReservation bool `toml:"enable_port_reservation,omitempty"` // EnableLabeling indicates whether libpod will support container labeling. 
- EnableLabeling bool `toml:"label"` + EnableLabeling bool `toml:"label,omitempty"` // NetworkCmdPath is the path to the slirp4netns binary. - NetworkCmdPath string `toml:"network_cmd_path"` + NetworkCmdPath string `toml:"network_cmd_path,omitempty"` // NumLocks is the number of locks to make available for containers and // pods. @@ -257,17 +258,21 @@ type Config struct { LockType string `toml:"lock_type,omitempty"` // EventsLogger determines where events should be logged. - EventsLogger string `toml:"events_logger"` + EventsLogger string `toml:"events_logger,omitempty"` // EventsLogFilePath is where the events log is stored. - EventsLogFilePath string `toml:"events_logfile_path"` + EventsLogFilePath string `toml:"events_logfile_path,omitempty"` //DetachKeys is the sequence of keys used to detach a container. - DetachKeys string `toml:"detach_keys"` + DetachKeys string `toml:"detach_keys,omitempty"` // SDNotify tells Libpod to allow containers to notify the host systemd of // readiness using the SD_NOTIFY mechanism. - SDNotify bool + SDNotify bool `toml:",omitempty"` + + // CgroupCheck indicates the configuration has been rewritten after an + // upgrade to Fedora 31 to change the default OCI runtime for cgroupsv2. + CgroupCheck bool `toml:"cgroup_check,omitempty"` } // DBConfig is a set of Libpod runtime configuration settings that are saved in @@ -443,6 +448,9 @@ func NewConfig(userConfigPath string) (*Config, error) { if err != nil { return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath) } + if err := cgroupV2Check(userConfigPath, config); err != nil { + return nil, errors.Wrapf(err, "error rewriting configuration file %s", userConfigPath) + } } // Now, check if the user can access system configs and merge them if needed. @@ -469,6 +477,9 @@ func NewConfig(userConfigPath string) (*Config, error) { if defaultConfig, err := defaultConfigFromMemory(); err != nil { return nil, errors.Wrapf(err, "error generating default config from memory") } else { + // Check if we need to switch to cgroupfs and logger=file on rootless. + defaultConfig.checkCgroupsAndLogger() + if err := config.mergeConfig(defaultConfig); err != nil { return nil, errors.Wrapf(err, "error merging default config from memory") } @@ -487,9 +498,6 @@ func NewConfig(userConfigPath string) (*Config, error) { return nil, errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", config.VolumePath) } - // Check if we need to switch to cgroupfs on rootless. - config.checkCgroupsAndAdjustConfig() - return config, nil } @@ -524,11 +532,13 @@ func systemConfigs() ([]string, error) { return configs, nil } -// checkCgroupsAndAdjustConfig checks if we're running rootless with the systemd +// checkCgroupsAndLogger checks if we're running rootless with the systemd // cgroup manager. In case the user session isn't available, we're switching the -// cgroup manager to cgroupfs. Note, this only applies to rootless. -func (c *Config) checkCgroupsAndAdjustConfig() { - if !rootless.IsRootless() || c.CgroupManager != define.SystemdCgroupsManager { +// cgroup manager to cgroupfs and the events logger backend to 'file'. +// Note, this only applies to rootless. 
+func (c *Config) checkCgroupsAndLogger() { + if !rootless.IsRootless() || (c.CgroupManager != + define.SystemdCgroupsManager && c.EventsLogger == "file") { return } @@ -543,7 +553,34 @@ func (c *Config) checkCgroupsAndAdjustConfig() { logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available") logrus.Warningf("For using systemd, you may need to login using an user session") logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID()) - logrus.Warningf("Falling back to --cgroup-manager=cgroupfs") + logrus.Warningf("Falling back to --cgroup-manager=cgroupfs and --events-backend=file") c.CgroupManager = define.CgroupfsCgroupsManager + c.EventsLogger = "file" + } +} + +// Since runc does not currently support cgroupV2 +// Change to default crun on first running of libpod.conf +// TODO Once runc has support for cgroups, this function should be removed. +func cgroupV2Check(configPath string, tmpConfig *Config) error { + if !tmpConfig.CgroupCheck && rootless.IsRootless() { + cgroupsV2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return err + } + if cgroupsV2 { + path, err := exec.LookPath("crun") + if err != nil { + logrus.Warnf("Can not find crun package on the host, containers might fail to run on cgroup V2 systems without crun: %q", err) + // Can't find crun path so do nothing + return nil + } + tmpConfig.CgroupCheck = true + tmpConfig.OCIRuntime = path + if err := tmpConfig.Write(configPath); err != nil { + return err + } + } } + return nil } diff --git a/libpod/config/default.go b/libpod/config/default.go index 17574c059..5decaeab7 100644 --- a/libpod/config/default.go +++ b/libpod/config/default.go @@ -6,6 +6,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" "github.com/containers/storage" @@ -47,6 +48,12 @@ func defaultConfigFromMemory() (*Config, error) { c.ImageDefaultTransport = _defaultTransport c.StateType = define.BoltDBStateStore c.OCIRuntime = "runc" + + // If we're running on cgroups v2, default to using crun. + if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 { + c.OCIRuntime = "crun" + } + c.OCIRuntimes = map[string][]string{ "runc": { "/usr/bin/runc", @@ -58,7 +65,15 @@ func defaultConfigFromMemory() (*Config, error) { "/usr/lib/cri-o-runc/sbin/runc", "/run/current-system/sw/bin/runc", }, - // TODO - should we add "crun" defaults here as well? + "crun": { + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", + }, } c.ConmonPath = []string{ "/usr/libexec/podman/conmon", diff --git a/libpod/container.go b/libpod/container.go index 8e24391b9..dcec3ee50 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -6,6 +6,7 @@ import ( "net" "os" "path/filepath" + "strings" "time" "github.com/containernetworking/cni/pkg/types" @@ -138,6 +139,10 @@ type Container struct { // being checkpointed. If requestedIP is set it will be used instead // of config.StaticIP. requestedIP net.IP + // A restored container should have the same MAC address as before + // being checkpointed. If requestedMAC is set it will be used instead + // of config.StaticMAC. + requestedMAC net.HardwareAddr // This is true if a container is restored from a checkpoint. 
restoreFromCheckpoint bool @@ -296,6 +301,10 @@ type ContainerConfig struct { // This cannot be set unless CreateNetNS is set. // If not set, the container will be dynamically assigned an IP by CNI. StaticIP net.IP `json:"staticIP"` + // StaticMAC is a static MAC to request for the container. + // This cannot be set unless CreateNetNS is set. + // If not set, the container will be dynamically assigned a MAC by CNI. + StaticMAC net.HardwareAddr `json:"staticMAC"` // PortMappings are the ports forwarded to the container's network // namespace // These are not used unless CreateNetNS is true @@ -1064,7 +1073,14 @@ func (c *Container) CGroupPath() (string, error) { case define.SystemdCgroupsManager: if rootless.IsRootless() { uid := rootless.GetRootlessUID() - return filepath.Join(c.config.CgroupParent, fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid), createUnitName("libpod", c.ID())), nil + parts := strings.SplitN(c.config.CgroupParent, "/", 2) + + dir := "" + if len(parts) > 1 { + dir = parts[1] + } + + return filepath.Join(parts[0], fmt.Sprintf("user-%d.slice/user@%d.service/user.slice/%s", uid, uid, dir), createUnitName("libpod", c.ID())), nil } return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil default: @@ -1138,7 +1154,7 @@ func (c *Container) NetworkDisabled() (bool, error) { if err != nil { return false, err } - return networkDisabled(container) + return container.NetworkDisabled() } return networkDisabled(c) diff --git a/libpod/container_api.go b/libpod/container_api.go index a6f5b54d5..153a1d628 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -404,6 +404,11 @@ func (c *Container) Mount() (string, error) { return "", err } } + + if c.state.State == define.ContainerStateRemoving { + return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID()) + } + defer c.newContainerEvent(events.Mount) return c.mount() } @@ -488,7 +493,12 @@ func (c *Container) Export(path string) error { return err } } - defer c.newContainerEvent(events.Export) + + if c.state.State == define.ContainerStateRemoving { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID()) + } + + defer c.newContainerEvent(events.Mount) return c.export(path) } @@ -674,6 +684,10 @@ func (c *Container) Refresh(ctx context.Context) error { } } + if c.state.State == define.ContainerStateRemoving { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed") + } + wasCreated := false if c.state.State == define.ContainerStateCreated { wasCreated = true @@ -794,6 +808,11 @@ type ContainerCheckpointOptions struct { // important to be able to restore a container multiple // times with '--import --name'. IgnoreStaticIP bool + // IgnoreStaticMAC tells the API to ignore the MAC set + // during 'podman run' with '--mac-address'. This is especially + // important to be able to restore a container multiple + // times with '--import --name'. 
+ IgnoreStaticMAC bool } // Checkpoint checkpoints a container @@ -814,7 +833,6 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO return err } } - defer c.newContainerEvent(events.Checkpoint) return c.checkpoint(ctx, options) } diff --git a/libpod/container_commit.go b/libpod/container_commit.go index 42f298a81..a0ba57f4f 100644 --- a/libpod/container_commit.go +++ b/libpod/container_commit.go @@ -3,7 +3,6 @@ package libpod import ( "context" "fmt" - "os" "strings" "github.com/containers/buildah" @@ -12,6 +11,7 @@ import ( "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" + libpodutil "github.com/containers/libpod/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -32,10 +32,6 @@ type ContainerCommitOptions struct { // Commit commits the changes between a container and its image, creating a new // image func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*image.Image, error) { - var ( - isEnvCleared, isLabelCleared, isExposeCleared, isVolumeCleared bool - ) - if c.config.Rootfs != "" { return nil, errors.Errorf("cannot commit a container that uses an exploded rootfs") } @@ -51,7 +47,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai if c.state.State == define.ContainerStateRunning && options.Pause { if err := c.pause(); err != nil { - return nil, errors.Wrapf(err, "error pausing container %q", c.ID()) + return nil, errors.Wrapf(err, "error pausing container %q to commit", c.ID()) } defer func() { if err := c.unpause(); err != nil { @@ -103,7 +99,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai } // Expose ports for _, p := range c.config.PortMappings { - importBuilder.SetPort(fmt.Sprintf("%d", p.ContainerPort)) + importBuilder.SetPort(fmt.Sprintf("%d/%s", p.ContainerPort, p.Protocol)) } // Labels for k, v := range c.Labels() { @@ -111,7 +107,9 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai } // No stop signal // User - importBuilder.SetUser(c.User()) + if c.config.User != "" { + importBuilder.SetUser(c.config.User) + } // Volumes if options.IncludeVolumes { for _, v := range c.config.UserVolumes { @@ -119,107 +117,76 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai importBuilder.AddVolume(v) } } - } - // Workdir - importBuilder.SetWorkDir(c.Spec().Process.Cwd) - - genCmd := func(cmd string) []string { - trim := func(cmd []string) []string { - if len(cmd) == 0 { - return cmd + } else { + // Only include anonymous named volumes added by the user by + // default. 
+ for _, v := range c.config.NamedVolumes { + include := false + for _, userVol := range c.config.UserVolumes { + if userVol == v.Dest { + include = true + break + } } - - retCmd := []string{} - for _, c := range cmd { - if len(c) >= 2 { - if c[0] == '"' && c[len(c)-1] == '"' { - retCmd = append(retCmd, c[1:len(c)-1]) - continue - } + if include { + vol, err := c.runtime.GetVolume(v.Name) + if err != nil { + return nil, errors.Wrapf(err, "volume %s used in container %s has been removed", v.Name, c.ID()) + } + if vol.IsCtrSpecific() { + importBuilder.AddVolume(v.Dest) } - retCmd = append(retCmd, c) } - return retCmd } - if strings.HasPrefix(cmd, "[") { - cmd = strings.TrimPrefix(cmd, "[") - cmd = strings.TrimSuffix(cmd, "]") - return trim(strings.Split(cmd, ",")) - } - return []string{"/bin/sh", "-c", cmd} } - // Process user changes - for _, change := range options.Changes { - splitChange := strings.SplitN(change, "=", 2) - if len(splitChange) != 2 { - splitChange = strings.SplitN(change, " ", 2) - if len(splitChange) < 2 { - return nil, errors.Errorf("invalid change %s format", change) - } - } + // Workdir + importBuilder.SetWorkDir(c.config.Spec.Process.Cwd) - switch strings.ToUpper(splitChange[0]) { - case "CMD": - importBuilder.SetCmd(genCmd(splitChange[1])) - case "ENTRYPOINT": - importBuilder.SetEntrypoint(genCmd(splitChange[1])) - case "ENV": - change := strings.Split(splitChange[1], " ") - name := change[0] - val := "" - if len(change) < 2 { - change = strings.Split(change[0], "=") - } - if len(change) < 2 { - var ok bool - val, ok = os.LookupEnv(name) - if !ok { - return nil, errors.Errorf("invalid env variable %q: not defined in your environment", name) - } - } else { - name = change[0] - val = strings.Join(change[1:], " ") - } - if !isEnvCleared { // Multiple values are valid, only clear once. 
- importBuilder.ClearEnv() - isEnvCleared = true - } - importBuilder.SetEnv(name, val) - case "EXPOSE": - if !isExposeCleared { // Multiple values are valid, only clear once - importBuilder.ClearPorts() - isExposeCleared = true - } - importBuilder.SetPort(splitChange[1]) - case "LABEL": - change := strings.Split(splitChange[1], " ") - if len(change) < 2 { - change = strings.Split(change[0], "=") - } - if len(change) < 2 { - return nil, errors.Errorf("invalid label %s format, requires to NAME=VAL", splitChange[1]) - } - if !isLabelCleared { // multiple values are valid, only clear once - importBuilder.ClearLabels() - isLabelCleared = true - } - importBuilder.SetLabel(change[0], strings.Join(change[1:], " ")) - case "ONBUILD": - importBuilder.SetOnBuild(splitChange[1]) - case "STOPSIGNAL": - // No Set StopSignal - case "USER": - importBuilder.SetUser(splitChange[1]) - case "VOLUME": - if !isVolumeCleared { // multiple values are valid, only clear once - importBuilder.ClearVolumes() - isVolumeCleared = true - } - importBuilder.AddVolume(splitChange[1]) - case "WORKDIR": - importBuilder.SetWorkDir(splitChange[1]) + // Process user changes + newImageConfig, err := libpodutil.GetImageConfig(options.Changes) + if err != nil { + return nil, err + } + if newImageConfig.User != "" { + importBuilder.SetUser(newImageConfig.User) + } + // EXPOSE only appends + for port := range newImageConfig.ExposedPorts { + importBuilder.SetPort(port) + } + // ENV only appends + for _, env := range newImageConfig.Env { + splitEnv := strings.SplitN(env, "=", 2) + key := splitEnv[0] + value := "" + if len(splitEnv) == 2 { + value = splitEnv[1] } + importBuilder.SetEnv(key, value) } + if newImageConfig.Entrypoint != nil { + importBuilder.SetEntrypoint(newImageConfig.Entrypoint) + } + if newImageConfig.Cmd != nil { + importBuilder.SetCmd(newImageConfig.Cmd) + } + // VOLUME only appends + for vol := range newImageConfig.Volumes { + importBuilder.AddVolume(vol) + } + if newImageConfig.WorkingDir != "" { + importBuilder.SetWorkDir(newImageConfig.WorkingDir) + } + for k, v := range newImageConfig.Labels { + importBuilder.SetLabel(k, v) + } + if newImageConfig.StopSignal != "" { + importBuilder.SetStopSignal(newImageConfig.StopSignal) + } + for _, onbuild := range newImageConfig.OnBuild { + importBuilder.SetOnBuild(onbuild) + } + candidates, _, _, err := util.ResolveName(destImage, "", sc, c.runtime.store) if err != nil { return nil, errors.Wrapf(err, "error resolving name %q", destImage) diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 028d7601d..1e8a8a580 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -652,6 +652,11 @@ func (c *Container) removeConmonFiles() error { return errors.Wrapf(err, "error removing container %s ctl file", c.ID()) } + winszFile := filepath.Join(c.bundlePath(), "winsz") + if err := os.Remove(winszFile); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing container %s winsz file", c.ID()) + } + oomFile := filepath.Join(c.bundlePath(), "oom") if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) { return errors.Wrapf(err, "error removing container %s OOM file", c.ID()) @@ -714,7 +719,8 @@ func (c *Container) isStopped() (bool, error) { if err != nil { return true, err } - return c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused, nil + + return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused), nil } // save container state to the database 
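The hunks above and below replace open-coded state comparisons with calls to c.ensureState(...), which is not defined anywhere in this diff. Judging from its call sites it is presumably a small variadic membership check over the container's current status, along these lines (a sketch inside the libpod package, using the same define import as the surrounding files, not the verbatim implementation):

func (c *Container) ensureState(states ...define.ContainerStatus) bool {
	// Return true if the container's current state matches any of the
	// given states; callers negate the result to mean "in none of them".
	for _, state := range states {
		if state == c.state.State {
			return true
		}
	}
	return false
}

Under that assumption, !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) reads as "neither running nor paused", which matches the isStopped() logic it replaces.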
@@ -1052,6 +1058,8 @@ func (c *Container) initAndStart(ctx context.Context) (err error) { // If we are ContainerStateUnknown, throw an error if c.state.State == define.ContainerStateUnknown { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID()) + } else if c.state.State == define.ContainerStateRemoving { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start container %s as it is being removed", c.ID()) } // If we are running, do nothing diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 471648bc8..1b0570998 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -21,6 +21,7 @@ import ( "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/pkg/secrets" "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/annotations" "github.com/containers/libpod/pkg/apparmor" "github.com/containers/libpod/pkg/cgroups" @@ -676,6 +677,10 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State) } + if c.AutoRemove() && options.TargetFile == "" { + return errors.Errorf("Cannot checkpoint containers that have been started with '--rm' unless '--export' is used") + } + if err := c.checkpointRestoreLabelLog("dump.log"); err != nil { return err } @@ -695,6 +700,8 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return err } + defer c.newContainerEvent(events.Checkpoint) + if options.TargetFile != "" { if err = c.exportCheckpoint(options.TargetFile, options.IgnoreRootfs); err != nil { return err @@ -766,7 +773,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti return err } - if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) { + if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID()) } @@ -794,6 +801,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti c.config.StaticIP = nil } + // If a container is restored multiple times from an exported checkpoint with + // the help of '--import --name', the restore will fail if during 'podman run' + // a static container MAC address was set with '--mac-address'. The user + // can tell the restore process to ignore the static MAC with + // '--ignore-static-mac' + if options.IgnoreStaticMAC { + c.config.StaticMAC = nil + } + // Read network configuration from checkpoint // Currently only one interface with one IP is supported. networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status")) @@ -803,9 +819,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // TODO: This implicit restoring with or without IP depending on an // unrelated restore parameter (--name) does not seem like the // best solution. - if err == nil && options.Name == "" && !options.IgnoreStaticIP { + if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) { // The file with the network.status does exist. Let's restore the - // container with the same IP address as during checkpointing. + // container with the same IP address / MAC address as during checkpointing. 
defer networkStatusFile.Close() var networkStatus []*cnitypes.Result networkJSON, err := ioutil.ReadAll(networkStatusFile) @@ -815,16 +831,35 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err := json.Unmarshal(networkJSON, &networkStatus); err != nil { return err } - // Take the first IP address - var IP net.IP - if len(networkStatus) > 0 { - if len(networkStatus[0].IPs) > 0 { - IP = networkStatus[0].IPs[0].Address.IP + if !options.IgnoreStaticIP { + // Take the first IP address + var IP net.IP + if len(networkStatus) > 0 { + if len(networkStatus[0].IPs) > 0 { + IP = networkStatus[0].IPs[0].Address.IP + } + } + if IP != nil { + // Tell CNI which IP address we want. + c.requestedIP = IP } } - if IP != nil { - // Tell CNI which IP address we want. - c.requestedIP = IP + if !options.IgnoreStaticMAC { + // Take the first device with a defined sandbox. + var MAC net.HardwareAddr + for _, n := range networkStatus[0].Interfaces { + if n.Sandbox != "" { + MAC, err = net.ParseMAC(n.Mac) + if err != nil { + return errors.Wrapf(err, "failed to parse MAC %v", n.Mac) + } + break + } + } + if MAC != nil { + // Tell CNI which MAC address we want. + c.requestedMAC = MAC + } } } @@ -856,7 +891,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti // We want to have the same network namespace as before. if c.config.CreateNetNS { - if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), c.state.NetNS.Path()); err != nil { + netNSPath := "" + if !c.config.PostConfigureNetNS { + netNSPath = c.state.NetNS.Path() + } + + if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), netNSPath); err != nil { return err } } @@ -976,9 +1016,24 @@ func (c *Container) makeBindMounts() error { // We want /etc/resolv.conf and /etc/hosts from the // other container. Unless we're not creating both of // them. - depCtr, err := c.runtime.state.Container(c.config.NetNsCtr) - if err != nil { - return errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID()) + var ( + depCtr *Container + nextCtr string + ) + + // I don't like infinite loops, but I don't think there's + // a serious risk of looping dependencies - too many + // protections against that elsewhere. + nextCtr = c.config.NetNsCtr + for { + depCtr, err = c.runtime.state.Container(nextCtr) + if err != nil { + return errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID()) + } + nextCtr = depCtr.config.NetNsCtr + if nextCtr == "" { + break + } } // We need that container's bind mounts @@ -1314,7 +1369,7 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error { // Teardown CNI config on refresh func (c *Container) refreshCNI() error { // Let's try and delete any lingering network config... 
- podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP) + podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP, c.config.StaticMAC) return c.runtime.netPlugin.TearDownPod(podNetwork) } diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go index ab2527b3e..e7d258e21 100644 --- a/libpod/define/containerstate.go +++ b/libpod/define/containerstate.go @@ -25,6 +25,9 @@ const ( // ContainerStateExited indicates the the container has stopped and been // cleaned up ContainerStateExited ContainerStatus = iota + // ContainerStateRemoving indicates the container is in the process of + // being removed. + ContainerStateRemoving ContainerStatus = iota ) // ContainerStatus returns a string representation for users @@ -45,6 +48,8 @@ func (t ContainerStatus) String() string { return "paused" case ContainerStateExited: return "exited" + case ContainerStateRemoving: + return "removing" } return "bad state" } @@ -67,6 +72,8 @@ func StringToContainerStatus(status string) (ContainerStatus, error) { return ContainerStatePaused, nil case ContainerStateExited.String(): return ContainerStateExited, nil + case ContainerStateRemoving.String(): + return ContainerStateRemoving, nil default: return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) } diff --git a/libpod/events/config.go b/libpod/events/config.go index 453c64f8c..20c01baff 100644 --- a/libpod/events/config.go +++ b/libpod/events/config.go @@ -167,7 +167,7 @@ type EventFilter func(*Event) bool var ( // ErrEventTypeBlank indicates the event log found something done by podman - // but it isnt likely an event + // but it isn't likely an event ErrEventTypeBlank = errors.New("event type blank") // ErrEventNotFound indicates that the event was not found in the event log diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go index 470c76959..9e6fffc29 100644 --- a/libpod/events/journal_linux.go +++ b/libpod/events/journal_linux.go @@ -54,6 +54,7 @@ func (e EventJournalD) Write(ee Event) error { // Read reads events from the journal and sends qualified events to the event channel func (e EventJournalD) Read(options ReadOptions) error { + defer close(options.EventChannel) eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until) if err != nil { return errors.Wrapf(err, "failed to generate event options") @@ -87,7 +88,6 @@ func (e EventJournalD) Read(options ReadOptions) error { if err != nil { return err } - defer close(options.EventChannel) for { if _, err := j.Next(); err != nil { return err diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go index 4b65b0ad0..93e6fa3c9 100644 --- a/libpod/events/logfile.go +++ b/libpod/events/logfile.go @@ -41,6 +41,7 @@ func (e EventLogFile) Write(ee Event) error { // Reads from the log file func (e EventLogFile) Read(options ReadOptions) error { + defer close(options.EventChannel) eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until) if err != nil { return errors.Wrapf(err, "unable to generate event options") @@ -68,7 +69,6 @@ func (e EventLogFile) Read(options ReadOptions) error { options.EventChannel <- event } } - close(options.EventChannel) return nil } diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index e9c950713..b42e7d16a 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -31,7 +31,7 @@ 
const ( // HealthCheckNotDefined means the container has no health // check defined in it HealthCheckNotDefined HealthCheckStatus = iota - // HealthCheckInternalError means somes something failed obtaining or running + // HealthCheckInternalError means some something failed obtaining or running // a given health check HealthCheckInternalError HealthCheckStatus = iota // HealthCheckDefined means the healthcheck was found on the container diff --git a/libpod/image/filters.go b/libpod/image/filters.go new file mode 100644 index 000000000..d545f1bfc --- /dev/null +++ b/libpod/image/filters.go @@ -0,0 +1,176 @@ +package image + +import ( + "context" + "fmt" + "github.com/pkg/errors" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/containers/libpod/pkg/inspect" + "github.com/sirupsen/logrus" +) + +// ResultFilter is a mock function for image filtering +type ResultFilter func(*Image) bool + +// Filter is a function to determine whether an image is included in +// command output. Images to be outputted are tested using the function. A true +// return will include the image, a false return will exclude it. +type Filter func(*Image, *inspect.ImageData) bool + +// CreatedBeforeFilter allows you to filter on images created before +// the given time.Time +func CreatedBeforeFilter(createTime time.Time) ResultFilter { + return func(i *Image) bool { + return i.Created().Before(createTime) + } +} + +// CreatedAfterFilter allows you to filter on images created after +// the given time.Time +func CreatedAfterFilter(createTime time.Time) ResultFilter { + return func(i *Image) bool { + return i.Created().After(createTime) + } +} + +// DanglingFilter allows you to filter images for dangling images +func DanglingFilter(danglingImages bool) ResultFilter { + return func(i *Image) bool { + if danglingImages { + return i.Dangling() + } + return !i.Dangling() + } +} + +// ReadOnlyFilter allows you to filter images based on read/only and read/write +func ReadOnlyFilter(readOnly bool) ResultFilter { + return func(i *Image) bool { + if readOnly { + return i.IsReadOnly() + } + return !i.IsReadOnly() + } +} + +// LabelFilter allows you to filter by images labels key and/or value +func LabelFilter(ctx context.Context, labelfilter string) ResultFilter { + // We need to handle both label=key and label=key=value + return func(i *Image) bool { + var value string + splitFilter := strings.Split(labelfilter, "=") + key := splitFilter[0] + if len(splitFilter) > 1 { + value = splitFilter[1] + } + labels, err := i.Labels(ctx) + if err != nil { + return false + } + if len(strings.TrimSpace(labels[key])) > 0 && len(strings.TrimSpace(value)) == 0 { + return true + } + return labels[key] == value + } +} + +// ReferenceFilter allows you to filter by image name +// Replacing all '/' with '|' so that filepath.Match() can work +// '|' character is not valid in image name, so this is safe +func ReferenceFilter(ctx context.Context, referenceFilter string) ResultFilter { + filter := fmt.Sprintf("*%s*", referenceFilter) + filter = strings.Replace(filter, "/", "|", -1) + return func(i *Image) bool { + if len(referenceFilter) < 1 { + return true + } + for _, name := range i.Names() { + newName := strings.Replace(name, "/", "|", -1) + match, err := filepath.Match(filter, newName) + if err != nil { + logrus.Errorf("failed to match %s and %s, %q", name, referenceFilter, err) + } + if match { + return true + } + } + return false + } +} + +// OutputImageFilter allows you to filter by an a specific image name +func 
OutputImageFilter(userImage *Image) ResultFilter { + return func(i *Image) bool { + return userImage.ID() == i.ID() + } +} + +// FilterImages filters images using a set of predefined filter funcs +func FilterImages(images []*Image, filters []ResultFilter) []*Image { + var filteredImages []*Image + for _, image := range images { + include := true + for _, filter := range filters { + include = include && filter(image) + } + if include { + filteredImages = append(filteredImages, image) + } + } + return filteredImages +} + +// createFilterFuncs returns an array of filter functions based on the user inputs +// and is later used to filter images for output +func (ir *Runtime) createFilterFuncs(filters []string, img *Image) ([]ResultFilter, error) { + var filterFuncs []ResultFilter + ctx := context.Background() + for _, filter := range filters { + splitFilter := strings.Split(filter, "=") + if len(splitFilter) < 2 { + return nil, errors.Errorf("invalid filter syntax %s", filter) + } + switch splitFilter[0] { + case "before": + before, err := ir.NewFromLocal(splitFilter[1]) + if err != nil { + return nil, errors.Wrapf(err, "unable to find image %s in local stores", splitFilter[1]) + } + filterFuncs = append(filterFuncs, CreatedBeforeFilter(before.Created())) + case "after": + after, err := ir.NewFromLocal(splitFilter[1]) + if err != nil { + return nil, errors.Wrapf(err, "unable to find image %s in local stores", splitFilter[1]) + } + filterFuncs = append(filterFuncs, CreatedAfterFilter(after.Created())) + case "readonly": + readonly, err := strconv.ParseBool(splitFilter[1]) + if err != nil { + return nil, errors.Wrapf(err, "invalid filter readonly=%s", splitFilter[1]) + } + filterFuncs = append(filterFuncs, ReadOnlyFilter(readonly)) + case "dangling": + danglingImages, err := strconv.ParseBool(splitFilter[1]) + if err != nil { + return nil, errors.Wrapf(err, "invalid filter dangling=%s", splitFilter[1]) + } + filterFuncs = append(filterFuncs, DanglingFilter(danglingImages)) + case "label": + labelFilter := strings.Join(splitFilter[1:], "=") + filterFuncs = append(filterFuncs, LabelFilter(ctx, labelFilter)) + case "reference": + referenceFilter := strings.Join(splitFilter[1:], "=") + filterFuncs = append(filterFuncs, ReferenceFilter(ctx, referenceFilter)) + default: + return nil, errors.Errorf("invalid filter %s ", splitFilter[0]) + } + } + if img != nil { + filterFuncs = append(filterFuncs, OutputImageFilter(img)) + } + return filterFuncs, nil +} diff --git a/libpod/image/image.go b/libpod/image/image.go index c912ac2ca..c8583a1c5 100644 --- a/libpod/image/image.go +++ b/libpod/image/image.go @@ -74,6 +74,11 @@ type InfoImage struct { Layers []LayerInfo } +// ImageFilter is a function to determine whether a image is included +// in command output. Images to be outputted are tested using the function. +// A true return will include the image, a false return will exclude it. 
+type ImageFilter func(*Image) bool //nolint + // ErrRepoTagNotFound is the error returned when the image id given doesn't match a rep tag in store var ErrRepoTagNotFound = stderrors.New("unable to match user input to any specific repotag") @@ -211,6 +216,19 @@ func (ir *Runtime) Shutdown(force bool) error { return err } +// GetImagesWithFilters gets images with a series of filters applied +func (ir *Runtime) GetImagesWithFilters(filters []string) ([]*Image, error) { + filterFuncs, err := ir.createFilterFuncs(filters, nil) + if err != nil { + return nil, err + } + images, err := ir.GetImages() + if err != nil { + return nil, err + } + return FilterImages(images, filterFuncs), nil +} + func (i *Image) reloadImage() error { newImage, err := i.imageruntime.getImage(i.ID()) if err != nil { @@ -330,6 +348,21 @@ func (i *Image) Names() []string { return i.image.Names } +// NamesHistory returns a string array of names previously associated with the +// image, which may be a mixture of tags and digests +func (i *Image) NamesHistory() []string { + if len(i.image.Names) > 0 && len(i.image.NamesHistory) > 0 && + // We compare the latest (time-referenced) tags for equality and skip + // it in the history if they match to not display them twice. We have + // to compare like this, because `i.image.Names` (latest last) gets + // appended on retag, whereas `i.image.NamesHistory` gets prepended + // (latest first) + i.image.Names[len(i.image.Names)-1] == i.image.NamesHistory[0] { + return i.image.NamesHistory[1:] + } + return i.image.NamesHistory +} + // RepoTags returns a string array of repotags associated with the image func (i *Image) RepoTags() ([]string, error) { var repoTags []string @@ -765,109 +798,65 @@ func (i *Image) History(ctx context.Context) ([]*History, error) { return nil, err } - // Use our layers list to find images that use any of them (or no - // layer, since every base layer is derived from an empty layer) as its - // topmost layer. - interestingLayers := make(map[string]bool) - var layer *storage.Layer - if i.TopLayer() != "" { - if layer, err = i.imageruntime.store.Layer(i.TopLayer()); err != nil { - return nil, err - } + // Build a mapping from top-layer to image ID. + images, err := i.imageruntime.GetImages() + if err != nil { + return nil, err } - interestingLayers[""] = true - for layer != nil { - interestingLayers[layer.ID] = true - if layer.Parent == "" { - break + topLayerMap := make(map[string]string) + for _, image := range images { + if _, exists := topLayerMap[image.TopLayer()]; !exists { + topLayerMap[image.TopLayer()] = image.ID() } - layer, err = i.imageruntime.store.Layer(layer.Parent) + } + + var allHistory []*History + var layer *storage.Layer + + // Check if we have an actual top layer to prevent lookup errors. + if i.TopLayer() != "" { + layer, err = i.imageruntime.store.Layer(i.TopLayer()) if err != nil { return nil, err } } - // Get the IDs of the images that share some of our layers. Hopefully - // this step means that we'll be able to avoid reading the - // configuration of every single image in local storage later on. 
- images, err := i.imageruntime.GetImages() - if err != nil { - return nil, errors.Wrapf(err, "error getting images from store") - } - interestingImages := make([]*Image, 0, len(images)) - for i := range images { - if interestingLayers[images[i].TopLayer()] { - interestingImages = append(interestingImages, images[i]) - } - } + // Iterate in reverse order over the history entries, and lookup the + // corresponding image ID, size and get the next later if needed. + numHistories := len(oci.History) - 1 + for x := numHistories; x >= 0; x-- { + var size int64 - // Build a list of image IDs that correspond to our history entries. - historyImages := make([]*Image, len(oci.History)) - if len(oci.History) > 0 { - // The starting image shares its whole history with itself. - historyImages[len(historyImages)-1] = i - for i := range interestingImages { - image, err := images[i].ociv1Image(ctx) - if err != nil { - return nil, errors.Wrapf(err, "error getting image configuration for image %q", images[i].ID()) - } - // If the candidate has a longer history or no history - // at all, then it doesn't share the portion of our - // history that we're interested in matching with other - // images. - if len(image.History) == 0 || len(image.History) > len(historyImages) { - continue - } - // If we don't include all of the layers that the - // candidate image does (i.e., our rootfs didn't look - // like its rootfs at any point), then it can't be part - // of our history. - if len(image.RootFS.DiffIDs) > len(oci.RootFS.DiffIDs) { - continue - } - candidateLayersAreUsed := true - for i := range image.RootFS.DiffIDs { - if image.RootFS.DiffIDs[i] != oci.RootFS.DiffIDs[i] { - candidateLayersAreUsed = false - break - } - } - if !candidateLayersAreUsed { - continue + id := "<missing>" + if x == numHistories { + id = i.ID() + } else if layer != nil { + if !oci.History[x].EmptyLayer { + size = layer.UncompressedSize } - // If the candidate's entire history is an initial - // portion of our history, then we're based on it, - // either directly or indirectly. - sharedHistory := historiesMatch(oci.History, image.History) - if sharedHistory == len(image.History) { - historyImages[sharedHistory-1] = images[i] + if imageID, exists := topLayerMap[layer.ID]; exists { + id = imageID + // Delete the entry to avoid reusing it for following history items. 
+ delete(topLayerMap, layer.ID) } } - } - - var ( - size int64 - sizeCount = 1 - allHistory []*History - ) - for i := len(oci.History) - 1; i >= 0; i-- { - imageID := "<missing>" - if historyImages[i] != nil { - imageID = historyImages[i].ID() - } - if !oci.History[i].EmptyLayer { - size = img.LayerInfos()[len(img.LayerInfos())-sizeCount].Size - sizeCount++ - } allHistory = append(allHistory, &History{ - ID: imageID, - Created: oci.History[i].Created, - CreatedBy: oci.History[i].CreatedBy, + ID: id, + Created: oci.History[x].Created, + CreatedBy: oci.History[x].CreatedBy, Size: size, - Comment: oci.History[i].Comment, + Comment: oci.History[x].Comment, }) + + if layer != nil && layer.Parent != "" && !oci.History[x].EmptyLayer { + layer, err = i.imageruntime.store.Layer(layer.Parent) + if err != nil { + return nil, err + } + } } + return allHistory, nil } diff --git a/libpod/image/prune.go b/libpod/image/prune.go index 006cbdf22..f5be8ed50 100644 --- a/libpod/image/prune.go +++ b/libpod/image/prune.go @@ -2,23 +2,78 @@ package image import ( "context" + "strings" + "time" "github.com/containers/libpod/libpod/events" + "github.com/containers/libpod/pkg/timetype" "github.com/containers/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +func generatePruneFilterFuncs(filter, filterValue string) (ImageFilter, error) { + switch filter { + case "label": + var filterArray = strings.SplitN(filterValue, "=", 2) + var filterKey = filterArray[0] + if len(filterArray) > 1 { + filterValue = filterArray[1] + } else { + filterValue = "" + } + return func(i *Image) bool { + labels, err := i.Labels(context.Background()) + if err != nil { + return false + } + for labelKey, labelValue := range labels { + if labelKey == filterKey && ("" == filterValue || labelValue == filterValue) { + return true + } + } + return false + }, nil + + case "until": + ts, err := timetype.GetTimestamp(filterValue, time.Now()) + if err != nil { + return nil, err + } + seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + if err != nil { + return nil, err + } + until := time.Unix(seconds, nanoseconds) + return func(i *Image) bool { + if !until.IsZero() && i.Created().After((until)) { + return true + } + return false + }, nil + + } + return nil, nil +} + // GetPruneImages returns a slice of images that have no names/unused -func (ir *Runtime) GetPruneImages(all bool) ([]*Image, error) { +func (ir *Runtime) GetPruneImages(all bool, filterFuncs []ImageFilter) ([]*Image, error) { var ( pruneImages []*Image ) + allImages, err := ir.GetRWImages() if err != nil { return nil, err } for _, i := range allImages { + // filter the images based on this. 
+ for _, filterFunc := range filterFuncs { + if !filterFunc(i) { + continue + } + } + if len(i.Names()) == 0 { pruneImages = append(pruneImages, i) continue @@ -38,9 +93,25 @@ func (ir *Runtime) GetPruneImages(all bool) ([]*Image, error) { // PruneImages prunes dangling and optionally all unused images from the local // image store -func (ir *Runtime) PruneImages(ctx context.Context, all bool) ([]string, error) { - var prunedCids []string - pruneImages, err := ir.GetPruneImages(all) +func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) { + var ( + prunedCids []string + filterFuncs []ImageFilter + ) + for _, f := range filter { + filterSplit := strings.SplitN(f, "=", 2) + if len(filterSplit) < 2 { + return nil, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) + } + + generatedFunc, err := generatePruneFilterFuncs(filterSplit[0], filterSplit[1]) + if err != nil { + return nil, errors.Wrapf(err, "invalid filter") + } + filterFuncs = append(filterFuncs, generatedFunc) + } + + pruneImages, err := ir.GetPruneImages(all, filterFuncs) if err != nil { return nil, errors.Wrap(err, "unable to get images to prune") } diff --git a/libpod/image/pull.go b/libpod/image/pull.go index 7f5dc33b9..326a23f4c 100644 --- a/libpod/image/pull.go +++ b/libpod/image/pull.go @@ -230,7 +230,12 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s sc.BlobInfoCacheDir = filepath.Join(ir.store.GraphRoot(), "cache") srcRef, err := alltransports.ParseImageName(inputName) if err != nil { - // could be trying to pull from registry with short name + // We might be pulling with an unqualified image reference in which case + // we need to make sure that we're not using any other transport. + srcTransport := alltransports.TransportFromImageName(inputName) + if srcTransport != nil && srcTransport.Name() != DockerTransport { + return nil, err + } goal, err = ir.pullGoalFromPossiblyUnqualifiedName(inputName) if err != nil { return nil, errors.Wrap(err, "error getting default registries to try") @@ -325,7 +330,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa if goal.usedSearchRegistries && len(goal.searchedRegistries) == 0 { return nil, errors.Errorf("image name provided is a short name and no search registries are defined in the registries config file.") } - // If the image passed in was fully-qualified, we will have 1 refpair. Bc the image is fq'd, we dont need to yap about registries. + // If the image passed in was fully-qualified, we will have 1 refpair. Bc the image is fq'd, we don't need to yap about registries. 
if !goal.usedSearchRegistries { if pullErrors != nil && len(pullErrors.Errors) > 0 { // this should always be true return nil, errors.Wrap(pullErrors.Errors[0], "unable to pull image") @@ -347,6 +352,7 @@ func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(inputName string) (*pullG if err != nil { return nil, err } + if decomposedImage.hasRegistry { srcRef, err := docker.ParseReference("//" + inputName) if err != nil { diff --git a/libpod/kube.go b/libpod/kube.go index d0e7baf95..6ae3e3d07 100644 --- a/libpod/kube.go +++ b/libpod/kube.go @@ -341,7 +341,7 @@ func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume return vms, vos, nil } -// generateKubeVolumeMount takes a user specfied mount and returns +// generateKubeVolumeMount takes a user specified mount and returns // a kubernetes VolumeMount (to be added to the container) and a kubernetes Volume // (to be added to the pod) func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) { @@ -487,13 +487,16 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { if err := c.syncContainer(); err != nil { return nil, errors.Wrapf(err, "unable to sync container during YAML generation") } + logrus.Debugf("Looking in container for user: %s", c.User()) - u, err := lookup.GetUser(c.state.Mountpoint, c.User()) + execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil) if err != nil { return nil, err } - user := int64(u.Uid) - sc.RunAsUser = &user + uid := int64(execUser.Uid) + gid := int64(execUser.Gid) + sc.RunAsUser = &uid + sc.RunAsGroup = &gid } return &sc, nil } diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c index fbb3f57cc..95052c40f 100644 --- a/libpod/lock/shm/shm_lock.c +++ b/libpod/lock/shm/shm_lock.c @@ -145,7 +145,7 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) { // Set mutexes to robust - if a process dies while holding a mutex, we'll get // a special error code on the next attempt to lock it. - // This should prevent panicing processes from leaving the state unusable. + // This should prevent panicking processes from leaving the state unusable. ret_code = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST); if (ret_code != 0) { *error_code = -1 * ret_code; @@ -298,7 +298,7 @@ int32_t close_lock_shm(shm_struct_t *shm) { // Allocate the first available semaphore // Returns a positive integer guaranteed to be less than UINT32_MAX on success, // or negative errno values on failure -// On sucess, the returned integer is the number of the semaphore allocated +// On success, the returned integer is the number of the semaphore allocated int64_t allocate_semaphore(shm_struct_t *shm) { int ret_code, i; bitmap_t test_map; diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index daa0619a2..a68338dbb 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -28,23 +28,50 @@ import ( ) // Get an OCICNI network config -func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork { - defaultNetwork := r.netPlugin.GetDefaultNetworkName() +func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) ocicni.PodNetwork { + var networkKey string + if len(networks) > 0 { + // This is inconsistent for >1 network, but it's probably the + // best we can do. 
+ networkKey = networks[0] + } else { + networkKey = r.netPlugin.GetDefaultNetworkName() + } network := ocicni.PodNetwork{ Name: name, Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces ID: id, NetNS: nsPath, - Networks: networks, RuntimeConfig: map[string]ocicni.RuntimeConfig{ - defaultNetwork: {PortMappings: ports}, + networkKey: {PortMappings: ports}, }, } - if staticIP != nil { - network.Networks = []string{defaultNetwork} + // If we have extra networks, add them + if len(networks) > 0 { + network.Networks = make([]ocicni.NetAttachment, len(networks)) + for i, netName := range networks { + network.Networks[i].Name = netName + } + } + + if staticIP != nil || staticMAC != nil { + // For static IP or MAC, we need to populate networks even if + // it's just the default. + if len(networks) == 0 { + // If len(networks) == 0 this is guaranteed to be the + // default network. + network.Networks = []ocicni.NetAttachment{{Name: networkKey}} + } + var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports} + if staticIP != nil { + rt.IP = staticIP.String() + } + if staticMAC != nil { + rt.MAC = staticMAC.String() + } network.RuntimeConfig = map[string]ocicni.RuntimeConfig{ - defaultNetwork: {IP: staticIP.String(), PortMappings: ports}, + networkKey: rt, } } @@ -62,7 +89,16 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re requestedIP = ctr.config.StaticIP } - podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP) + var requestedMAC net.HardwareAddr + if ctr.requestedMAC != nil { + requestedMAC = ctr.requestedMAC + // cancel request for a specific MAC in case the container is reused later + ctr.requestedMAC = nil + } else { + requestedMAC = ctr.config.StaticMAC + } + + podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC) results, err := r.netPlugin.SetUpPod(podNetwork) if err != nil { @@ -78,10 +114,10 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re networkStatus := make([]*cnitypes.Result, 0) for idx, r := range results { - logrus.Debugf("[%d] CNI result: %v", idx, r.String()) - resultCurrent, err := cnitypes.GetResult(r) + logrus.Debugf("[%d] CNI result: %v", idx, r.Result.String()) + resultCurrent, err := cnitypes.GetResult(r.Result) if err != nil { - return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.String(), err) + return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result.String(), err) } networkStatus = append(networkStatus, resultCurrent) } @@ -295,7 +331,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { defer close(chWait) // wait that API socket file appears before trying to use it. 
- if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout*time.Millisecond); err != nil { + if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil { return errors.Wrapf(err, "waiting for slirp4nets to create the api socket file %s", apiSocket) } @@ -443,7 +479,16 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { requestedIP = ctr.config.StaticIP } - podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP) + var requestedMAC net.HardwareAddr + if ctr.requestedMAC != nil { + requestedMAC = ctr.requestedMAC + // cancel request for a specific MAC in case the container is reused later + ctr.requestedMAC = nil + } else { + requestedMAC = ctr.config.StaticMAC + } + + podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC) if err := r.netPlugin.TearDownPod(podNetwork); err != nil { return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID()) diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go index eeaee6d43..46c70e7eb 100644 --- a/libpod/oci_attach_linux.go +++ b/libpod/oci_attach_linux.go @@ -152,7 +152,7 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c func processDetachKeys(keys string) ([]byte, error) { // Check the validity of the provided keys first if len(keys) == 0 { - keys = define.DefaultDetachKeys + return []byte{}, nil } detachKeys, err := term.ToBytes(keys) if err != nil { diff --git a/libpod/oci_util.go b/libpod/oci_util.go index c1a7f1c9a..53567d2d0 100644 --- a/libpod/oci_util.go +++ b/libpod/oci_util.go @@ -82,12 +82,21 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) { } func getOCIRuntimeError(runtimeMsg string) error { - r := strings.ToLower(runtimeMsg) - if match, _ := regexp.MatchString(".*permission denied.*|.*operation not permitted.*", r); match { - return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(runtimeMsg, "\n")) + includeFullOutput := logrus.GetLevel() == logrus.DebugLevel + + if match := regexp.MustCompile("(?i).*permission denied.*|.*operation not permitted.*").FindString(runtimeMsg); match != "" { + errStr := match + if includeFullOutput { + errStr = runtimeMsg + } + return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(errStr, "\n")) } - if match, _ := regexp.MatchString(".*executable file not found in.*|.*no such file or directory.*", r); match { - return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(runtimeMsg, "\n")) + if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" { + errStr := match + if includeFullOutput { + errStr = runtimeMsg + } + return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(errStr, "\n")) } return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n")) } diff --git a/libpod/options.go b/libpod/options.go index 66e8ef93c..a9b775dc3 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -386,8 +386,7 @@ func WithNamespace(ns string) RuntimeOption { // WithVolumePath sets the path under which all named volumes // should be created. -// The path changes based on whethe rthe user is running as root -// or not. +// The path changes based on whether the user is running as root or not. 
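Stepping back to the getOCIRuntimeError change above: instead of lowercasing the whole message and throwing away the match, the new code compiles case-insensitive patterns, keeps only the matched fragment unless debug logging is enabled, and wraps the corresponding sentinel error. Below is a stand-alone sketch of that shape; classifyRuntimeError and the local sentinel errors are illustrative stand-ins for getOCIRuntimeError and define.ErrOCIRuntime*, not libpod APIs.

package main

import (
	"fmt"
	"regexp"
	"strings"

	"github.com/pkg/errors"
)

// Local stand-ins for the define.ErrOCIRuntime* sentinels used in the patch.
var (
	errPermissionDenied = errors.New("OCI runtime permission denied error")
	errNotFound         = errors.New("OCI runtime command not found error")
	errGeneric          = errors.New("OCI runtime error")

	permissionRegex = regexp.MustCompile("(?i).*permission denied.*|.*operation not permitted.*")
	notFoundRegex   = regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*")
)

// classifyRuntimeError mirrors the structure of getOCIRuntimeError: match the
// runtime's output case-insensitively, report the matched fragment (or the full
// output when debugging), and wrap the matching sentinel error.
func classifyRuntimeError(runtimeMsg string, debug bool) error {
	if match := permissionRegex.FindString(runtimeMsg); match != "" {
		errStr := match
		if debug {
			errStr = runtimeMsg
		}
		return errors.Wrapf(errPermissionDenied, "%s", strings.Trim(errStr, "\n"))
	}
	if match := notFoundRegex.FindString(runtimeMsg); match != "" {
		errStr := match
		if debug {
			errStr = runtimeMsg
		}
		return errors.Wrapf(errNotFound, "%s", strings.Trim(errStr, "\n"))
	}
	return errors.Wrapf(errGeneric, "%s", strings.Trim(runtimeMsg, "\n"))
}

func main() {
	err := classifyRuntimeError("container_linux.go: Permission Denied\n", false)
	// The sentinel survives wrapping, so callers can still compare with errors.Cause.
	fmt.Println(err, "->", errors.Cause(err) == errPermissionDenied)
}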
func WithVolumePath(volPath string) RuntimeOption { return func(rt *Runtime) error { if rt.valid { @@ -769,16 +768,8 @@ func WithIPCNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(define.ErrInvalidArg, "must specify another container") - } - - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } ctr.config.IPCNsCtr = nsCtr.ID() @@ -797,16 +788,8 @@ func WithMountNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(define.ErrInvalidArg, "must specify another container") - } - - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } ctr.config.MountNsCtr = nsCtr.ID() @@ -825,22 +808,14 @@ func WithNetNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(define.ErrInvalidArg, "must specify another container") + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } if ctr.config.CreateNetNS { return errors.Wrapf(define.ErrInvalidArg, "cannot join another container's net ns as we are making a new net ns") } - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) - } - ctr.config.NetNsCtr = nsCtr.ID() return nil @@ -857,16 +832,8 @@ func WithPIDNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(define.ErrInvalidArg, "must specify another container") - } - - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } if ctr.config.NoCgroups { @@ -889,16 +856,8 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(define.ErrInvalidArg, "must specify another container") - } - - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } ctr.config.UserNsCtr = nsCtr.ID() @@ -918,16 +877,8 @@ func WithUTSNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return 
errors.Wrapf(define.ErrInvalidArg, "must specify another container") - } - - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } ctr.config.UTSNsCtr = nsCtr.ID() @@ -946,16 +897,8 @@ func WithCgroupNSFrom(nsCtr *Container) CtrCreateOption { return define.ErrCtrFinalized } - if !nsCtr.valid { - return define.ErrCtrRemoved - } - - if nsCtr.ID() == ctr.ID() { - return errors.Wrapf(define.ErrInvalidArg, "must specify another container") - } - - if ctr.config.Pod != "" && nsCtr.config.Pod != ctr.config.Pod { - return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, nsCtr.ID()) + if err := checkDependencyContainer(nsCtr, ctr); err != nil { + return err } ctr.config.CgroupNsCtr = nsCtr.ID() @@ -1042,8 +985,8 @@ func WithStaticIP(ip net.IP) CtrCreateOption { return errors.Wrapf(define.ErrInvalidArg, "cannot set a static IP if the container is not creating a network namespace") } - if len(ctr.config.Networks) != 0 { - return errors.Wrapf(define.ErrInvalidArg, "cannot set a static IP if joining additional CNI networks") + if len(ctr.config.Networks) > 1 { + return errors.Wrapf(define.ErrInvalidArg, "cannot set a static IP if joining more than 1 CNI network") } ctr.config.StaticIP = ip @@ -1052,6 +995,31 @@ func WithStaticIP(ip net.IP) CtrCreateOption { } } +// WithStaticMAC indicates that the container should request a static MAC from +// the CNI plugins. +// It cannot be set unless WithNetNS has already been passed. +// Further, it cannot be set if additional CNI networks to join have been +// specified. 
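Both WithStaticIP above and the WithStaticMAC option defined just below ultimately feed the reworked getPodNetwork, which now keys the runtime config on the first joined network (or the default network name) and attaches the requested addresses there. A minimal sketch of that wiring follows, assuming the ocicni fields used in this diff (RuntimeConfig.IP/MAC/PortMappings) and the cri-o/ocicni import path; buildRuntimeConfig and the example values are illustrative only.

package main

import (
	"fmt"
	"net"

	"github.com/cri-o/ocicni/pkg/ocicni"
)

// buildRuntimeConfig mirrors the tail of getPodNetwork: one RuntimeConfig entry
// keyed by the chosen network, carrying port mappings plus any static IP/MAC.
// (The networks list itself is turned into []ocicni.NetAttachment separately.)
func buildRuntimeConfig(networkKey string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) map[string]ocicni.RuntimeConfig {
	rt := ocicni.RuntimeConfig{PortMappings: ports}
	if staticIP != nil {
		rt.IP = staticIP.String()
	}
	if staticMAC != nil {
		rt.MAC = staticMAC.String()
	}
	return map[string]ocicni.RuntimeConfig{networkKey: rt}
}

func main() {
	ip := net.ParseIP("10.88.0.42")
	mac, _ := net.ParseMAC("92:d0:c6:0a:29:33")
	// "podman" stands in for whatever networkKey getPodNetwork selects.
	cfg := buildRuntimeConfig("podman", nil, ip, mac)
	fmt.Printf("%+v\n", cfg)
}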
+func WithStaticMAC(mac net.HardwareAddr) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + if !ctr.config.CreateNetNS { + return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if the container is not creating a network namespace") + } + + if len(ctr.config.Networks) > 1 { + return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if joining more than 1 CNI network") + } + + ctr.config.StaticMAC = mac + + return nil + } +} + // WithLogDriver sets the log driver for the container func WithLogDriver(driver string) CtrCreateOption { return func(ctr *Container) error { diff --git a/libpod/pod_api.go b/libpod/pod_api.go index 3a194f04b..b27257004 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -445,7 +445,7 @@ func (p *Pod) Inspect() (*PodInspect, error) { } for _, c := range containers { containerStatus := "unknown" - // Ignoring possible errors here because we dont want this to be + // Ignoring possible errors here because we don't want this to be // catastrophic in nature containerState, err := c.State() if err == nil { diff --git a/libpod/reset.go b/libpod/reset.go new file mode 100644 index 000000000..a35b476a4 --- /dev/null +++ b/libpod/reset.go @@ -0,0 +1,107 @@ +package libpod + +import ( + "context" + "os" + "path/filepath" + + "github.com/containers/libpod/libpod/define" + "github.com/containers/libpod/pkg/rootless" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Reset removes all storage +func (r *Runtime) Reset(ctx context.Context) error { + + pods, err := r.GetAllPods() + if err != nil { + return err + } + for _, p := range pods { + if err := r.RemovePod(ctx, p, true, true); err != nil { + if errors.Cause(err) == define.ErrNoSuchPod { + continue + } + logrus.Errorf("Error removing Pod %s: %v", p.ID(), err) + } + } + + ctrs, err := r.GetAllContainers() + if err != nil { + return err + } + + for _, c := range ctrs { + if err := r.RemoveContainer(ctx, c, true, true); err != nil { + if err := r.RemoveStorageContainer(c.ID(), true); err != nil { + if errors.Cause(err) == define.ErrNoSuchCtr { + continue + } + logrus.Errorf("Error removing container %s: %v", c.ID(), err) + } + } + } + + if err := stopPauseProcess(); err != nil { + logrus.Errorf("Error stopping pause process: %v", err) + } + + ir := r.ImageRuntime() + images, err := ir.GetImages() + if err != nil { + return err + } + + for _, i := range images { + if err := i.Remove(ctx, true); err != nil { + if errors.Cause(err) == define.ErrNoSuchImage { + continue + } + logrus.Errorf("Error removing image %s: %v", i.ID(), err) + } + } + volumes, err := r.state.AllVolumes() + if err != nil { + return err + } + for _, v := range volumes { + if err := r.RemoveVolume(ctx, v, true); err != nil { + if errors.Cause(err) == define.ErrNoSuchVolume { + continue + } + logrus.Errorf("Error removing volume %s: %v", v.config.Name, err) + } + } + + _, prevError := r.store.Shutdown(true) + if err := os.RemoveAll(r.store.GraphRoot()); err != nil { + if prevError != nil { + logrus.Error(prevError) + } + prevError = err + } + if err := os.RemoveAll(r.store.RunRoot()); err != nil { + if prevError != nil { + logrus.Error(prevError) + } + prevError = err + } + if err := os.RemoveAll(r.config.TmpDir); err != nil { + if prevError != nil { + logrus.Error(prevError) + } + prevError = err + } + if rootless.IsRootless() { + configPath := filepath.Join(os.Getenv("HOME"), ".config/containers") + if err := os.RemoveAll(configPath); err != nil { + if 
prevError != nil { + logrus.Error(prevError) + } + prevError = err + } + } + + return prevError +} diff --git a/libpod/runtime.go b/libpod/runtime.go index 42e6782e9..3873079ce 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -625,7 +625,8 @@ func (r *Runtime) refresh(alivePath string) error { } // Next refresh the state of all containers to recreate dirs and - // namespaces, and all the pods to recreate cgroups + // namespaces, and all the pods to recreate cgroups. + // Containers, pods, and volumes must also reacquire their locks. ctrs, err := r.state.AllContainers() if err != nil { return errors.Wrapf(err, "error retrieving all containers from state") @@ -634,10 +635,14 @@ func (r *Runtime) refresh(alivePath string) error { if err != nil { return errors.Wrapf(err, "error retrieving all pods from state") } - // No locks are taken during pod and container refresh. - // Furthermore, the pod and container refresh() functions are not + vols, err := r.state.AllVolumes() + if err != nil { + return errors.Wrapf(err, "error retrieving all volumes from state") + } + // No locks are taken during pod, volume, and container refresh. + // Furthermore, the pod/volume/container refresh() functions are not // allowed to take locks themselves. - // We cannot assume that any pod or container has a valid lock until + // We cannot assume that any pod/volume/container has a valid lock until // after this function has returned. // The runtime alive lock should suffice to provide mutual exclusion // until this has run. @@ -651,6 +656,11 @@ func (r *Runtime) refresh(alivePath string) error { logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err) } } + for _, vol := range vols { + if err := vol.refresh(); err != nil { + logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err) + } + } // Create a file indicating the runtime is alive and ready file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644) diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 7069d3494..ae401013c 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -489,32 +489,19 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - var cleanupErr error - // Remove the container from the state - if c.config.Pod != "" { - // If we're removing the pod, the container will be evicted - // from the state elsewhere - if !removePod { - if err := r.state.RemoveContainerFromPod(pod, c); err != nil { - cleanupErr = err - } - } - } else { - if err := r.state.RemoveContainer(c); err != nil { - cleanupErr = err - } + // Set ContainerStateRemoving and remove exec sessions + c.state.State = define.ContainerStateRemoving + c.state.ExecSessions = nil + + if err := c.save(); err != nil { + return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) } - // Set container as invalid so it can no longer be used - c.valid = false + var cleanupErr error // Clean up network namespace, cgroups, mounts if err := c.cleanup(ctx); err != nil { - if cleanupErr == nil { - cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID()) - } else { - logrus.Errorf("cleanup network, cgroups, mounts: %v", err) - } + cleanupErr = errors.Wrapf(err, "error cleaning up container %s", c.ID()) } // Stop the container's storage @@ -540,6 +527,29 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } + // Remove the container from the state + if c.config.Pod != "" { + // If we're removing the pod, the container will be evicted + // from 
the state elsewhere + if !removePod { + if err := r.state.RemoveContainerFromPod(pod, c); err != nil { + if cleanupErr == nil { + cleanupErr = err + } else { + logrus.Errorf("Error removing container %s from database: %v", c.ID(), err) + } + } + } + } else { + if err := r.state.RemoveContainer(c); err != nil { + if cleanupErr == nil { + cleanupErr = err + } else { + logrus.Errorf("Error removing container %s from database: %v", c.ID(), err) + } + } + } + // Deallocate the container's lock if err := c.lock.Free(); err != nil { if cleanupErr == nil { @@ -549,6 +559,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } + // Set container as invalid so it can no longer be used + c.valid = false + c.newContainerEvent(events.Remove) if !removeVolume { diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go index f2784c07d..abd8b581d 100644 --- a/libpod/runtime_img.go +++ b/libpod/runtime_img.go @@ -160,10 +160,11 @@ func (r *Runtime) Import(ctx context.Context, source string, reference string, c ic := v1.ImageConfig{} if len(changes) > 0 { - ic, err = util.GetImageConfig(changes) + config, err := util.GetImageConfig(changes) if err != nil { return "", errors.Wrapf(err, "error adding config changes to image %q", source) } + ic = config.ImageConfig } hist := []v1.History{ diff --git a/libpod/runtime_migrate_unsupported.go b/libpod/runtime_migrate_unsupported.go index 1a9e46fdc..e362cca63 100644 --- a/libpod/runtime_migrate_unsupported.go +++ b/libpod/runtime_migrate_unsupported.go @@ -9,3 +9,7 @@ import ( func (r *Runtime) migrate(ctx context.Context) error { return nil } + +func stopPauseProcess() error { + return nil +} diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go index a6ab748e5..835dccf9c 100644 --- a/libpod/runtime_volume.go +++ b/libpod/runtime_volume.go @@ -59,7 +59,7 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) { return vol, nil } -// LookupVolume retrieves a volume by unambigious partial name. +// LookupVolume retrieves a volume by unambiguous partial name. 
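The removeContainer rework earlier in this hunk (like the new Reset above) follows a best-effort teardown pattern: the container is first marked ContainerStateRemoving, cleanup runs through every step even when one fails, the first failure is remembered and returned while later failures are only logged, and only at the end is the record dropped from the database and the handle invalidated. A minimal sketch of that error-aggregation pattern, with purely illustrative step names rather than libpod functions:

package main

import (
	"errors"
	"fmt"
	"log"
)

// removeEverything runs each teardown step, returns the first error seen, and
// logs (rather than drops) any errors from later steps.
func removeEverything(steps []func() error) error {
	var firstErr error
	for _, step := range steps {
		if err := step(); err != nil {
			if firstErr == nil {
				firstErr = err
			} else {
				log.Printf("additional cleanup error: %v", err)
			}
		}
	}
	return firstErr
}

func main() {
	err := removeEverything([]func() error{
		func() error { return nil },                             // e.g. clean up network, cgroups, mounts
		func() error { return errors.New("unmount failed") },    // first error is preserved
		func() error { return errors.New("db removal failed") }, // later errors are only logged
	})
	fmt.Println("returned:", err)
}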
func (r *Runtime) LookupVolume(name string) (*Volume, error) { r.lock.RLock() defer r.lock.RUnlock() diff --git a/libpod/stats.go b/libpod/stats.go index 5513abce5..3b5e0958c 100644 --- a/libpod/stats.go +++ b/libpod/stats.go @@ -3,7 +3,6 @@ package libpod import ( - "runtime" "strings" "syscall" "time" @@ -56,8 +55,8 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container } previousCPU := previousStats.CPUNano - previousSystem := previousStats.SystemNano - stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, previousSystem) + now := uint64(time.Now().UnixNano()) + stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano) stats.MemUsage = cgroupStats.Memory.Usage.Usage stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit) stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100 @@ -67,7 +66,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container } stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats) stats.CPUNano = cgroupStats.CPU.Usage.Total - stats.SystemNano = cgroupStats.CPU.Usage.Kernel + stats.SystemNano = now // Handle case where the container is not in a network namespace if netStats != nil { stats.NetInput = netStats.TxBytes @@ -98,20 +97,19 @@ func getMemLimit(cgroupLimit uint64) uint64 { return cgroupLimit } -func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uint64) float64 { +// calculateCPUPercent calculates the cpu usage using the latest measurement in stats. +// previousCPU is the last value of stats.CPU.Usage.Total measured at the time previousSystem. +// (now - previousSystem) is the time delta in nanoseconds, between the measurement in previousCPU +// and the updated value in stats. +func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSystem uint64) float64 { var ( cpuPercent = 0.0 cpuDelta = float64(stats.CPU.Usage.Total - previousCPU) - systemDelta = float64(uint64(time.Now().UnixNano()) - previousSystem) + systemDelta = float64(now - previousSystem) ) if systemDelta > 0.0 && cpuDelta > 0.0 { - // gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running - // at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage - nCPUS := len(stats.CPU.Usage.PerCPU) - if nCPUS == 0 { - nCPUS = runtime.NumCPU() - } - cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100 + // gets a ratio of container cpu usage total, and multiplies that by 100 to get a percentage + cpuPercent = (cpuDelta / systemDelta) * 100 } return cpuPercent } diff --git a/libpod/util.go b/libpod/util.go index 7bd834e30..30e5cd4c3 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -187,6 +187,9 @@ func programVersion(mountProgram string) (string, error) { return strings.TrimSuffix(output, "\n"), nil } +// DefaultSeccompPath returns the path to the default seccomp.json file +// if it exists, first it checks OverrideSeccomp and then default. +// If neither exist function returns "" func DefaultSeccompPath() (string, error) { _, err := os.Stat(config.SeccompOverridePath) if err == nil { @@ -203,3 +206,28 @@ func DefaultSeccompPath() (string, error) { } return config.SeccompDefaultPath, nil } + +// CheckDependencyContainer verifies the given container can be used as a +// dependency of another container. +// Both the dependency to check and the container that will be using the +// dependency must be passed in. 
+// It is assumed that ctr is locked, and depCtr is unlocked. +func checkDependencyContainer(depCtr, ctr *Container) error { + state, err := depCtr.State() + if err != nil { + return errors.Wrapf(err, "error accessing dependency container %s state", depCtr.ID()) + } + if state == define.ContainerStateRemoving { + return errors.Wrapf(define.ErrCtrStateInvalid, "cannot use container %s as a dependency as it is being removed", depCtr.ID()) + } + + if depCtr.ID() == ctr.ID() { + return errors.Wrapf(define.ErrInvalidArg, "must specify another container") + } + + if ctr.config.Pod != "" && depCtr.PodID() != ctr.config.Pod { + return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, depCtr.ID()) + } + + return nil +} diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index 42b935e7c..e89b3484d 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/containers/libpod/libpod/define" + "github.com/pkg/errors" ) // Creates a new volume @@ -46,3 +47,14 @@ func (v *Volume) update() error { func (v *Volume) save() error { return v.runtime.state.SaveVolume(v) } + +// Refresh volume state after a restart. +func (v *Volume) refresh() error { + lock, err := v.runtime.lockManager.AllocateAndRetrieveLock(v.config.LockID) + if err != nil { + return errors.Wrapf(err, "error acquiring lock %d for volume %s", v.config.LockID, v.Name()) + } + v.lock = lock + + return nil +}
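Finally, a small numeric illustration of the calculateCPUPercent change in stats.go above: the percentage is now the container's cgroup CPU delta divided by the wall-clock delta between samples, times 100, with no per-CPU scaling. The sample values below are made up for the example.

package main

import "fmt"

// calculateCPUPercent follows the revised formula from stats.go: cpuDelta over
// the wall-clock systemDelta, expressed as a percentage.
func calculateCPUPercent(totalUsage, previousCPU, now, previousSystem uint64) float64 {
	cpuDelta := float64(totalUsage - previousCPU)
	systemDelta := float64(now - previousSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * 100
	}
	return 0.0
}

func main() {
	// 500ms of CPU time consumed over a 1s sampling window -> 50%.
	var (
		previousCPU    uint64 = 2000000000  // cgroup CPU usage at the previous sample (ns)
		totalUsage     uint64 = 2500000000  // cgroup CPU usage now (ns)
		previousSystem uint64 = 10000000000 // wall clock at the previous sample (ns)
		now            uint64 = 11000000000 // wall clock now (ns)
	)
	fmt.Printf("%.1f%%\n", calculateCPUPercent(totalUsage, previousCPU, now, previousSystem))
}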