Diffstat (limited to 'libpod')
53 files changed, 2876 insertions, 2284 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 4918bf57a..34ca7f740 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -6,7 +6,6 @@ import (
    "strings"
    "sync"

-   "github.com/containers/libpod/libpod/config"
    "github.com/containers/libpod/libpod/define"
    bolt "github.com/etcd-io/bbolt"
    jsoniter "github.com/json-iterator/go"
@@ -41,6 +40,8 @@ type BoltState struct {
 // containing the path to the container's network namespace, a dependencies
 // bucket containing the container's dependencies, and an optional pod key
 // containing the ID of the pod the container is joined to.
+// After updates to include exec sessions, may also include an exec bucket
+// with the IDs of exec sessions currently in use by the container.
 // - allCtrsBkt: Map of ID to name containing only containers. Used for
 // container lookup operations.
 // - podBkt: Contains a sub-bucket for each pod in the state.
@@ -49,6 +50,10 @@ type BoltState struct {
 // containers in the pod.
 // - allPodsBkt: Map of ID to name containing only pods. Used for pod lookup
 // operations.
+// - execBkt: Map of exec session ID to exec session - contains a sub-bucket for
+// each exec session in the DB.
+// - execRegistryBkt: Map of exec session ID to nothing. Contains one entry for
+// each exec session. Used for iterating through all exec sessions.
 // - runtimeConfigBkt: Contains configuration of the libpod instance that
 // initially created the database. This must match for any further instances
 // that access the database, to ensure that state mismatches with
@@ -86,6 +91,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
        allPodsBkt,
        volBkt,
        allVolsBkt,
+       execBkt,
        runtimeConfigBkt,
    }
@@ -171,6 +177,11 @@ func (s *BoltState) Refresh() error {
            return err
        }

+       execBucket, err := getExecBucket(tx)
+       if err != nil {
+           return err
+       }
+
        // Iterate through all IDs. Check if they are containers.
        // If they are, unmarshal their state, and then clear
        // PID, mountpoint, and state for all of them
@@ -245,6 +256,26 @@ func (s *BoltState) Refresh() error {
                return errors.Wrapf(err, "error updating state for container %s in DB", string(id))
            }

+           // Delete all exec sessions, if there are any
+           ctrExecBkt := ctrBkt.Bucket(execBkt)
+           if ctrExecBkt != nil {
+               // Can't delete in a ForEach, so build a list of
+               // what to remove then remove.
+               toRemove := []string{}
+               err = ctrExecBkt.ForEach(func(id, unused []byte) error {
+                   toRemove = append(toRemove, string(id))
+                   return nil
+               })
+               if err != nil {
+                   return err
+               }
+               for _, execId := range toRemove {
+                   if err := ctrExecBkt.Delete([]byte(execId)); err != nil {
+                       return errors.Wrapf(err, "error removing exec session %s from container %s", execId, string(id))
+                   }
+               }
+           }
+
            return nil
        })
        if err != nil {
@@ -285,19 +316,42 @@ func (s *BoltState) Refresh() error {
            return nil
        })
-       return err
+       if err != nil {
+           return err
+       }
+
+       // Now refresh exec sessions.
+       // We want to remove them all, but ForEach can't modify buckets,
+       // so we have to make a list of what to operate on, then do the
+       // work.
+       toRemoveExec := []string{}
+       err = execBucket.ForEach(func(id, unused []byte) error {
+           toRemoveExec = append(toRemoveExec, string(id))
+           return nil
+       })
+       if err != nil {
+           return err
+       }
+
+       for _, execSession := range toRemoveExec {
+           if err := execBucket.Delete([]byte(execSession)); err != nil {
+               return errors.Wrapf(err, "error deleting exec session %s registry from database", execSession)
+           }
+       }
+
+       return nil
    })
    return err
}

 // GetDBConfig retrieves runtime configuration fields that were created when
 // the database was first initialized
-func (s *BoltState) GetDBConfig() (*config.DBConfig, error) {
+func (s *BoltState) GetDBConfig() (*DBConfig, error) {
    if !s.valid {
        return nil, define.ErrDBClosed
    }

-   cfg := new(config.DBConfig)
+   cfg := new(DBConfig)

    db, err := s.getDBCon()
    if err != nil {
@@ -895,6 +949,287 @@ func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) {
    return config, nil
}

+// AddExecSession adds an exec session to the state.
+func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error {
+   if !s.valid {
+       return define.ErrDBClosed
+   }
+
+   if !ctr.valid {
+       return define.ErrCtrRemoved
+   }
+
+   db, err := s.getDBCon()
+   if err != nil {
+       return err
+   }
+   defer s.deferredCloseDBCon(db)
+
+   ctrID := []byte(ctr.ID())
+   sessionID := []byte(session.ID())
+
+   err = db.Update(func(tx *bolt.Tx) error {
+       execBucket, err := getExecBucket(tx)
+       if err != nil {
+           return err
+       }
+       ctrBucket, err := getCtrBucket(tx)
+       if err != nil {
+           return err
+       }
+
+       dbCtr := ctrBucket.Bucket(ctrID)
+       if dbCtr == nil {
+           ctr.valid = false
+           return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not present in the database", ctr.ID())
+       }
+
+       ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt)
+       if err != nil {
+           return errors.Wrapf(err, "error creating exec sessions bucket for container %s", ctr.ID())
+       }
+
+       execExists := execBucket.Get(sessionID)
+       if execExists != nil {
+           return errors.Wrapf(define.ErrExecSessionExists, "an exec session with ID %s already exists", session.ID())
+       }
+
+       if err := execBucket.Put(sessionID, ctrID); err != nil {
+           return errors.Wrapf(err, "error adding exec session %s to DB", session.ID())
+       }
+
+       if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil {
+           return errors.Wrapf(err, "error adding exec session %s to container %s in DB", session.ID(), ctr.ID())
+       }
+
+       return nil
+   })
+   return err
+}
+
+// GetExecSession returns the ID of the container an exec session is associated
+// with.
+func (s *BoltState) GetExecSession(id string) (string, error) {
+   if !s.valid {
+       return "", define.ErrDBClosed
+   }
+
+   if id == "" {
+       return "", define.ErrEmptyID
+   }
+
+   db, err := s.getDBCon()
+   if err != nil {
+       return "", err
+   }
+   defer s.deferredCloseDBCon(db)
+
+   ctrID := ""
+   err = db.View(func(tx *bolt.Tx) error {
+       execBucket, err := getExecBucket(tx)
+       if err != nil {
+           return err
+       }
+
+       ctr := execBucket.Get([]byte(id))
+       if ctr == nil {
+           return errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found", id)
+       }
+       ctrID = string(ctr)
+       return nil
+   })
+   return ctrID, err
+}
+
+// RemoveExecSession removes references to the given exec session in the
+// database.
+func (s *BoltState) RemoveExecSession(session *ExecSession) error {
+   if !s.valid {
+       return define.ErrDBClosed
+   }
+
+   db, err := s.getDBCon()
+   if err != nil {
+       return err
+   }
+   defer s.deferredCloseDBCon(db)
+
+   sessionID := []byte(session.ID())
+   containerID := []byte(session.ContainerID())
+   err = db.Update(func(tx *bolt.Tx) error {
+       execBucket, err := getExecBucket(tx)
+       if err != nil {
+           return err
+       }
+       ctrBucket, err := getCtrBucket(tx)
+       if err != nil {
+           return err
+       }
+
+       sessionExists := execBucket.Get(sessionID)
+       if sessionExists == nil {
+           return define.ErrNoSuchExecSession
+       }
+       // Check that container ID matches
+       if string(sessionExists) != session.ContainerID() {
+           return errors.Wrapf(define.ErrInternal, "database inconsistency: exec session %s points to container %s in state but %s in database", session.ID(), session.ContainerID(), string(sessionExists))
+       }
+
+       if err := execBucket.Delete(sessionID); err != nil {
+           return errors.Wrapf(err, "error removing exec session %s from database", session.ID())
+       }
+
+       dbCtr := ctrBucket.Bucket(containerID)
+       if dbCtr == nil {
+           // State is inconsistent. We refer to a container that
+           // is no longer in the state.
+           // Return without error, to attempt to recover.
+           return nil
+       }
+
+       ctrExecBucket := dbCtr.Bucket(execBkt)
+       if ctrExecBucket == nil {
+           // Again, state is inconsistent. We should have an exec
+           // bucket, and it should have this session.
+           // Again, nothing we can do, so proceed and try to
+           // recover.
+           return nil
+       }
+
+       ctrSessionExists := ctrExecBucket.Get(sessionID)
+       if ctrSessionExists != nil {
+           if err := ctrExecBucket.Delete(sessionID); err != nil {
+               return errors.Wrapf(err, "error removing exec session %s from container %s in database", session.ID(), session.ContainerID())
+           }
+       }
+
+       return nil
+   })
+   return err
+}
+
+// GetContainerExecSessions retrieves the IDs of all exec sessions running in a
+// container that the database is aware of (i.e., were added via AddExecSession).
+func (s *BoltState) GetContainerExecSessions(ctr *Container) ([]string, error) {
+   if !s.valid {
+       return nil, define.ErrDBClosed
+   }
+
+   if !ctr.valid {
+       return nil, define.ErrCtrRemoved
+   }
+
+   db, err := s.getDBCon()
+   if err != nil {
+       return nil, err
+   }
+   defer s.deferredCloseDBCon(db)
+
+   ctrID := []byte(ctr.ID())
+   sessions := []string{}
+   err = db.View(func(tx *bolt.Tx) error {
+       ctrBucket, err := getCtrBucket(tx)
+       if err != nil {
+           return err
+       }
+
+       dbCtr := ctrBucket.Bucket(ctrID)
+       if dbCtr == nil {
+           ctr.valid = false
+           return define.ErrNoSuchCtr
+       }
+
+       ctrExecSessions := dbCtr.Bucket(execBkt)
+       if ctrExecSessions == nil {
+           return nil
+       }
+
+       return ctrExecSessions.ForEach(func(id, unused []byte) error {
+           sessions = append(sessions, string(id))
+           return nil
+       })
+   })
+   if err != nil {
+       return nil, err
+   }
+
+   return sessions, nil
+}
+
+// RemoveContainerExecSessions removes all exec sessions attached to a given
+// container.
+func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error {
+   if !s.valid {
+       return define.ErrDBClosed
+   }
+
+   if !ctr.valid {
+       return define.ErrCtrRemoved
+   }
+
+   db, err := s.getDBCon()
+   if err != nil {
+       return err
+   }
+   defer s.deferredCloseDBCon(db)
+
+   ctrID := []byte(ctr.ID())
+   sessions := []string{}
+
+   err = db.Update(func(tx *bolt.Tx) error {
+       execBucket, err := getExecBucket(tx)
+       if err != nil {
+           return err
+       }
+       ctrBucket, err := getCtrBucket(tx)
+       if err != nil {
+           return err
+       }
+
+       dbCtr := ctrBucket.Bucket(ctrID)
+       if dbCtr == nil {
+           ctr.valid = false
+           return define.ErrNoSuchCtr
+       }
+
+       ctrExecSessions := dbCtr.Bucket(execBkt)
+       if ctrExecSessions == nil {
+           return nil
+       }
+
+       err = ctrExecSessions.ForEach(func(id, unused []byte) error {
+           sessions = append(sessions, string(id))
+           return nil
+       })
+       if err != nil {
+           return err
+       }
+
+       for _, session := range sessions {
+           if err := ctrExecSessions.Delete([]byte(session)); err != nil {
+               return errors.Wrapf(err, "error removing container %s exec session %s from database", ctr.ID(), session)
+           }
+           // Check if the session exists in the global table
+           // before removing. It should, but in cases where the DB
+           // has become inconsistent, we should try and proceed
+           // so we can recover.
+           sessionExists := execBucket.Get([]byte(session))
+           if sessionExists == nil {
+               continue
+           }
+           if string(sessionExists) != ctr.ID() {
+               return errors.Wrapf(define.ErrInternal, "database mismatch: exec session %s is associated with containers %s and %s", session, ctr.ID(), string(sessionExists))
+           }
+           if err := execBucket.Delete([]byte(session)); err != nil {
+               return errors.Wrapf(err, "error removing container %s exec session %s from exec sessions", ctr.ID(), session)
+           }
+       }
+
+       return nil
+   })
+   return err
+}
+
 // RewriteContainerConfig rewrites a container's configuration.
 // WARNING: This function is DANGEROUS. Do not use without reading the full
 // comment on this function in state.go.
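The exec-session methods added above all share one double-bookkeeping scheme: each session ID is written once to the global exec bucket (session ID -> owning container ID) and once to an exec sub-bucket under the container itself. The following is a minimal, standalone sketch of that scheme against bbolt; the bucket names mirror the diff, but the demo database path and function names are illustrative, not libpod's.

package main

import (
    "fmt"
    "log"

    bolt "github.com/etcd-io/bbolt"
)

var (
    execBkt = []byte("exec") // global: session ID -> container ID
    ctrBkt  = []byte("ctr")  // per-container sub-buckets live under here
)

// registerExecSession mirrors AddExecSession: the session is recorded both
// globally (for O(1) GetExecSession lookups) and under its container (so
// GetContainerExecSessions can enumerate without scanning the global bucket).
func registerExecSession(db *bolt.DB, ctrID, sessionID string) error {
    return db.Update(func(tx *bolt.Tx) error {
        global, err := tx.CreateBucketIfNotExists(execBkt)
        if err != nil {
            return err
        }
        ctrs, err := tx.CreateBucketIfNotExists(ctrBkt)
        if err != nil {
            return err
        }
        ctr, err := ctrs.CreateBucketIfNotExists([]byte(ctrID))
        if err != nil {
            return err
        }
        perCtr, err := ctr.CreateBucketIfNotExists(execBkt)
        if err != nil {
            return err
        }
        if global.Get([]byte(sessionID)) != nil {
            return fmt.Errorf("exec session %s already exists", sessionID)
        }
        if err := global.Put([]byte(sessionID), []byte(ctrID)); err != nil {
            return err
        }
        return perCtr.Put([]byte(sessionID), []byte(ctrID))
    })
}

func main() {
    db, err := bolt.Open("/tmp/exec-demo.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
    if err := registerExecSession(db, "ctr1", "sess1"); err != nil {
        log.Fatal(err)
    }
}

Because both writes happen inside a single db.Update transaction, the two mappings cannot drift apart on a crash - which is why RemoveExecSession treats a mismatch between them as define.ErrInternal.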
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 3f09305f5..6e1f2a5f2 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -24,6 +24,7 @@ const (
    allPodsName = "allPods"
    volName = "vol"
    allVolsName = "allVolumes"
+   execName = "exec"
    runtimeConfigName = "runtime-config"

    configName = "config"
@@ -54,6 +55,7 @@ var (
    allPodsBkt = []byte(allPodsName)
    volBkt = []byte(volName)
    allVolsBkt = []byte(allVolsName)
+   execBkt = []byte(execName)
    runtimeConfigBkt = []byte(runtimeConfigName)

    configKey = []byte(configName)
@@ -102,37 +104,37 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
        },
        {
            "libpod root directory (staticdir)",
-           rt.config.StaticDir,
+           rt.config.Engine.StaticDir,
            staticDirKey,
            "",
        },
        {
            "libpod temporary files directory (tmpdir)",
-           rt.config.TmpDir,
+           rt.config.Engine.TmpDir,
            tmpDirKey,
            "",
        },
        {
            "storage temporary directory (runroot)",
-           rt.config.StorageConfig.RunRoot,
+           rt.StorageConfig().RunRoot,
            runRootKey,
            storeOpts.RunRoot,
        },
        {
            "storage graph root directory (graphroot)",
-           rt.config.StorageConfig.GraphRoot,
+           rt.StorageConfig().GraphRoot,
            graphRootKey,
            storeOpts.GraphRoot,
        },
        {
            "storage graph driver",
-           rt.config.StorageConfig.GraphDriverName,
+           rt.StorageConfig().GraphDriverName,
            graphDriverKey,
            storeOpts.GraphDriverName,
        },
        {
            "volume path",
-           rt.config.VolumePath,
+           rt.config.Engine.VolumePath,
            volPathKey,
            "",
        },
@@ -339,6 +341,14 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    return bkt, nil
}

+func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
+   bkt := tx.Bucket(execBkt)
+   if bkt == nil {
+       return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB")
+   }
+   return bkt, nil
+}
+
 func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
    bkt := tx.Bucket(runtimeConfigBkt)
    if bkt == nil {
@@ -787,6 +797,23 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
        }
    }

+   // Does the container have exec sessions?
+   ctrExecSessionsBkt := ctrExists.Bucket(execBkt)
+   if ctrExecSessionsBkt != nil {
+       sessions := []string{}
+       err = ctrExecSessionsBkt.ForEach(func(id, value []byte) error {
+           sessions = append(sessions, string(id))
+           return nil
+       })
+       if err != nil {
+           return err
+       }
+       if len(sessions) > 0 {
+           return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", "))
+       }
+   }
+
    // Does the container have dependencies?
    ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
    if ctrDepsBkt == nil {
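removeContainer now refuses to delete a container whose exec sub-bucket still has entries. A hedged sketch of that guard in isolation (the bucket name matches the diff; the helper itself is illustrative) - peeking at the first cursor entry is a cheap alternative to the ForEach collection when only emptiness matters:

// hasExecSessions reports whether a container bucket's "exec" sub-bucket
// holds any entries; removeContainer aborts with ErrExecSessionExists when
// it does. A cursor peek avoids iterating the whole bucket.
func hasExecSessions(ctr *bolt.Bucket) bool {
    execSessions := ctr.Bucket([]byte("exec"))
    if execSessions == nil {
        return false
    }
    k, _ := execSessions.Cursor().First()
    return k != nil
}

The real code still iterates the whole bucket, but only so it can name the offending sessions in the error message.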
diff --git a/libpod/common_test.go b/libpod/common_test.go
index 63ea4f41b..abf336f97 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -7,7 +7,7 @@ import (
    "testing"
    "time"

-   "github.com/containers/libpod/libpod/config"
+   "github.com/containers/common/pkg/config"
    "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/libpod/lock"
    "github.com/cri-o/ocicni/pkg/ocicni"
@@ -58,14 +58,12 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
            PID: 1234,
            ExecSessions: map[string]*ExecSession{
                "abcd": {
-                   ID: "1",
-                   Command: []string{"2", "3"},
-                   PID: 9876,
+                   Id: "1",
+                   PID: 9876,
                },
                "ef01": {
-                   ID: "5",
-                   Command: []string{"hello", "world"},
-                   PID: 46765,
+                   Id: "5",
+                   PID: 46765,
                },
            },
            BindMounts: map[string]string{
@@ -75,7 +73,9 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
        },
        runtime: &Runtime{
            config: &config.Config{
-               VolumePath: "/does/not/exist/tmp/volumes",
+               Engine: config.EngineConfig{
+                   VolumePath: "/does/not/exist/tmp/volumes",
+               },
            },
        },
        valid: true,
diff --git a/libpod/config/config.go b/libpod/config/config.go
deleted file mode 100644
index 5d59f1bf2..000000000
--- a/libpod/config/config.go
+++ /dev/null
@@ -1,618 +0,0 @@
-package config
-
-import (
-   "bytes"
-   "fmt"
-   "os"
-   "os/exec"
-   "path/filepath"
-   "regexp"
-   "strconv"
-   "strings"
-
-   "github.com/BurntSushi/toml"
-   "github.com/containers/libpod/libpod/define"
-   "github.com/containers/libpod/pkg/cgroups"
-   "github.com/containers/libpod/pkg/rootless"
-   "github.com/containers/libpod/pkg/util"
-   "github.com/containers/storage"
-   "github.com/pkg/errors"
-   "github.com/sirupsen/logrus"
-)
-
-const (
-   // _defaultTransport is a prefix that we apply to an image name to check
-   // docker hub first for the image.
-   _defaultTransport = "docker://"
-
-   // _rootlessConfigPath is the path to the rootless libpod.conf in $HOME.
-   _rootlessConfigPath = ".config/containers/libpod.conf"
-
-   // _conmonMinMajorVersion is the major version required for conmon.
-   _conmonMinMajorVersion = 2
-
-   // _conmonMinMinorVersion is the minor version required for conmon.
-   _conmonMinMinorVersion = 0
-
-   // _conmonMinPatchVersion is the sub-minor version required for conmon.
-   _conmonMinPatchVersion = 1
-
-   // _conmonVersionFormatErr is used when the expected version format of conmon
-   // has changed.
-   _conmonVersionFormatErr = "conmon version changed format"
-
-   // InstallPrefix is the prefix where podman will be installed.
-   // It can be overridden at build time.
-   _installPrefix = "/usr"
-
-   // EtcDir is the sysconfdir where podman should look for system config files.
-   // It can be overridden at build time.
-   _etcDir = "/etc"
-
-   // SeccompDefaultPath defines the default seccomp path.
-   SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json"
-
-   // SeccompOverridePath if this exists it overrides the default seccomp path.
-   SeccompOverridePath = _etcDir + "/crio/seccomp.json"
-
-   // _rootConfigPath is the path to the libpod configuration file.
-   // This file is loaded to replace the builtin default config before
-   // runtime options (e.g. WithStorageConfig) are applied.
-   // If it is not present, the builtin default config is used instead.
-   // This path can be overridden when the runtime is created by using
-   // NewRuntimeFromConfig() instead of NewRuntime().
-   _rootConfigPath = _installPrefix + "/share/containers/libpod.conf"
-
-   // _rootOverrideConfigPath is the path to an override for the default libpod
-   // configuration file. If OverrideConfigPath exists, it will be used in
-   // place of the configuration file pointed to by ConfigPath.
-   _rootOverrideConfigPath = _etcDir + "/containers/libpod.conf"
-)
-
-// SetOptions contains a subset of options in a Config. It's used to indicate if
-// a given option has either been set by the user or by a parsed libpod
-// configuration file. If not, the corresponding option might be overwritten by
-// values from the database. This behavior guarantees backwards compat with
-// older versions of libpod and Podman.
-type SetOptions struct {
-   // StorageConfigRunRootSet indicates if the RunRoot has been explicitly set
-   // by the config or by the user. It's required to guarantee backwards
-   // compatibility with older versions of libpod for which we must query the
-   // database configuration. Not included in the on-disk config.
-   StorageConfigRunRootSet bool `toml:"-"`
-
-   // StorageConfigGraphRootSet indicates if the GraphRoot has been explicitly
-   // set by the config or by the user. It's required to guarantee backwards
-   // compatibility with older versions of libpod for which we must query the
-   // database configuration. Not included in the on-disk config.
-   StorageConfigGraphRootSet bool `toml:"-"`
-
-   // StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been
-   // explicitly set by the config or by the user. It's required to guarantee
-   // backwards compatibility with older versions of libpod for which we must
-   // query the database configuration. Not included in the on-disk config.
-   StorageConfigGraphDriverNameSet bool `toml:"-"`
-
-   // VolumePathSet indicates if the VolumePath has been explicitly set by the
-   // config or by the user. It's required to guarantee backwards compatibility
-   // with older versions of libpod for which we must query the database
-   // configuration. Not included in the on-disk config.
-   VolumePathSet bool `toml:"-"`
-
-   // StaticDirSet indicates if the StaticDir has been explicitly set by the
-   // config or by the user. It's required to guarantee backwards compatibility
-   // with older versions of libpod for which we must query the database
-   // configuration. Not included in the on-disk config.
-   StaticDirSet bool `toml:"-"`
-
-   // TmpDirSet indicates if the TmpDir has been explicitly set by the config
-   // or by the user. It's required to guarantee backwards compatibility with
-   // older versions of libpod for which we must query the database
-   // configuration. Not included in the on-disk config.
-   TmpDirSet bool `toml:"-"`
-}
-
-// Config contains configuration options used to set up a libpod runtime
-type Config struct {
-   // NOTE: when changing this struct, make sure to update (*Config).Merge().
-
-   // SetOptions contains a subset of config options. It's used to indicate if
-   // a given option has either been set by the user or by a parsed libpod
-   // configuration file. If not, the corresponding option might be
-   // overwritten by values from the database. This behavior guarantees
-   // backwards compat with older versions of libpod and Podman.
-   SetOptions
-
-   // StateType is the type of the backing state store. Avoid using multiple
-   // values for this with the same containers/storage configuration on the
-   // same system.
-   // Different state types do not interact, and each will see a
-   // separate set of containers, which may cause conflicts in
-   // containers/storage. As such this is not exposed via the config file.
-   StateType define.RuntimeStateStore `toml:"-"`
-
-   // StorageConfig is the configuration used by containers/storage. Not
-   // included in the on-disk config, use the dedicated containers/storage
-   // configuration file instead.
-   StorageConfig storage.StoreOptions `toml:"-"`
-
-   // VolumePath is the default location that named volumes will be created
-   // under. This convention is followed by the default volume driver, but
-   // may not be by other drivers.
-   VolumePath string `toml:"volume_path,omitempty"`
-
-   // ImageDefaultTransport is the default transport method used to fetch
-   // images.
-   ImageDefaultTransport string `toml:"image_default_transport,omitempty"`
-
-   // SignaturePolicyPath is the path to a signature policy to use for
-   // validating images. If left empty, the containers/image default signature
-   // policy will be used.
-   SignaturePolicyPath string `toml:"signature_policy_path,omitempty"`
-
-   // OCIRuntime is the OCI runtime to use.
-   OCIRuntime string `toml:"runtime,omitempty"`
-
-   // OCIRuntimes are the set of configured OCI runtimes (default is runc).
-   OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
-
-   // RuntimeSupportsJSON is the list of the OCI runtimes that support
-   // --format=json.
-   RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"`
-
-   // RuntimeSupportsNoCgroups is a list of OCI runtimes that support
-   // running containers without CGroups.
-   RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups,omitempty"`
-
-   // RuntimePath is the path to the OCI runtime binary for launching containers.
-   // The first path pointing to a valid file will be used. This is used only
-   // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
-   // backward compatible with older versions of Podman.
-   RuntimePath []string `toml:"runtime_path,omitempty"`
-
-   // ConmonPath is the path to the Conmon binary used for managing containers.
-   // The first path pointing to a valid file will be used.
-   ConmonPath []string `toml:"conmon_path,omitempty"`
-
-   // ConmonEnvVars are environment variables to pass to the Conmon binary
-   // when it is launched.
-   ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"`
-
-   // CgroupManager is the CGroup manager to use. Valid values are "cgroupfs"
-   // and "systemd".
-   CgroupManager string `toml:"cgroup_manager,omitempty"`
-
-   // InitPath is the path to the container-init binary.
-   InitPath string `toml:"init_path,omitempty"`
-
-   // StaticDir is the path to a persistent directory to store container
-   // files.
-   StaticDir string `toml:"static_dir,omitempty"`
-
-   // TmpDir is the path to a temporary directory to store per-boot container
-   // files. Must be stored in a tmpfs.
-   TmpDir string `toml:"tmp_dir,omitempty"`
-
-   // MaxLogSize is the maximum size of container logfiles.
-   MaxLogSize int64 `toml:"max_log_size,omitempty"`
-
-   // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
-   NoPivotRoot bool `toml:"no_pivot_root,omitempty"`
-
-   // CNIConfigDir sets the directory where CNI configuration files are
-   // stored.
-   CNIConfigDir string `toml:"cni_config_dir,omitempty"`
-
-   // CNIPluginDir sets a number of directories where the CNI network
-   // plugins can be located.
-   CNIPluginDir []string `toml:"cni_plugin_dir,omitempty"`
-
-   // CNIDefaultNetwork is the network name of the default CNI network
-   // to attach pods to.
-   CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
-
-   // HooksDir holds paths to the directories containing hooks
-   // configuration files. When the same filename is present in
-   // multiple directories, the file in the directory listed last in
-   // this slice takes precedence.
-   HooksDir []string `toml:"hooks_dir,omitempty"`
-
-   // DefaultMountsFile is the path to the default mounts file for testing
-   // purposes only.
-   DefaultMountsFile string `toml:"-"`
-
-   // Namespace is the libpod namespace to use. Namespaces are used to create
-   // scopes to separate containers and pods in the state. When namespace is
-   // set, libpod will only view containers and pods in the same namespace. All
-   // containers and pods created will default to the namespace set here. A
-   // namespace of "", the empty string, is equivalent to no namespace, and all
-   // containers and pods will be visible. The default namespace is "".
-   Namespace string `toml:"namespace,omitempty"`
-
-   // InfraImage is the image a pod infra container will use to manage
-   // namespaces.
-   InfraImage string `toml:"infra_image,omitempty"`
-
-   // InfraCommand is the command run to start up a pod infra container.
-   InfraCommand string `toml:"infra_command,omitempty"`
-
-   // EnablePortReservation determines whether libpod will reserve ports on the
-   // host when they are forwarded to containers. When enabled, when ports are
-   // forwarded to containers, they are held open by conmon as long as the
-   // container is running, ensuring that they cannot be reused by other
-   // programs on the host. However, this can cause significant memory usage if
-   // a container has many ports forwarded to it. Disabling this can save
-   // memory.
-   EnablePortReservation bool `toml:"enable_port_reservation,omitempty"`
-
-   // EnableLabeling indicates whether libpod will support container labeling.
-   EnableLabeling bool `toml:"label,omitempty"`
-
-   // NetworkCmdPath is the path to the slirp4netns binary.
-   NetworkCmdPath string `toml:"network_cmd_path,omitempty"`
-
-   // NumLocks is the number of locks to make available for containers and
-   // pods.
-   NumLocks uint32 `toml:"num_locks,omitempty"`
-
-   // LockType is the type of locking to use.
-   LockType string `toml:"lock_type,omitempty"`
-
-   // EventsLogger determines where events should be logged.
-   EventsLogger string `toml:"events_logger,omitempty"`
-
-   // EventsLogFilePath is where the events log is stored.
-   EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
-
-   // DetachKeys is the sequence of keys used to detach a container.
-   DetachKeys string `toml:"detach_keys,omitempty"`
-
-   // SDNotify tells Libpod to allow containers to notify the host systemd of
-   // readiness using the SD_NOTIFY mechanism.
-   SDNotify bool `toml:",omitempty"`
-
-   // CgroupCheck indicates the configuration has been rewritten after an
-   // upgrade to Fedora 31 to change the default OCI runtime for cgroups v2.
-   CgroupCheck bool `toml:"cgroup_check,omitempty"`
-}
-
-// DBConfig is a set of Libpod runtime configuration settings that are saved in
-// a State when it is first created, and can subsequently be retrieved.
-type DBConfig struct {
-   LibpodRoot  string
-   LibpodTmp   string
-   StorageRoot string
-   StorageTmp  string
-   GraphDriver string
-   VolumePath  string
-}
-
-// readConfigFromFile reads the specified config file at `path` and attempts to
-// unmarshal its content into a Config. The config param specifies the previous
-// default config. If the TOML file at `path` only specifies a few fields, the
-// defaults from the config parameter will be used for all other fields.
-func readConfigFromFile(path string, config *Config) (*Config, error) {
-   logrus.Debugf("Reading configuration file %q", path)
-   _, err := toml.DecodeFile(path, config)
-   if err != nil {
-       return nil, fmt.Errorf("unable to decode configuration %v: %v", path, err)
-   }
-
-   // For the sake of backwards compat we need to check if the config fields
-   // with *Set suffix are set in the config. Note that the storage-related
-   // fields are NOT set in the config here but in the storage.conf OR directly
-   // by the user.
-   if config.VolumePath != "" {
-       config.VolumePathSet = true
-   }
-   if config.StaticDir != "" {
-       config.StaticDirSet = true
-   }
-   if config.TmpDir != "" {
-       config.TmpDirSet = true
-   }
-
-   return config, err
-}
-
-// Write encodes the config as TOML and writes it to the specified path.
-func (c *Config) Write(path string) error {
-   f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
-   if err != nil {
-       return errors.Wrapf(err, "error opening config file %q", path)
-   }
-
-   buffer := new(bytes.Buffer)
-   if err := toml.NewEncoder(buffer).Encode(c); err != nil {
-       return errors.Wrapf(err, "error encoding config")
-   }
-
-   if _, err := f.WriteString(buffer.String()); err != nil {
-       return errors.Wrapf(err, "error writing config %q", path)
-   }
-   return err
-}
-
-// FindConmon iterates over (*Config).ConmonPath and returns the path to the
-// first (version-)matching conmon binary. If none is found, we try a path
-// lookup of "conmon".
-func (c *Config) FindConmon() (string, error) {
-   foundOutdatedConmon := false
-   for _, path := range c.ConmonPath {
-       stat, err := os.Stat(path)
-       if err != nil {
-           continue
-       }
-       if stat.IsDir() {
-           continue
-       }
-       if err := probeConmon(path); err != nil {
-           logrus.Warnf("Conmon at %s invalid: %v", path, err)
-           foundOutdatedConmon = true
-           continue
-       }
-       logrus.Debugf("Using conmon: %q", path)
-       return path, nil
-   }
-
-   // Search the $PATH as last fallback
-   if path, err := exec.LookPath("conmon"); err == nil {
-       if err := probeConmon(path); err != nil {
-           logrus.Warnf("Conmon at %s is invalid: %v", path, err)
-           foundOutdatedConmon = true
-       } else {
-           logrus.Debugf("Using conmon from $PATH: %q", path)
-           return path, nil
-       }
-   }
-
-   if foundOutdatedConmon {
-       return "", errors.Wrapf(define.ErrConmonOutdated,
-           "please update to v%d.%d.%d or later",
-           _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
-   }
-
-   return "", errors.Wrapf(define.ErrInvalidArg,
-       "could not find a working conmon binary (configured options: %v)",
-       c.ConmonPath)
-}
-
-// probeConmon calls conmon --version and verifies it is a new enough version for
-// the runtime expectations podman currently has.
-func probeConmon(conmonBinary string) error {
-   cmd := exec.Command(conmonBinary, "--version")
-   var out bytes.Buffer
-   cmd.Stdout = &out
-   err := cmd.Run()
-   if err != nil {
-       return err
-   }
-   r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
-
-   matches := r.FindStringSubmatch(out.String())
-   if len(matches) != 4 {
-       return errors.Wrap(err, _conmonVersionFormatErr)
-   }
-   major, err := strconv.Atoi(matches[1])
-   if err != nil {
-       return errors.Wrap(err, _conmonVersionFormatErr)
-   }
-   if major < _conmonMinMajorVersion {
-       return define.ErrConmonOutdated
-   }
-   if major > _conmonMinMajorVersion {
-       return nil
-   }
-
-   minor, err := strconv.Atoi(matches[2])
-   if err != nil {
-       return errors.Wrap(err, _conmonVersionFormatErr)
-   }
-   if minor < _conmonMinMinorVersion {
-       return define.ErrConmonOutdated
-   }
-   if minor > _conmonMinMinorVersion {
-       return nil
-   }
-
-   patch, err := strconv.Atoi(matches[3])
-   if err != nil {
-       return errors.Wrap(err, _conmonVersionFormatErr)
-   }
-   if patch < _conmonMinPatchVersion {
-       return define.ErrConmonOutdated
-   }
-   if patch > _conmonMinPatchVersion {
-       return nil
-   }
-
-   return nil
-}
-
-// NewConfig creates a new Config. It starts with an empty config and, if
-// specified, merges the config at `userConfigPath`. Depending on whether we're
-// running as root or rootless, we then merge the system configuration followed
-// by merging the default config (hard-coded default in memory).
-//
-// Note that the OCI runtime is hard-set to `crun` if we're running on a system
-// with cgroups v2. Other OCI runtimes do not yet support cgroups v2. This
-// might change in the future.
-func NewConfig(userConfigPath string) (*Config, error) {
-   // Start with the default config and iteratively merge fields in the system
-   // configs.
-   config, err := defaultConfigFromMemory()
-   if err != nil {
-       return nil, err
-   }
-
-   // Now, check if the user can access system configs and merge them if needed.
-   configs, err := systemConfigs()
-   if err != nil {
-       return nil, errors.Wrapf(err, "error finding config on system")
-   }
-
-   for _, path := range configs {
-       config, err = readConfigFromFile(path, config)
-       if err != nil {
-           return nil, errors.Wrapf(err, "error reading system config %q", path)
-       }
-   }
-
-   // First, try to read the user-specified config
-   if userConfigPath != "" {
-       var err error
-       config, err = readConfigFromFile(userConfigPath, config)
-       if err != nil {
-           return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
-       }
-   }
-
-   // Since runc does not currently support cgroups v2, change the default
-   // to crun on the first run of libpod.conf.
-   // TODO: once runc has support for cgroups v2, this check should be removed.
-   if !config.CgroupCheck && rootless.IsRootless() {
-       cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
-       if err != nil {
-           return nil, err
-       }
-       if cgroupsV2 {
-           path, err := exec.LookPath("crun")
-           if err != nil {
-               // Can't find crun path so do nothing
-               logrus.Warnf("Cannot find the crun package on the host; containers might fail to run on cgroups v2 systems without crun: %q", err)
-           } else {
-               config.CgroupCheck = true
-               config.OCIRuntime = path
-           }
-       }
-   }
-
-   // If we need to, switch to cgroupfs and logger=file on rootless.
-   config.checkCgroupsAndLogger()
-
-   // Relative paths can cause nasty bugs, because core paths we use could
-   // shift between runs (or even between parts of the program - the OCI
-   // runtime uses a different working directory than we do, for example).
-   if !filepath.IsAbs(config.StaticDir) {
-       return nil, errors.Wrapf(define.ErrInvalidArg, "static directory must be an absolute path - instead got %q", config.StaticDir)
-   }
-   if !filepath.IsAbs(config.TmpDir) {
-       return nil, errors.Wrapf(define.ErrInvalidArg, "temporary directory must be an absolute path - instead got %q", config.TmpDir)
-   }
-   if !filepath.IsAbs(config.VolumePath) {
-       return nil, errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", config.VolumePath)
-   }
-
-   return config, nil
-}
-
-func rootlessConfigPath() (string, error) {
-   home, err := util.HomeDir()
-   if err != nil {
-       return "", err
-   }
-
-   return filepath.Join(home, _rootlessConfigPath), nil
-}
-
-func systemConfigs() ([]string, error) {
-   if rootless.IsRootless() {
-       path, err := rootlessConfigPath()
-       if err != nil {
-           return nil, err
-       }
-       if _, err := os.Stat(path); err == nil {
-           return []string{path}, nil
-       }
-       return nil, err
-   }
-
-   configs := []string{}
-   if _, err := os.Stat(_rootConfigPath); err == nil {
-       configs = append(configs, _rootConfigPath)
-   }
-   if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
-       configs = append(configs, _rootOverrideConfigPath)
-   }
-   return configs, nil
-}
-
-// checkCgroupsAndLogger checks if we're running rootless with the systemd
-// cgroup manager. In case the user session isn't available, we switch the
-// cgroup manager to cgroupfs and the events logger backend to 'file'.
-// Note, this only applies to rootless.
-func (c *Config) checkCgroupsAndLogger() {
-   if !rootless.IsRootless() || (c.CgroupManager !=
-       define.SystemdCgroupsManager && c.EventsLogger == "file") {
-       return
-   }
-
-   session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
-   hasSession := session != ""
-   if hasSession && strings.HasPrefix(session, "unix:path=") {
-       _, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
-       hasSession = err == nil
-   }
-
-   if !hasSession {
-       logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
-       logrus.Warningf("For using systemd, you may need to log in using a user session")
-       logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID())
-       logrus.Warningf("Falling back to --cgroup-manager=cgroupfs and --events-backend=file")
-       c.CgroupManager = define.CgroupfsCgroupsManager
-       c.EventsLogger = "file"
-   }
-}
-
-// MergeDBConfig merges the configuration from the database.
-func (c *Config) MergeDBConfig(dbConfig *DBConfig) error {
-
-   if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" {
-       if c.StorageConfig.RunRoot != dbConfig.StorageTmp &&
-           c.StorageConfig.RunRoot != "" {
-           logrus.Debugf("Overriding run root %q with %q from database",
-               c.StorageConfig.RunRoot, dbConfig.StorageTmp)
-       }
-       c.StorageConfig.RunRoot = dbConfig.StorageTmp
-   }
-
-   if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" {
-       if c.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
-           c.StorageConfig.GraphRoot != "" {
-           logrus.Debugf("Overriding graph root %q with %q from database",
-               c.StorageConfig.GraphRoot, dbConfig.StorageRoot)
-       }
-       c.StorageConfig.GraphRoot = dbConfig.StorageRoot
-   }
-
-   if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" {
-       if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
-           c.StorageConfig.GraphDriverName != "" {
-           logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
-               c.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
-       }
-       c.StorageConfig.GraphDriverName = dbConfig.GraphDriver
-   }
-
-   if !c.StaticDirSet && dbConfig.LibpodRoot != "" {
-       if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
-           logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
-       }
-       c.StaticDir = dbConfig.LibpodRoot
-   }
-
-   if !c.TmpDirSet && dbConfig.LibpodTmp != "" {
-       if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
-           logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
-       }
-       c.TmpDir = dbConfig.LibpodTmp
-       c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
-   }
-
-   if !c.VolumePathSet && dbConfig.VolumePath != "" {
-       if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
-           logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
-       }
-       c.VolumePath = dbConfig.VolumePath
-   }
-   return nil
-}
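The deleted SetOptions/MergeDBConfig machinery boils down to one precedence rule: a path recorded in the database wins only if the corresponding option was never explicitly set by the user or a config file. A minimal sketch of that rule (the function and parameter names are illustrative):

// mergePath applies the backwards-compat rule from MergeDBConfig: keep the
// current value when it was explicitly set (or the DB has nothing), and
// otherwise adopt the value recorded in the database.
func mergePath(current, fromDB string, explicitlySet bool) string {
    if explicitlySet || fromDB == "" {
        return current
    }
    return fromDB
}

That rule is what lets a new libpod binary keep using the directories an older version recorded in the database, instead of silently switching to new defaults and losing track of existing containers.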
diff --git a/libpod/config/config_test.go b/libpod/config/config_test.go
deleted file mode 100644
index 24620ce0e..000000000
--- a/libpod/config/config_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package config
-
-import (
-   "reflect"
-   "testing"
-
-   "github.com/containers/libpod/libpod/define"
-   "github.com/containers/storage"
-   "github.com/stretchr/testify/assert"
-)
-
-func TestEmptyConfig(t *testing.T) {
-   // Make sure that we can read empty configs
-   config, err := readConfigFromFile("testdata/empty.conf", &Config{})
-   assert.NotNil(t, config)
-   assert.Nil(t, err)
-}
-
-func TestDefaultLibpodConf(t *testing.T) {
-   // Make sure that we can read the default libpod.conf
-   config, err := readConfigFromFile("testdata/libpod.conf", &Config{})
-   assert.NotNil(t, config)
-   assert.Nil(t, err)
-}
-
-func TestMergeEmptyAndDefaultMemoryConfig(t *testing.T) {
-   // Make sure that when we merge the default config into an empty one we
-   // effectively get the default config.
-   defaultConfig, err := defaultConfigFromMemory()
-   assert.NotNil(t, defaultConfig)
-   assert.Nil(t, err)
-   defaultConfig.StateType = define.InvalidStateStore
-   defaultConfig.StorageConfig = storage.StoreOptions{}
-
-   emptyConfig, err := readConfigFromFile("testdata/empty.conf", defaultConfig)
-   assert.NotNil(t, emptyConfig)
-   assert.Nil(t, err)
-
-   equal := reflect.DeepEqual(emptyConfig, defaultConfig)
-   assert.True(t, equal)
-}
-
-func TestMergeEmptyAndLibpodConfig(t *testing.T) {
-   // Make sure that when we merge a libpod config into an empty one we
-   // effectively get the libpod config back.
-   libpodConfig, err := readConfigFromFile("testdata/libpod.conf", &Config{})
-   assert.NotNil(t, libpodConfig)
-   assert.Nil(t, err)
-   libpodConfig.StateType = define.InvalidStateStore
-   libpodConfig.StorageConfig = storage.StoreOptions{}
-
-   emptyConfig, err := readConfigFromFile("testdata/empty.conf", libpodConfig)
-   assert.NotNil(t, emptyConfig)
-   assert.Nil(t, err)
-
-   equal := reflect.DeepEqual(emptyConfig, libpodConfig)
-   assert.True(t, equal)
-}
diff --git a/libpod/config/default.go b/libpod/config/default.go
deleted file mode 100644
index c4a4efdaf..000000000
--- a/libpod/config/default.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package config
-
-import (
-   "os"
-   "path/filepath"
-
-   "github.com/containers/libpod/libpod/define"
-   "github.com/containers/libpod/libpod/events"
-   "github.com/containers/libpod/pkg/cgroups"
-   "github.com/containers/libpod/pkg/rootless"
-   "github.com/containers/libpod/pkg/util"
-   "github.com/containers/storage"
-   "github.com/pkg/errors"
-   "github.com/sirupsen/logrus"
-)
-
-const (
-   // _defaultGraphRoot points to the default path of the graph root.
-   _defaultGraphRoot = "/var/lib/containers/storage"
-   // _defaultRootlessSignaturePolicyPath points to the default path of the
-   // rootless policy.json file.
-   _defaultRootlessSignaturePolicyPath = ".config/containers/policy.json"
-)
-
-// defaultConfigFromMemory returns a default libpod configuration. Note that the
-// config is different for root and rootless. It also parses the storage.conf.
-func defaultConfigFromMemory() (*Config, error) {
-   c := new(Config)
-   tmp, err := defaultTmpDir()
-   if err != nil {
-       return nil, err
-   }
-   c.TmpDir = tmp
-
-   c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log")
-
-   storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
-   if err != nil {
-       return nil, err
-   }
-   if storeOpts.GraphRoot == "" {
-       logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot)
-       storeOpts.GraphRoot = _defaultGraphRoot
-   }
-   c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod")
-   c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
-   c.StorageConfig = storeOpts
-
-   c.ImageDefaultTransport = _defaultTransport
-   c.StateType = define.BoltDBStateStore
-   c.OCIRuntime = "runc"
-
-   // If we're running on cgroups v2, default to using crun.
-   if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
-       c.OCIRuntime = "crun"
-   }
-
-   c.OCIRuntimes = map[string][]string{
-       "runc": {
-           "/usr/bin/runc",
-           "/usr/sbin/runc",
-           "/usr/local/bin/runc",
-           "/usr/local/sbin/runc",
-           "/sbin/runc",
-           "/bin/runc",
-           "/usr/lib/cri-o-runc/sbin/runc",
-           "/run/current-system/sw/bin/runc",
-       },
-       "crun": {
-           "/usr/bin/crun",
-           "/usr/sbin/crun",
-           "/usr/local/bin/crun",
-           "/usr/local/sbin/crun",
-           "/sbin/crun",
-           "/bin/crun",
-           "/run/current-system/sw/bin/crun",
-       },
-   }
-   c.ConmonPath = []string{
-       "/usr/libexec/podman/conmon",
-       "/usr/local/libexec/podman/conmon",
-       "/usr/local/lib/podman/conmon",
-       "/usr/bin/conmon",
-       "/usr/sbin/conmon",
-       "/usr/local/bin/conmon",
-       "/usr/local/sbin/conmon",
-       "/run/current-system/sw/bin/conmon",
-   }
-   c.ConmonEnvVars = []string{
-       "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-   }
-   c.RuntimeSupportsJSON = []string{
-       "crun",
-       "runc",
-   }
-   c.RuntimeSupportsNoCgroups = []string{"crun"}
-   c.InitPath = define.DefaultInitPath
-   c.CgroupManager = define.SystemdCgroupsManager
-   c.MaxLogSize = -1
-   c.NoPivotRoot = false
-   c.CNIConfigDir = _etcDir + "/cni/net.d/"
-   c.CNIPluginDir = []string{
-       "/usr/libexec/cni",
-       "/usr/lib/cni",
-       "/usr/local/lib/cni",
-       "/opt/cni/bin",
-   }
-   c.CNIDefaultNetwork = "podman"
-   c.InfraCommand = define.DefaultInfraCommand
-   c.InfraImage = define.DefaultInfraImage
-   c.EnablePortReservation = true
-   c.EnableLabeling = true
-   c.NumLocks = 2048
-   c.EventsLogger = events.DefaultEventerType.String()
-   c.DetachKeys = define.DefaultDetachKeys
-   // TODO - ideally we should expose a `type LockType string` along with
-   // constants.
-   c.LockType = "shm"
-
-   if rootless.IsRootless() {
-       home, err := util.HomeDir()
-       if err != nil {
-           return nil, err
-       }
-       sigPath := filepath.Join(home, _defaultRootlessSignaturePolicyPath)
-       if _, err := os.Stat(sigPath); err == nil {
-           c.SignaturePolicyPath = sigPath
-       }
-   }
-   return c, nil
-}
-
-func defaultTmpDir() (string, error) {
-   if !rootless.IsRootless() {
-       return "/var/run/libpod", nil
-   }
-
-   runtimeDir, err := util.GetRuntimeDir()
-   if err != nil {
-       return "", err
-   }
-   libpodRuntimeDir := filepath.Join(runtimeDir, "libpod")
-
-   if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
-       if !os.IsExist(err) {
-           return "", errors.Wrapf(err, "cannot mkdir %s", libpodRuntimeDir)
-       } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
-           // The directory already exists, just set the sticky bit
-           return "", errors.Wrapf(err, "could not set sticky bit on %s", libpodRuntimeDir)
-       }
-   }
-   return filepath.Join(libpodRuntimeDir, "tmp"), nil
-}
diff --git a/libpod/config/testdata/empty.conf b/libpod/config/testdata/empty.conf
deleted file mode 100644
index e69de29bb..000000000
--- a/libpod/config/testdata/empty.conf
+++ /dev/null
diff --git a/libpod/config/testdata/libpod.conf b/libpod/config/testdata/libpod.conf
deleted file mode 120000
index 17d09fe4a..000000000
--- a/libpod/config/testdata/libpod.conf
+++ /dev/null
@@ -1 +0,0 @@
-../../../libpod.conf
\ No newline at end of file
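With the libpod/config package gone, call sites move from libpod's flat Config to the nested one in containers/common (see the checkRuntimeConfig hunk earlier: rt.config.StaticDir becomes rt.config.Engine.StaticDir). Below is a self-contained sketch of the shape change, using stand-in structs rather than the real containers/common types:

// Old (libpod/config): flat fields directly on Config.
type oldConfig struct {
    StaticDir  string
    TmpDir     string
    VolumePath string
}

// New (containers/common pkg/config, as implied by the diff): engine-level
// options live under an Engine sub-struct.
type engineConfig struct {
    StaticDir  string
    TmpDir     string
    VolumePath string
}

type newConfig struct {
    Engine engineConfig
}

// migrate copies the flat fields into the nested layout, mirroring what the
// updated call sites (rt.config.Engine.StaticDir, etc.) expect.
func migrate(o oldConfig) newConfig {
    return newConfig{Engine: engineConfig{
        StaticDir:  o.StaticDir,
        TmpDir:     o.TmpDir,
        VolumePath: o.VolumePath,
    }}
}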
diff --git a/libpod/container.go b/libpod/container.go
index dbd15e55f..c1deb95f9 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -11,6 +11,7 @@ import (
    "github.com/containernetworking/cni/pkg/types"
    cnitypes "github.com/containernetworking/cni/pkg/types/current"
+   "github.com/containers/common/pkg/config"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/libpod/lock"
@@ -181,9 +182,13 @@ type ContainerState struct {
    PID int `json:"pid,omitempty"`
    // ConmonPID is the PID of the container's conmon
    ConmonPID int `json:"conmonPid,omitempty"`
-   // ExecSessions contains active exec sessions for container
-   // Exec session ID is mapped to PID of exec process
-   ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
+   // ExecSessions contains all exec sessions that are associated with this
+   // container.
+   ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"`
+   // LegacyExecSessions are legacy exec sessions from older versions of
+   // Podman.
+   // These are DEPRECATED and will be removed in a future release.
+   LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"`
    // NetworkStatus contains the configuration results for all networks
    // the pod is attached to. Only populated if we created a network
    // namespace for the container, and the network namespace is currently
@@ -214,13 +219,6 @@ type ContainerState struct {
    containerPlatformState
}

-// ExecSession contains information on an active exec session
-type ExecSession struct {
-   ID      string   `json:"id"`
-   Command []string `json:"command"`
-   PID     int      `json:"pid"`
-}
-
 // ContainerConfig contains all information that was used to create the
 // container. It may not be changed once created.
 // It is stored, read-only, on disk
@@ -239,6 +237,12 @@ type ContainerConfig struct {
    // container has been created with.
    CreateCommand []string `json:"CreateCommand,omitempty"`

+   // RawImageName is the raw and unprocessed name of the image when creating
+   // the container (as specified by the user). May or may not be set. One
+   // use case for storing this data is auto-updates, where we need the
+   // _exact_ name and not some normalized instance of it.
+   RawImageName string `json:"RawImageName,omitempty"`
+
    // TODO consider breaking these subsections up into smaller structs

    // UID/GID mappings used by the storage
@@ -503,11 +507,17 @@ func (c *Container) Namespace() string {
    return c.config.Namespace
}

-// Image returns the ID and name of the image used as the container's rootfs
+// Image returns the ID and name of the image used as the container's rootfs.
 func (c *Container) Image() (string, string) {
    return c.config.RootfsImageID, c.config.RootfsImageName
}

+// RawImageName returns the unprocessed and not-normalized user-specified image
+// name.
+func (c *Container) RawImageName() string {
+   return c.config.RawImageName
+}
+
 // ShmDir returns the sources path to be mounted on /dev/shm in container
 func (c *Container) ShmDir() string {
    return c.config.ShmDir
@@ -932,13 +942,13 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
    session, ok := c.state.ExecSessions[id]
    if !ok {
-       return nil, errors.Wrapf(define.ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID())
+       return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
    }

    returnSession := new(ExecSession)
-   returnSession.ID = session.ID
-   returnSession.Command = session.Command
-   returnSession.PID = session.PID
+   if err := JSONDeepCopy(session, returnSession); err != nil {
+       return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
+   }

    return returnSession, nil
}
@@ -1074,10 +1084,10 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in

 // CGroupPath returns a cgroups "path" for a given container.
 func (c *Container) CGroupPath() (string, error) {
-   switch c.runtime.config.CgroupManager {
-   case define.CgroupfsCgroupsManager:
+   switch c.runtime.config.Engine.CgroupManager {
+   case config.CgroupfsCgroupsManager:
        return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil
-   case define.SystemdCgroupsManager:
+   case config.SystemdCgroupsManager:
        if rootless.IsRootless() {
            uid := rootless.GetRootlessUID()
            parts := strings.SplitN(c.config.CgroupParent, "/", 2)
@@ -1091,7 +1101,7 @@ func (c *Container) CGroupPath() (string, error) {
        }
        return filepath.Join(c.config.CgroupParent, createUnitName("libpod", c.ID())), nil
    default:
-       return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.CgroupManager)
+       return "", errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager %s in use", c.runtime.config.Engine.CgroupManager)
    }
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 039619ea6..967180437 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -9,10 +9,8 @@ import (
    "os"
    "time"

-   "github.com/containers/common/pkg/capabilities"
    "github.com/containers/libpod/libpod/define"
    "github.com/containers/libpod/libpod/events"
-   "github.com/containers/storage/pkg/stringid"
    "github.com/opentracing/opentracing-go"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
@@ -215,142 +213,6 @@ func (c *Container) Kill(signal uint) error {
    return c.save()
}

-// Exec starts a new process inside the container.
-// Returns an exit code and an error. If Exec was not able to exec in the
-// container before a failure, an exit code of define.ExecErrorCodeCannotInvoke
-// is returned. If another generic error happens, an exit code of
-// define.ExecErrorCodeGeneric is returned.
-// Sometimes, the $RUNTIME exec call errors, and if that is the case, the exit
-// code is the exit code of the call.
-// Otherwise, the exit code will be the exit code of the executed call inside
-// of the container.
-// TODO investigate allowing exec without attaching
-func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs uint, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) {
-   var capList []string
-   if !c.batched {
-       c.lock.Lock()
-       defer c.lock.Unlock()
-
-       if err := c.syncContainer(); err != nil {
-           return define.ExecErrorCodeCannotInvoke, err
-       }
-   }
-
-   if c.state.State != define.ContainerStateRunning {
-       return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
-   }
-
-   if privileged || c.config.Privileged {
-       capList = capabilities.AllCapabilities()
-   }
-
-   // Generate exec session ID.
-   // Ensure we don't conflict with an existing session ID.
-   sessionID := stringid.GenerateNonCryptoID()
-   found := true
-   // This really ought to be a do-while, but Go doesn't have those...
-   for found {
-       found = false
-       for id := range c.state.ExecSessions {
-           if id == sessionID {
-               found = true
-               break
-           }
-       }
-       if found {
-           sessionID = stringid.GenerateNonCryptoID()
-       }
-   }
-
-   logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID)
-   if err := c.createExecBundle(sessionID); err != nil {
-       return define.ExecErrorCodeCannotInvoke, err
-   }
-
-   defer func() {
-       // cleanup exec bundle
-       if err := c.cleanupExecBundle(sessionID); err != nil {
-           logrus.Errorf("Error removing exec session %s bundle path for container %s: %v", sessionID, c.ID(), err)
-       }
-   }()
-
-   opts := new(ExecOptions)
-   opts.Cmd = cmd
-   opts.CapAdd = capList
-   opts.Env = env
-   opts.Terminal = tty
-   opts.Cwd = workDir
-   opts.User = user
-   opts.Streams = streams
-   opts.PreserveFDs = preserveFDs
-   opts.Resize = resize
-   opts.DetachKeys = detachKeys
-
-   pid, attachChan, err := c.ociRuntime.ExecContainer(c, sessionID, opts)
-   if err != nil {
-       ec := define.ExecErrorCodeGeneric
-       // Conmon will pass a non-zero exit code from the runtime as a pid
-       // here. We differentiate a pid from an exit code by sending it as
-       // negative, so reverse that change and return the exit code the
-       // runtime failed with.
-       if pid < 0 {
-           ec = -1 * pid
-       }
-       return ec, err
-   }
-
-   // We have the PID, add it to state
-   if c.state.ExecSessions == nil {
-       c.state.ExecSessions = make(map[string]*ExecSession)
-   }
-   session := new(ExecSession)
-   session.ID = sessionID
-   session.Command = cmd
-   session.PID = pid
-   c.state.ExecSessions[sessionID] = session
-   if err := c.save(); err != nil {
-       // Now we have a PID but we can't save it in the DB
-       // TODO handle this better
-       return define.ExecErrorCodeGeneric, errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID())
-   }
-   c.newContainerEvent(events.Exec)
-   logrus.Debugf("Successfully started exec session %s in container %s", sessionID, c.ID())
-
-   // Unlock so other processes can use the container
-   if !c.batched {
-       c.lock.Unlock()
-   }
-
-   lastErr := <-attachChan
-
-   exitCode, err := c.readExecExitCode(sessionID)
-   if err != nil {
-       if lastErr != nil {
-           logrus.Errorf(lastErr.Error())
-       }
-       lastErr = err
-   }
-   if exitCode != 0 {
-       if lastErr != nil {
-           logrus.Errorf(lastErr.Error())
-       }
-       lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCode)
-   }
-
-   // Lock again
-   if !c.batched {
-       c.lock.Lock()
-   }
-
-   // Sync the container again to pick up changes in state
-   if err := c.syncContainer(); err != nil {
-       logrus.Errorf("error syncing container %s state to remove exec session %s", c.ID(), sessionID)
-       return exitCode, lastErr
-   }
-
-   // Remove the exec session from state
-   delete(c.state.ExecSessions, sessionID)
-   if err := c.save(); err != nil {
-       logrus.Errorf("Error removing exec session %s from container %s state: %v", sessionID, c.ID(), err)
-   }
-   return exitCode, lastErr
-}
-
 // AttachStreams contains streams that will be attached to the container
 type AttachStreams struct {
    // OutputStream will be attached to container's STDOUT
@@ -493,7 +355,11 @@ func (c *Container) Unmount(force bool) error {
        if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
            return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
        }
-       if len(c.state.ExecSessions) != 0 {
+       execSessions, err := c.getActiveExecSessions()
+       if err != nil {
+           return err
+       }
+       if len(execSessions) != 0 {
            return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
        }
        return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
@@ -674,15 +540,15 @@ func (c *Container) Cleanup(ctx context.Context) error {

    // If we didn't restart, we perform a normal cleanup

-   // Reap exec sessions first.
-   if err := c.reapExecSessions(); err != nil {
+   // Check for running exec sessions
+   sessions, err := c.getActiveExecSessions()
+   if err != nil {
        return err
    }
-
-   // Check if we have active exec sessions after reaping.
-   if len(c.state.ExecSessions) != 0 {
+   if len(sessions) > 0 {
        return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
    }
+
    defer c.newContainerEvent(events.Cleanup)
    return c.cleanup(ctx)
}
@@ -757,114 +623,11 @@ func (c *Container) Sync() error {
    return nil
}

-// Refresh refreshes a container's state in the database, restarting the
-// container if it is running
+// Refresh is DEPRECATED and REMOVED.
func (c *Container) Refresh(ctx context.Context) error { - if !c.batched { - c.lock.Lock() - defer c.lock.Unlock() - - if err := c.syncContainer(); err != nil { - return err - } - } - - if c.state.State == define.ContainerStateRemoving { - return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed") - } - - wasCreated := false - if c.state.State == define.ContainerStateCreated { - wasCreated = true - } - wasRunning := false - if c.state.State == define.ContainerStateRunning { - wasRunning = true - } - wasPaused := false - if c.state.State == define.ContainerStatePaused { - wasPaused = true - } - - // First, unpause the container if it's paused - if c.state.State == define.ContainerStatePaused { - if err := c.unpause(); err != nil { - return err - } - } - - // Next, if the container is running, stop it - if c.state.State == define.ContainerStateRunning { - if err := c.stop(c.config.StopTimeout); err != nil { - return err - } - } - - // If there are active exec sessions, we need to kill them - if len(c.state.ExecSessions) > 0 { - logrus.Infof("Killing %d exec sessions in container %s. They will not be restored after refresh.", - len(c.state.ExecSessions), c.ID()) - } - for _, session := range c.state.ExecSessions { - if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { - return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) - } - } - - // If the container is in ContainerStateStopped, we need to delete it - // from the runtime and clear conmon state - if c.state.State == define.ContainerStateStopped { - if err := c.delete(ctx); err != nil { - return err - } - if err := c.removeConmonFiles(); err != nil { - return err - } - } - - // Fire cleanup code one more time unconditionally to ensure we are good - // to refresh - if err := c.cleanup(ctx); err != nil { - return err - } - - logrus.Debugf("Resetting state of container %s", c.ID()) - - // We've finished unwinding the container back to its initial state - // Now safe to refresh container state - if err := resetState(c.state); err != nil { - return errors.Wrapf(err, "error resetting state of container %s", c.ID()) - } - if err := c.refresh(); err != nil { - return err - } - - logrus.Debugf("Successfully refresh container %s state", c.ID()) - - // Initialize the container if it was created in runc - if wasCreated || wasRunning || wasPaused { - if err := c.prepare(); err != nil { - return err - } - if err := c.init(ctx, false); err != nil { - return err - } - } - - // If the container was running before, start it - if wasRunning || wasPaused { - if err := c.start(); err != nil { - return err - } - } - - // If the container was paused before, re-pause it - if wasPaused { - if err := c.pause(); err != nil { - return err - } - } - return nil + // This has been deprecated for a long while, and is in the process of + // being removed. 
+	return define.ErrNotImplemented
 }
 
 // ContainerCheckpointOptions is a struct used to pass the parameters
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
new file mode 100644
index 000000000..5469462f8
--- /dev/null
+++ b/libpod/container_exec.go
@@ -0,0 +1,816 @@
+package libpod
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"github.com/containers/common/pkg/capabilities"
+	"github.com/containers/libpod/libpod/define"
+	"github.com/containers/libpod/libpod/events"
+	"github.com/containers/storage/pkg/stringid"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// ExecConfig contains the configuration of an exec session
+type ExecConfig struct {
+	// Command is the command that will be invoked in the exec session.
+	// Must not be empty.
+	Command []string `json:"command"`
+	// Terminal is whether the exec session will allocate a pseudoterminal.
+	Terminal bool `json:"terminal,omitempty"`
+	// AttachStdin is whether the STDIN stream will be forwarded to the exec
+	// session's first process when attaching. Only available if Terminal is
+	// false.
+	AttachStdin bool `json:"attachStdin,omitempty"`
+	// AttachStdout is whether the STDOUT stream will be forwarded to the
+	// exec session's first process when attaching. Only available if
+	// Terminal is false.
+	AttachStdout bool `json:"attachStdout,omitempty"`
+	// AttachStderr is whether the STDERR stream will be forwarded to the
+	// exec session's first process when attaching. Only available if
+	// Terminal is false.
+	AttachStderr bool `json:"attachStderr,omitempty"`
+	// DetachKeys are keys that will be used to detach from the exec
+	// session. Here, nil will use the default detach keys, where a pointer
+	// to the empty string ("") will disable detaching via detach keys.
+	DetachKeys *string `json:"detachKeys,omitempty"`
+	// Environment is a set of environment variables that will be set for
+	// the first process started by the exec session.
+	Environment map[string]string `json:"environment,omitempty"`
+	// Privileged is whether the exec session will be privileged - that is,
+	// will be granted additional capabilities.
+	Privileged bool `json:"privileged,omitempty"`
+	// User is the user the exec session will be run as.
+	// If set to "" the exec session will be started as the same user the
+	// container was started as.
+	User string `json:"user,omitempty"`
+	// WorkDir is the working directory for the first process that will be
+	// launched by the exec session.
+	// If set to "" the exec session will be started in / within the
+	// container.
+	WorkDir string `json:"workDir,omitempty"`
+	// PreserveFDs indicates that a number of extra FDs from the process
+	// running libpod will be passed into the container. These are assumed
+	// to begin at 3 (immediately after the standard streams). The number
+	// given is the number that will be passed into the exec session,
+	// starting at 3.
+	PreserveFDs uint `json:"preserveFds,omitempty"`
+}
+
+// ExecSession contains information on a single exec session attached to a given
+// container.
+type ExecSession struct {
+	// Id is the ID of the exec session.
+	// Named somewhat strangely to not conflict with ID().
+	Id string `json:"id"`
+	// ContainerId is the ID of the container this exec session belongs to.
+	// Named somewhat strangely to not conflict with ContainerID().
+	ContainerId string `json:"containerId"`
+
+	// State is the state of the exec session.
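+	// A session is created in ExecStateCreated, moves to
+	// ExecStateRunning once started, and finishes in ExecStateStopped.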
+ State define.ContainerExecStatus `json:"state"` + // PID is the PID of the process created by the exec session. + PID int `json:"pid,omitempty"` + // ExitCode is the exit code of the exec session, if it has exited. + ExitCode int `json:"exitCode,omitempty"` + + // Config is the configuration of this exec session. + // Cannot be empty. + Config *ExecConfig `json:"config"` +} + +// ID returns the ID of an exec session. +func (e *ExecSession) ID() string { + return e.Id +} + +// ContainerID returns the ID of the container this exec session was started in. +func (e *ExecSession) ContainerID() string { + return e.ContainerId +} + +// Inspect inspects the given exec session and produces detailed output on its +// configuration and current state. +func (e *ExecSession) Inspect() (*define.InspectExecSession, error) { + if e.Config == nil { + return nil, errors.Wrapf(define.ErrInternal, "given exec session does not have a configuration block") + } + + output := new(define.InspectExecSession) + output.CanRemove = e.State != define.ExecStateRunning + output.ContainerID = e.ContainerId + if e.Config.DetachKeys != nil { + output.DetachKeys = *e.Config.DetachKeys + } + output.ExitCode = e.ExitCode + output.ID = e.Id + output.OpenStderr = e.Config.AttachStderr + output.OpenStdin = e.Config.AttachStdin + output.OpenStdout = e.Config.AttachStdout + output.Running = e.State == define.ExecStateRunning + output.Pid = e.PID + output.ProcessConfig = new(define.InspectExecProcess) + if len(e.Config.Command) > 0 { + output.ProcessConfig.Entrypoint = e.Config.Command[0] + if len(e.Config.Command) > 1 { + output.ProcessConfig.Arguments = make([]string, 0, len(e.Config.Command)-1) + output.ProcessConfig.Arguments = append(output.ProcessConfig.Arguments, e.Config.Command[1:]...) + } + } + output.ProcessConfig.Privileged = e.Config.Privileged + output.ProcessConfig.Tty = e.Config.Terminal + output.ProcessConfig.User = e.Config.User + + return output, nil +} + +// legacyExecSession contains information on an active exec session. It is a +// holdover from a previous Podman version and is DEPRECATED. +type legacyExecSession struct { + ID string `json:"id"` + Command []string `json:"command"` + PID int `json:"pid"` +} + +// ExecCreate creates a new exec session for the container. +// The session is not started. The ID of the new exec session will be returned. +func (c *Container) ExecCreate(config *ExecConfig) (string, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return "", err + } + } + + // Verify our config + if config == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate") + } + if len(config.Command) == 0 { + return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session") + } + if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) { + return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal") + } + + // Verify that we are in a good state to continue + if !c.ensureState(define.ContainerStateRunning) { + return "", errors.Wrapf(define.ErrCtrStateInvalid, "can only create exec sessions on running containers") + } + + // Generate an ID for our new exec session + sessionID := stringid.GenerateNonCryptoID() + found := true + // This really ought to be a do-while, but Go doesn't have those... 
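+	// Emulate one: generate an ID, rescan all existing session IDs for
+	// a collision, and regenerate until the new ID is unique.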
+ for found { + found = false + for id := range c.state.ExecSessions { + if id == sessionID { + found = true + break + } + } + if found { + sessionID = stringid.GenerateNonCryptoID() + } + } + + // Make our new exec session + session := new(ExecSession) + session.Id = sessionID + session.ContainerId = c.ID() + session.State = define.ExecStateCreated + session.Config = new(ExecConfig) + if err := JSONDeepCopy(config, session.Config); err != nil { + return "", errors.Wrapf(err, "error copying exec configuration into exec session") + } + + if c.state.ExecSessions == nil { + c.state.ExecSessions = make(map[string]*ExecSession) + } + + // Need to add to container state and exec session registry + c.state.ExecSessions[session.ID()] = session + if err := c.save(); err != nil { + return "", err + } + if err := c.runtime.state.AddExecSession(c, session); err != nil { + return "", err + } + + logrus.Infof("Created exec session %s in container %s", session.ID(), c.ID()) + + return sessionID, nil +} + +// ExecStart starts an exec session in the container, but does not attach to it. +// Returns immediately upon starting the exec session. +func (c *Container) ExecStart(sessionID string) error { + // Will be implemented in part 2, migrating Start and implementing + // detached Start. + return define.ErrNotImplemented +} + +// ExecStartAndAttach starts and attaches to an exec session in a container. +// TODO: Should we include detach keys in the signature to allow override? +// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr? +func (c *Container) ExecStartAndAttach(sessionID string, streams *AttachStreams) error { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return err + } + } + + // Verify that we are in a good state to continue + if !c.ensureState(define.ContainerStateRunning) { + return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running") + } + + session, ok := c.state.ExecSessions[sessionID] + if !ok { + return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID) + } + + if session.State != define.ExecStateCreated { + return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String()) + } + + logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID()) + + // TODO: check logic here - should we set Privileged if the container is + // privileged? 
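+	// As written, either the session or the container being privileged
+	// grants the full capability list; otherwise capList stays nil and
+	// no extra capabilities are added.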
+ var capList []string + if session.Config.Privileged || c.config.Privileged { + capList = capabilities.AllCapabilities() + } + + user := c.config.User + if session.Config.User != "" { + user = session.Config.User + } + + if err := c.createExecBundle(session.ID()); err != nil { + return err + } + + opts := new(ExecOptions) + opts.Cmd = session.Config.Command + opts.CapAdd = capList + opts.Env = session.Config.Environment + opts.Terminal = session.Config.Terminal + opts.Cwd = session.Config.WorkDir + opts.User = user + opts.Streams = streams + opts.PreserveFDs = session.Config.PreserveFDs + opts.DetachKeys = session.Config.DetachKeys + + pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts) + if err != nil { + return err + } + + c.newContainerEvent(events.Exec) + logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID()) + + var lastErr error + + // Update and save session to reflect PID/running + session.PID = pid + session.State = define.ExecStateRunning + + if err := c.save(); err != nil { + lastErr = err + } + + // Unlock so other processes can use the container + if !c.batched { + c.lock.Unlock() + } + + tmpErr := <-attachChan + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = tmpErr + + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode) + + // Lock again + if !c.batched { + c.lock.Lock() + } + + // Sync the container to pick up state changes + if err := c.syncContainer(); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID()) + } + + // Update status + // Since we did a syncContainer, the old session has been overwritten. + // Grab a fresh one from the database. + session, ok = c.state.ExecSessions[sessionID] + if !ok { + // Exec session already removed. + logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID) + return nil + } + session.State = define.ExecStateStopped + session.ExitCode = exitCode + session.PID = 0 + + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + // Clean up after ourselves + if err := c.cleanupExecBundle(session.ID()); err != nil { + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} + +// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session. +func (c *Container) ExecHTTPStartAndAttach(sessionID string) error { + // Will be implemented in part 2, migrating Start. + return define.ErrNotImplemented +} + +// ExecStop stops an exec session in the container. +// If a timeout is provided, it will be used; otherwise, the timeout will +// default to the stop timeout of the container. +// Cleanup will be invoked automatically once the session is stopped. 
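+// A hypothetical caller (ctr and sessionID assumed to be in scope) might
+// stop a session with a ten-second grace period, for example:
+//
+//	timeout := uint(10)
+//	if err := ctr.ExecStop(sessionID, &timeout); err != nil {
+//		logrus.Errorf("Error stopping exec session: %v", err)
+//	}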
+func (c *Container) ExecStop(sessionID string, timeout *uint) error {
+	if !c.batched {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		if err := c.syncContainer(); err != nil {
+			return err
+		}
+	}
+
+	session, ok := c.state.ExecSessions[sessionID]
+	if !ok {
+		return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+	}
+
+	if session.State != define.ExecStateRunning {
+		return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String())
+	}
+
+	logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID())
+
+	finalTimeout := c.StopTimeout()
+	if timeout != nil {
+		finalTimeout = *timeout
+	}
+
+	// Stop the session
+	if err := c.ociRuntime.ExecStopContainer(c, session.ID(), finalTimeout); err != nil {
+		return err
+	}
+
+	var cleanupErr error
+
+	// Retrieve exit code and update status
+	exitCode, err := c.readExecExitCode(session.ID())
+	if err != nil {
+		cleanupErr = err
+	}
+	session.ExitCode = exitCode
+	session.PID = 0
+	session.State = define.ExecStateStopped
+
+	if err := c.save(); err != nil {
+		if cleanupErr != nil {
+			logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr)
+		}
+		cleanupErr = err
+	}
+
+	if err := c.cleanupExecBundle(session.ID()); err != nil {
+		if cleanupErr != nil {
+			logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr)
+		}
+		cleanupErr = err
+	}
+
+	return cleanupErr
+}
+
+// ExecCleanup cleans up an exec session in the container, removing temporary
+// files associated with it.
+func (c *Container) ExecCleanup(sessionID string) error {
+	if !c.batched {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		if err := c.syncContainer(); err != nil {
+			return err
+		}
+	}
+
+	session, ok := c.state.ExecSessions[sessionID]
+	if !ok {
+		return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+	}
+
+	if session.State == define.ExecStateRunning {
+		return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
+	}
+
+	logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID())
+
+	return c.cleanupExecBundle(session.ID())
+}
+
+// ExecRemove removes an exec session in the container.
+// If force is given, the session will be stopped first if it is running.
+func (c *Container) ExecRemove(sessionID string, force bool) error {
+	if !c.batched {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		if err := c.syncContainer(); err != nil {
+			return err
+		}
+	}
+
+	session, ok := c.state.ExecSessions[sessionID]
+	if !ok {
+		return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+	}
+
+	logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
+
+	// Update status of exec session if running, so we can check if it
+	// stopped in the meantime.
+	if session.State == define.ExecStateRunning {
+		stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
+		if err != nil {
+			return err
+		}
+		if stopped {
+			session.State = define.ExecStateStopped
+			// TODO: should we retrieve exit code here?
+			// TODO: Might be worth saving state here.
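+			// For now the updated state lives only in memory;
+			// the removal below deletes the session outright.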
+		}
+	}
+
+	if session.State == define.ExecStateRunning {
+		if !force {
+			return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID())
+		}
+
+		// Stop the session
+		if err := c.ociRuntime.ExecStopContainer(c, session.ID(), c.StopTimeout()); err != nil {
+			return err
+		}
+
+		if err := c.cleanupExecBundle(session.ID()); err != nil {
+			return err
+		}
+	}
+
+	// First remove exec session from DB.
+	if err := c.runtime.state.RemoveExecSession(session); err != nil {
+		return err
+	}
+	// Next, remove it from the container and save state
+	delete(c.state.ExecSessions, sessionID)
+	if err := c.save(); err != nil {
+		return err
+	}
+
+	logrus.Debugf("Successfully removed container %s exec session %s", c.ID(), session.ID())
+
+	return nil
+}
+
+// ExecResize resizes the TTY of the given exec session. Only available if the
+// exec session created a TTY.
+func (c *Container) ExecResize(sessionID string, newSize remotecommand.TerminalSize) error {
+	if !c.batched {
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		if err := c.syncContainer(); err != nil {
+			return err
+		}
+	}
+
+	session, ok := c.state.ExecSessions[sessionID]
+	if !ok {
+		return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
+	}
+
+	logrus.Infof("Resizing container %s exec session %s to %+v", c.ID(), session.ID(), newSize)
+
+	if session.State != define.ExecStateRunning {
+		return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID())
+	}
+
+	return c.ociRuntime.ExecAttachResize(c, sessionID, newSize)
+}
+
+// Exec emulates the old Libpod exec API, providing a single call to create,
+// run, and remove an exec session. Returns exit code and error. Exit code is
+// not guaranteed to be set sanely if error is not nil.
+func (c *Container) Exec(config *ExecConfig, streams *AttachStreams, resize <-chan remotecommand.TerminalSize) (int, error) {
+	sessionID, err := c.ExecCreate(config)
+	if err != nil {
+		return -1, err
+	}
+
+	// Start resizing if we have a resize channel.
+	// This goroutine is likely to leak, given that we cannot close it
+	// here. Not a big deal, since it should run for as long as the Podman
+	// process does. Could be a big deal for `podman service` but we don't
+	// need this API there.
+	// TODO: Refactor so this is closed here, before we remove the exec
+	// session.
+	if resize != nil {
+		go func() {
+			logrus.Debugf("Sending resize events to exec session %s", sessionID)
+			for resizeRequest := range resize {
+				if err := c.ExecResize(sessionID, resizeRequest); err != nil {
+					// Assume the exec session went down.
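+					// Log the failure once and stop
+					// forwarding resize events.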
+ logrus.Warnf("Error resizing exec session %s: %v", sessionID, err) + return + } + } + }() + } + + if err := c.ExecStartAndAttach(sessionID, streams); err != nil { + return -1, err + } + + session, err := c.ExecSession(sessionID) + if err != nil { + return -1, err + } + exitCode := session.ExitCode + if err := c.ExecRemove(sessionID, false); err != nil { + return -1, err + } + + if exitCode != 0 { + return exitCode, errors.Wrapf(define.ErrOCIRuntime, "exec session exited with non-zero exit code %d", exitCode) + } + + return exitCode, nil +} + +// cleanup an exec session after its done +func (c *Container) cleanupExecBundle(sessionID string) error { + if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { + return err + } + + return c.ociRuntime.ExecContainerCleanup(c, sessionID) +} + +// the path to a containers exec session bundle +func (c *Container) execBundlePath(sessionID string) string { + return filepath.Join(c.bundlePath(), sessionID) +} + +// Get PID file path for a container's exec session +func (c *Container) execPidPath(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exec_pid") +} + +// the log path for an exec session +func (c *Container) execLogPath(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exec_log") +} + +// the socket conmon creates for an exec session +func (c *Container) execAttachSocketPath(sessionID string) (string, error) { + return c.ociRuntime.ExecAttachSocketPath(c, sessionID) +} + +// execExitFileDir gets the path to the container's exit file +func (c *Container) execExitFileDir(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "exit") +} + +// execOCILog returns the file path for the exec sessions oci log +func (c *Container) execOCILog(sessionID string) string { + if !c.ociRuntime.SupportsJSONErrors() { + return "" + } + return filepath.Join(c.execBundlePath(sessionID), "oci-log") +} + +// create a bundle path and associated files for an exec session +func (c *Container) createExecBundle(sessionID string) (err error) { + bundlePath := c.execBundlePath(sessionID) + if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil { + return createErr + } + defer func() { + if err != nil { + if err2 := os.RemoveAll(bundlePath); err != nil { + logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2) + } + } + }() + if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil { + // The directory is allowed to exist + if !os.IsExist(err2) { + err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID)) + } + } + return +} + +// readExecExitCode reads the exit file for an exec session and returns +// the exit code +func (c *Container) readExecExitCode(sessionID string) (int, error) { + exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID()) + chWait := make(chan error) + defer close(chWait) + + _, err := WaitForFile(exitFile, chWait, time.Second*5) + if err != nil { + return -1, err + } + ec, err := ioutil.ReadFile(exitFile) + if err != nil { + return -1, err + } + ecInt, err := strconv.Atoi(string(ec)) + if err != nil { + return -1, err + } + return ecInt, nil +} + +// getExecSessionPID gets the PID of an active exec session +func (c *Container) getExecSessionPID(sessionID string) (int, error) { + session, ok := c.state.ExecSessions[sessionID] + if ok { + return session.PID, nil + } + oldSession, ok := 
c.state.LegacyExecSessions[sessionID] + if ok { + return oldSession.PID, nil + } + + return -1, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID()) +} + +// getKnownExecSessions gets a list of all exec sessions we think are running, +// but does not verify their current state. +// Please use getActiveExecSessions() outside of container_exec.go, as this +// function performs further checks to return an accurate list. +func (c *Container) getKnownExecSessions() []string { + knownSessions := []string{} + // First check legacy sessions. + // TODO: This is DEPRECATED and will be removed in a future major + // release. + for sessionID := range c.state.LegacyExecSessions { + knownSessions = append(knownSessions, sessionID) + } + // Next check new exec sessions, but only if in running state + for sessionID, session := range c.state.ExecSessions { + if session.State == define.ExecStateRunning { + knownSessions = append(knownSessions, sessionID) + } + } + + return knownSessions +} + +// getActiveExecSessions checks if there are any active exec sessions in the +// current container. Returns an array of active exec sessions. +// Will continue through errors where possible. +// Currently handles both new and legacy, deprecated exec sessions. +func (c *Container) getActiveExecSessions() ([]string, error) { + activeSessions := []string{} + knownSessions := c.getKnownExecSessions() + + // Instead of saving once per iteration, do it once at the end. + var lastErr error + needSave := false + for _, id := range knownSessions { + alive, err := c.ociRuntime.ExecUpdateStatus(c, id) + if err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + continue + } + if !alive { + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + + _, isLegacy := c.state.LegacyExecSessions[id] + if isLegacy { + delete(c.state.LegacyExecSessions, id) + needSave = true + } else { + session := c.state.ExecSessions[id] + exitCode, err := c.readExecExitCode(session.ID()) + if err != nil { + if lastErr != nil { + logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + session.ExitCode = exitCode + session.PID = 0 + session.State = define.ExecStateStopped + + needSave = true + } + } else { + activeSessions = append(activeSessions, id) + } + } + if needSave { + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) + } + lastErr = err + } + } + + return activeSessions, lastErr +} + +// removeAllExecSessions stops and removes all the container's exec sessions +func (c *Container) removeAllExecSessions() error { + knownSessions := c.getKnownExecSessions() + + var lastErr error + for _, id := range knownSessions { + if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + continue + } + + if err := c.cleanupExecBundle(id); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + } + // Delete all exec sessions + if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil { + if lastErr != nil { + 
logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + c.state.ExecSessions = nil + c.state.LegacyExecSessions = nil + if err := c.save(); err != nil { + if lastErr != nil { + logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr) + } + lastErr = err + } + + return lastErr +} diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 50ae72499..f81be8b22 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -3,13 +3,11 @@ package libpod import ( "fmt" "strings" - "time" - "github.com/containers/image/v5/manifest" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/driver" "github.com/containers/libpod/pkg/util" - "github.com/cri-o/ocicni/pkg/ocicni" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/validate" @@ -85,602 +83,9 @@ const ( InspectResponseFalse = "FALSE" ) -// InspectContainerData provides a detailed record of a container's configuration -// and state as viewed by Libpod. -// Large portions of this structure are defined such that the output is -// compatible with `docker inspect` JSON, but additional fields have been added -// as required to share information not in the original output. -type InspectContainerData struct { - ID string `json:"Id"` - Created time.Time `json:"Created"` - Path string `json:"Path"` - Args []string `json:"Args"` - State *InspectContainerState `json:"State"` - Image string `json:"Image"` - ImageName string `json:"ImageName"` - Rootfs string `json:"Rootfs"` - Pod string `json:"Pod"` - ResolvConfPath string `json:"ResolvConfPath"` - HostnamePath string `json:"HostnamePath"` - HostsPath string `json:"HostsPath"` - StaticDir string `json:"StaticDir"` - OCIConfigPath string `json:"OCIConfigPath,omitempty"` - OCIRuntime string `json:"OCIRuntime,omitempty"` - LogPath string `json:"LogPath"` - LogTag string `json:"LogTag"` - ConmonPidFile string `json:"ConmonPidFile"` - Name string `json:"Name"` - RestartCount int32 `json:"RestartCount"` - Driver string `json:"Driver"` - MountLabel string `json:"MountLabel"` - ProcessLabel string `json:"ProcessLabel"` - AppArmorProfile string `json:"AppArmorProfile"` - EffectiveCaps []string `json:"EffectiveCaps"` - BoundingCaps []string `json:"BoundingCaps"` - ExecIDs []string `json:"ExecIDs"` - GraphDriver *driver.Data `json:"GraphDriver"` - SizeRw *int64 `json:"SizeRw,omitempty"` - SizeRootFs int64 `json:"SizeRootFs,omitempty"` - Mounts []InspectMount `json:"Mounts"` - Dependencies []string `json:"Dependencies"` - NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO - ExitCommand []string `json:"ExitCommand"` - Namespace string `json:"Namespace"` - IsInfra bool `json:"IsInfra"` - Config *InspectContainerConfig `json:"Config"` - HostConfig *InspectContainerHostConfig `json:"HostConfig"` -} - -// InspectContainerConfig holds further data about how a container was initially -// configured. 
-type InspectContainerConfig struct { - // Container hostname - Hostname string `json:"Hostname"` - // Container domain name - unused at present - DomainName string `json:"Domainname"` - // User the container was launched with - User string `json:"User"` - // Unused, at present - AttachStdin bool `json:"AttachStdin"` - // Unused, at present - AttachStdout bool `json:"AttachStdout"` - // Unused, at present - AttachStderr bool `json:"AttachStderr"` - // Whether the container creates a TTY - Tty bool `json:"Tty"` - // Whether the container leaves STDIN open - OpenStdin bool `json:"OpenStdin"` - // Whether STDIN is only left open once. - // Presently not supported by Podman, unused. - StdinOnce bool `json:"StdinOnce"` - // Container environment variables - Env []string `json:"Env"` - // Container command - Cmd []string `json:"Cmd"` - // Container image - Image string `json:"Image"` - // Unused, at present. I've never seen this field populated. - Volumes map[string]struct{} `json:"Volumes"` - // Container working directory - WorkingDir string `json:"WorkingDir"` - // Container entrypoint - Entrypoint string `json:"Entrypoint"` - // On-build arguments - presently unused. More of Buildah's domain. - OnBuild *string `json:"OnBuild"` - // Container labels - Labels map[string]string `json:"Labels"` - // Container annotations - Annotations map[string]string `json:"Annotations"` - // Container stop signal - StopSignal uint `json:"StopSignal"` - // Configured healthcheck for the container - Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` - // CreateCommand is the full command plus arguments of the process the - // container has been created with. - CreateCommand []string `json:"CreateCommand,omitempty"` -} - -// InspectContainerHostConfig holds information used when the container was -// created. -// It's very much a Docker-specific struct, retained (mostly) as-is for -// compatibility. We fill individual fields as best as we can, inferring as much -// as possible from the spec and container config. -// Some things cannot be inferred. These will be populated by spec annotations -// (if available). -// Field names are fixed for compatibility and cannot be changed. -// As such, silence lint warnings about them. -//nolint -type InspectContainerHostConfig struct { - // Binds contains an array of user-added mounts. - // Both volume mounts and named volumes are included. - // Tmpfs mounts are NOT included. - // In 'docker inspect' this is separated into 'Binds' and 'Mounts' based - // on how a mount was added. We do not make this distinction and do not - // include a Mounts field in inspect. - // Format: <src>:<destination>[:<comma-separated options>] - Binds []string `json:"Binds"` - // ContainerIDFile is a file created during container creation to hold - // the ID of the created container. - // This is not handled within libpod and is stored in an annotation. - ContainerIDFile string `json:"ContainerIDFile"` - // LogConfig contains information on the container's logging backend - LogConfig *InspectLogConfig `json:"LogConfig"` - // NetworkMode is the configuration of the container's network - // namespace. 
- // Populated as follows: - // default - A network namespace is being created and configured via CNI - // none - A network namespace is being created, not configured via CNI - // host - No network namespace created - // container:<id> - Using another container's network namespace - // ns:<path> - A path to a network namespace has been specified - NetworkMode string `json:"NetworkMode"` - // PortBindings contains the container's port bindings. - // It is formatted as map[string][]InspectHostPort. - // The string key here is formatted as <integer port number>/<protocol> - // and represents the container port. A single container port may be - // bound to multiple host ports (on different IPs). - PortBindings map[string][]InspectHostPort `json:"PortBindings"` - // RestartPolicy contains the container's restart policy. - RestartPolicy *InspectRestartPolicy `json:"RestartPolicy"` - // AutoRemove is whether the container will be automatically removed on - // exiting. - // It is not handled directly within libpod and is stored in an - // annotation. - AutoRemove bool `json:"AutoRemove"` - // VolumeDriver is presently unused and is retained for Docker - // compatibility. - VolumeDriver string `json:"VolumeDriver"` - // VolumesFrom is a list of containers which this container uses volumes - // from. This is not handled directly within libpod and is stored in an - // annotation. - // It is formatted as an array of container names and IDs. - VolumesFrom []string `json:"VolumesFrom"` - // CapAdd is a list of capabilities added to the container. - // It is not directly stored by Libpod, and instead computed from the - // capabilities listed in the container's spec, compared against a set - // of default capabilities. - CapAdd []string `json:"CapAdd"` - // CapDrop is a list of capabilities removed from the container. - // It is not directly stored by libpod, and instead computed from the - // capabilities listed in the container's spec, compared against a set - // of default capabilities. - CapDrop []string `json:"CapDrop"` - // Dns is a list of DNS nameservers that will be added to the - // container's resolv.conf - Dns []string `json:"Dns"` - // DnsOptions is a list of DNS options that will be set in the - // container's resolv.conf - DnsOptions []string `json:"DnsOptions"` - // DnsSearch is a list of DNS search domains that will be set in the - // container's resolv.conf - DnsSearch []string `json:"DnsSearch"` - // ExtraHosts contains hosts that will be aded to the container's - // /etc/hosts. - ExtraHosts []string `json:"ExtraHosts"` - // GroupAdd contains groups that the user inside the container will be - // added to. - GroupAdd []string `json:"GroupAdd"` - // IpcMode represents the configuration of the container's IPC - // namespace. - // Populated as follows: - // "" (empty string) - Default, an IPC namespace will be created - // host - No IPC namespace created - // container:<id> - Using another container's IPC namespace - // ns:<path> - A path to an IPC namespace has been specified - IpcMode string `json:"IpcMode"` - // Cgroup contains the container's cgroup. It is presently not - // populated. - // TODO. - Cgroup string `json:"Cgroup"` - // Cgroups contains the container's CGroup mode. - // Allowed values are "default" (container is creating CGroups) and - // "disabled" (container is not creating CGroups). - // This is Libpod-specific and not included in `docker inspect`. - Cgroups string `json:"Cgroups"` - // Links is unused, and provided purely for Docker compatibility. 
- Links []string `json:"Links"` - // OOMScoreAdj is an adjustment that will be made to the container's OOM - // score. - OomScoreAdj int `json:"OomScoreAdj"` - // PidMode represents the configuration of the container's PID - // namespace. - // Populated as follows: - // "" (empty string) - Default, a PID namespace will be created - // host - No PID namespace created - // container:<id> - Using another container's PID namespace - // ns:<path> - A path to a PID namespace has been specified - PidMode string `json:"PidMode"` - // Privileged indicates whether the container is running with elevated - // privileges. - // This has a very specific meaning in the Docker sense, so it's very - // difficult to decode from the spec and config, and so is stored as an - // annotation. - Privileged bool `json:"Privileged"` - // PublishAllPorts indicates whether image ports are being published. - // This is not directly stored in libpod and is saved as an annotation. - PublishAllPorts bool `json:"PublishAllPorts"` - // ReadonlyRootfs is whether the container will be mounted read-only. - ReadonlyRootfs bool `json:"ReadonlyRootfs"` - // SecurityOpt is a list of security-related options that are set in the - // container. - SecurityOpt []string `json:"SecurityOpt"` - // Tmpfs is a list of tmpfs filesystems that will be mounted into the - // container. - // It is a map of destination path to options for the mount. - Tmpfs map[string]string `json:"Tmpfs"` - // UTSMode represents the configuration of the container's UID - // namespace. - // Populated as follows: - // "" (empty string) - Default, a UTS namespace will be created - // host - no UTS namespace created - // container:<id> - Using another container's UTS namespace - // ns:<path> - A path to a UTS namespace has been specified - UTSMode string `json:"UTSMode"` - // UsernsMode represents the configuration of the container's user - // namespace. - // When running rootless, a user namespace is created outside of libpod - // to allow some privileged operations. This will not be reflected here. - // Populated as follows: - // "" (empty string) - No user namespace will be created - // private - The container will be run in a user namespace - // container:<id> - Using another container's user namespace - // ns:<path> - A path to a user namespace has been specified - // TODO Rootless has an additional 'keep-id' option, presently not - // reflected here. - UsernsMode string `json:"UsernsMode"` - // ShmSize is the size of the container's SHM device. - ShmSize int64 `json:"ShmSize"` - // Runtime is provided purely for Docker compatibility. - // It is set unconditionally to "oci" as Podman does not presently - // support non-OCI runtimes. - Runtime string `json:"Runtime"` - // ConsoleSize is an array of 2 integers showing the size of the - // container's console. - // It is only set if the container is creating a terminal. - // TODO. - ConsoleSize []uint `json:"ConsoleSize"` - // Isolation is presently unused and provided solely for Docker - // compatibility. - Isolation string `json:"Isolation"` - // CpuShares indicates the CPU resources allocated to the container. - // It is a relative weight in the scheduler for assigning CPU time - // versus other CGroups. - CpuShares uint64 `json:"CpuShares"` - // Memory indicates the memory resources allocated to the container. - // This is the limit (in bytes) of RAM the container may use. - Memory int64 `json:"Memory"` - // NanoCpus indicates number of CPUs allocated to the container. 
- // It is an integer where one full CPU is indicated by 1000000000 (one - // billion). - // Thus, 2.5 CPUs (fractional portions of CPUs are allowed) would be - // 2500000000 (2.5 billion). - // In 'docker inspect' this is set exclusively of two further options in - // the output (CpuPeriod and CpuQuota) which are both used to implement - // this functionality. - // We can't distinguish here, so if CpuQuota is set to the default of - // 100000, we will set both CpuQuota, CpuPeriod, and NanoCpus. If - // CpuQuota is not the default, we will not set NanoCpus. - NanoCpus int64 `json:"NanoCpus"` - // CgroupParent is the CGroup parent of the container. - // Only set if not default. - CgroupParent string `json:"CgroupParent"` - // BlkioWeight indicates the I/O resources allocated to the container. - // It is a relative weight in the scheduler for assigning I/O time - // versus other CGroups. - BlkioWeight uint16 `json:"BlkioWeight"` - // BlkioWeightDevice is an array of I/O resource priorities for - // individual device nodes. - // Unfortunately, the spec only stores the device's Major/Minor numbers - // and not the path, which is used here. - // Fortunately, the kernel provides an interface for retrieving the path - // of a given node by major:minor at /sys/dev/. However, the exact path - // in use may not be what was used in the original CLI invocation - - // though it is guaranteed that the device node will be the same, and - // using the given path will be functionally identical. - BlkioWeightDevice []InspectBlkioWeightDevice `json:"BlkioWeightDevice"` - // BlkioDeviceReadBps is an array of I/O throttle parameters for - // individual device nodes. - // This specifically sets read rate cap in bytes per second for device - // nodes. - // As with BlkioWeightDevice, we pull the path from /sys/dev, and we - // don't guarantee the path will be identical to the original (though - // the node will be). - BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadBps"` - // BlkioDeviceWriteBps is an array of I/O throttle parameters for - // individual device nodes. - // this specifically sets write rate cap in bytes per second for device - // nodes. - // as with BlkioWeightDevice, we pull the path from /sys/dev, and we - // don't guarantee the path will be identical to the original (though - // the node will be). - BlkioDeviceWriteBps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteBps"` - // BlkioDeviceReadIOps is an array of I/O throttle parameters for - // individual device nodes. - // This specifically sets the read rate cap in iops per second for - // device nodes. - // As with BlkioWeightDevice, we pull the path from /sys/dev, and we - // don't guarantee the path will be identical to the original (though - // the node will be). - BlkioDeviceReadIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadIOps"` - // BlkioDeviceWriteIOps is an array of I/O throttle parameters for - // individual device nodes. - // This specifically sets the write rate cap in iops per second for - // device nodes. - // As with BlkioWeightDevice, we pull the path from /sys/dev, and we - // don't guarantee the path will be identical to the original (though - // the node will be). - BlkioDeviceWriteIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteIOps"` - // CpuPeriod is the length of a CPU period in microseconds. - // It relates directly to CpuQuota. 
- CpuPeriod uint64 `json:"CpuPeriod"` - // CpuPeriod is the amount of time (in microseconds) that a container - // can use the CPU in every CpuPeriod. - CpuQuota int64 `json:"CpuQuota"` - // CpuRealtimePeriod is the length of time (in microseconds) of the CPU - // realtime period. If set to 0, no time will be allocated to realtime - // tasks. - CpuRealtimePeriod uint64 `json:"CpuRealtimePeriod"` - // CpuRealtimeRuntime is the length of time (in microseconds) allocated - // for realtime tasks within every CpuRealtimePeriod. - CpuRealtimeRuntime int64 `json:"CpuRealtimeRuntime"` - // CpusetCpus is the is the set of CPUs that the container will execute - // on. Formatted as `0-3` or `0,2`. Default (if unset) is all CPUs. - CpusetCpus string `json:"CpusetCpus"` - // CpusetMems is the set of memory nodes the container will use. - // Formatted as `0-3` or `0,2`. Default (if unset) is all memory nodes. - CpusetMems string `json:"CpusetMems"` - // Devices is a list of device nodes that will be added to the - // container. - // These are stored in the OCI spec only as type, major, minor while we - // display the host path. We convert this with /sys/dev, but we cannot - // guarantee that the host path will be identical - only that the actual - // device will be. - Devices []InspectDevice `json:"Devices"` - // DiskQuota is the maximum amount of disk space the container may use - // (in bytes). - // Presently not populated. - // TODO. - DiskQuota uint64 `json:"DiskQuota"` - // KernelMemory is the maximum amount of memory the kernel will devote - // to the container. - KernelMemory int64 `json:"KernelMemory"` - // MemoryReservation is the reservation (soft limit) of memory available - // to the container. Soft limits are warnings only and can be exceeded. - MemoryReservation int64 `json:"MemoryReservation"` - // MemorySwap is the total limit for all memory available to the - // container, including swap. 0 indicates that there is no limit to the - // amount of memory available. - MemorySwap int64 `json:"MemorySwap"` - // MemorySwappiness is the willingness of the kernel to page container - // memory to swap. It is an integer from 0 to 100, with low numbers - // being more likely to be put into swap. - // -1, the default, will not set swappiness and use the system defaults. - MemorySwappiness int64 `json:"MemorySwappiness"` - // OomKillDisable indicates whether the kernel OOM killer is disabled - // for the container. - OomKillDisable bool `json:"OomKillDisable"` - // Init indicates whether the container has an init mounted into it. - Init bool `json:"Init,omitempty"` - // PidsLimit is the maximum number of PIDs what may be created within - // the container. 0, the default, indicates no limit. - PidsLimit int64 `json:"PidsLimit"` - // Ulimits is a set of ulimits that will be set within the container. - Ulimits []InspectUlimit `json:"Ulimits"` - // CpuCount is Windows-only and not presently implemented. - CpuCount uint64 `json:"CpuCount"` - // CpuPercent is Windows-only and not presently implemented. - CpuPercent uint64 `json:"CpuPercent"` - // IOMaximumIOps is Windows-only and not presently implemented. - IOMaximumIOps uint64 `json:"IOMaximumIOps"` - // IOMaximumBandwidth is Windows-only and not presently implemented. - IOMaximumBandwidth uint64 `json:"IOMaximumBandwidth"` -} - -// InspectLogConfig holds information about a container's configured log driver -// and is presently unused. It is retained for Docker compatibility. 
-type InspectLogConfig struct { - Type string `json:"Type"` - Config map[string]string `json:"Config"` //idk type, TODO -} - -// InspectRestartPolicy holds information about the container's restart policy. -type InspectRestartPolicy struct { - // Name contains the container's restart policy. - // Allowable values are "no" or "" (take no action), - // "on-failure" (restart on non-zero exit code, with an optional max - // retry count), and "always" (always restart on container stop, unless - // explicitly requested by API). - // Note that this is NOT actually a name of any sort - the poor naming - // is for Docker compatibility. - Name string `json:"Name"` - // MaximumRetryCount is the maximum number of retries allowed if the - // "on-failure" restart policy is in use. Not used if "on-failure" is - // not set. - MaximumRetryCount uint `json:"MaximumRetryCount"` -} - -// InspectBlkioWeightDevice holds information about the relative weight -// of an individual device node. Weights are used in the I/O scheduler to give -// relative priority to some accesses. -type InspectBlkioWeightDevice struct { - // Path is the path to the device this applies to. - Path string `json:"Path"` - // Weight is the relative weight the scheduler will use when scheduling - // I/O. - Weight uint16 `json:"Weight"` -} - -// InspectBlkioThrottleDevice holds information about a speed cap for a device -// node. This cap applies to a specific operation (read, write, etc) on the given -// node. -type InspectBlkioThrottleDevice struct { - // Path is the path to the device this applies to. - Path string `json:"Path"` - // Rate is the maximum rate. It is in either bytes per second or iops - // per second, determined by where it is used - documentation will - // indicate which is appropriate. - Rate uint64 `json:"Rate"` -} - -// InspectUlimit is a ulimit that will be applied to the container. -type InspectUlimit struct { - // Name is the name (type) of the ulimit. - Name string `json:"Name"` - // Soft is the soft limit that will be applied. - Soft uint64 `json:"Soft"` - // Hard is the hard limit that will be applied. - Hard uint64 `json:"Hard"` -} - -// InspectMount provides a record of a single mount in a container. It contains -// fields for both named and normal volumes. Only user-specified volumes will be -// included, and tmpfs volumes are not included even if the user specified them. -type InspectMount struct { - // Whether the mount is a volume or bind mount. Allowed values are - // "volume" and "bind". - Type string `json:"Type"` - // The name of the volume. Empty for bind mounts. - Name string `json:"Name,omptempty"` - // The source directory for the volume. - Source string `json:"Source"` - // The destination directory for the volume. Specified as a path within - // the container, as it would be passed into the OCI runtime. - Destination string `json:"Destination"` - // The driver used for the named volume. Empty for bind mounts. - Driver string `json:"Driver"` - // Contains SELinux :z/:Z mount options. Unclear what, if anything, else - // goes in here. - Mode string `json:"Mode"` - // All remaining mount options. Additional data, not present in the - // original output. - Options []string `json:"Options"` - // Whether the volume is read-write - RW bool `json:"RW"` - // Mount propagation for the mount. Can be empty if not specified, but - // is always printed - no omitempty. - Propagation string `json:"Propagation"` -} - -// InspectDevice is a single device that will be mounted into the container. 
-type InspectDevice struct { - // PathOnHost is the path of the device on the host. - PathOnHost string `json:"PathOnHost"` - // PathInContainer is the path of the device within the container. - PathInContainer string `json:"PathInContainer"` - // CgroupPermissions is the permissions of the mounted device. - // Presently not populated. - // TODO. - CgroupPermissions string `json:"CgroupPermissions"` -} - -// InspectHostPort provides information on a port on the host that a container's -// port is bound to. -type InspectHostPort struct { - // IP on the host we are bound to. "" if not specified (binding to all - // IPs). - HostIP string `json:"HostIp"` - // Port on the host we are bound to. No special formatting - just an - // integer stuffed into a string. - HostPort string `json:"HostPort"` -} - -// InspectContainerState provides a detailed record of a container's current -// state. It is returned as part of InspectContainerData. -// As with InspectContainerData, many portions of this struct are matched to -// Docker, but here we see more fields that are unused (nonsensical in the -// context of Libpod). -type InspectContainerState struct { - OciVersion string `json:"OciVersion"` - Status string `json:"Status"` - Running bool `json:"Running"` - Paused bool `json:"Paused"` - Restarting bool `json:"Restarting"` // TODO - OOMKilled bool `json:"OOMKilled"` - Dead bool `json:"Dead"` - Pid int `json:"Pid"` - ConmonPid int `json:"ConmonPid,omitempty"` - ExitCode int32 `json:"ExitCode"` - Error string `json:"Error"` // TODO - StartedAt time.Time `json:"StartedAt"` - FinishedAt time.Time `json:"FinishedAt"` - Healthcheck HealthCheckResults `json:"Healthcheck,omitempty"` -} - -// InspectBasicNetworkConfig holds basic configuration information (e.g. IP -// addresses, MAC address, subnet masks, etc) that are common for all networks -// (both additional and main). -type InspectBasicNetworkConfig struct { - // EndpointID is unused, maintained exclusively for compatibility. - EndpointID string `json:"EndpointID"` - // Gateway is the IP address of the gateway this network will use. - Gateway string `json:"Gateway"` - // IPAddress is the IP address for this network. - IPAddress string `json:"IPAddress"` - // IPPrefixLen is the length of the subnet mask of this network. - IPPrefixLen int `json:"IPPrefixLen"` - // SecondaryIPAddresses is a list of extra IP Addresses that the - // container has been assigned in this network. - SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty"` - // IPv6Gateway is the IPv6 gateway this network will use. - IPv6Gateway string `json:"IPv6Gateway"` - // GlobalIPv6Address is the global-scope IPv6 Address for this network. - GlobalIPv6Address string `json:"GlobalIPv6Address"` - // GlobalIPv6PrefixLen is the length of the subnet mask of this network. - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen"` - // SecondaryIPv6Addresses is a list of extra IPv6 Addresses that the - // container has been assigned in this networ. - SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty"` - // MacAddress is the MAC address for the interface in this network. - MacAddress string `json:"MacAddress"` - // AdditionalMacAddresses is a set of additional MAC Addresses beyond - // the first. CNI may configure more than one interface for a single - // network, which can cause this. - AdditionalMacAddresses []string `json:"AdditionalMACAddresses,omitempty"` -} - -// InspectNetworkSettings holds information about the network settings of the -// container. 
-// Many fields are maintained only for compatibility with `docker inspect` and -// are unused within Libpod. -type InspectNetworkSettings struct { - InspectBasicNetworkConfig - - Bridge string `json:"Bridge"` - SandboxID string `json:"SandboxID"` - HairpinMode bool `json:"HairpinMode"` - LinkLocalIPv6Address string `json:"LinkLocalIPv6Address"` - LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen"` - Ports []ocicni.PortMapping `json:"Ports"` - SandboxKey string `json:"SandboxKey"` - // Networks contains information on non-default CNI networks this - // container has joined. - // It is a map of network name to network information. - Networks map[string]*InspectAdditionalNetwork `json:"Networks,omitempty"` -} - -// InspectAdditionalNetwork holds information about non-default CNI networks the -// container has been connected to. -// As with InspectNetworkSettings, many fields are unused and maintained only -// for compatibility with Docker. -type InspectAdditionalNetwork struct { - InspectBasicNetworkConfig - - // Name of the network we're connecting to. - NetworkID string `json:"NetworkID,omitempty"` - // DriverOpts is presently unused and maintained exclusively for - // compatibility. - DriverOpts map[string]string `json:"DriverOpts"` - // IPAMConfig is presently unused and maintained exclusively for - // compatibility. - IPAMConfig map[string]string `json:"IPAMConfig"` - // Links is presently unused and maintained exclusively for - // compatibility. - Links []string `json:"Links"` -} - // inspectLocked inspects a container for low-level information. // The caller must held c.lock. -func (c *Container) inspectLocked(size bool) (*InspectContainerData, error) { +func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) { storeCtr, err := c.runtime.store.Container(c.ID()) if err != nil { return nil, errors.Wrapf(err, "error getting container from store %q", c.ID()) @@ -697,7 +102,7 @@ func (c *Container) inspectLocked(size bool) (*InspectContainerData, error) { } // Inspect a container for low-level information -func (c *Container) Inspect(size bool) (*InspectContainerData, error) { +func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) { if !c.batched { c.lock.Lock() defer c.lock.Unlock() @@ -710,7 +115,7 @@ func (c *Container) Inspect(size bool) (*InspectContainerData, error) { return c.inspectLocked(size) } -func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) { +func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*define.InspectContainerData, error) { config := c.config runtimeInfo := c.state ctrSpec, err := c.specFromState() @@ -757,12 +162,12 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) return nil, err } - data := &InspectContainerData{ + data := &define.InspectContainerData{ ID: config.ID, Created: config.CreatedTime, Path: path, Args: args, - State: &InspectContainerState{ + State: &define.InspectContainerState{ OciVersion: ctrSpec.Version, Status: runtimeInfo.State.String(), Running: runtimeInfo.State == define.ContainerStateRunning, @@ -857,8 +262,8 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) // Get inspect-formatted mounts list. // Only includes user-specified mounts. Only includes bind mounts and named // volumes, not tmpfs volumes. 
-func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]InspectMount, error) { - inspectMounts := []InspectMount{} +func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]define.InspectMount, error) { + inspectMounts := []define.InspectMount{} // No mounts, return early if len(c.config.UserVolumes) == 0 { @@ -866,7 +271,7 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*Contain } for _, volume := range namedVolumes { - mountStruct := InspectMount{} + mountStruct := define.InspectMount{} mountStruct.Type = "volume" mountStruct.Destination = volume.Dest mountStruct.Name = volume.Name @@ -891,7 +296,7 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*Contain continue } - mountStruct := InspectMount{} + mountStruct := define.InspectMount{} mountStruct.Type = "bind" mountStruct.Source = mount.Source mountStruct.Destination = mount.Destination @@ -906,7 +311,7 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*Contain // Parse mount options so we can populate them in the mount structure. // The mount passed in will be modified. -func parseMountOptionsForInspect(options []string, mount *InspectMount) { +func parseMountOptionsForInspect(options []string, mount *define.InspectMount) { isRW := true mountProp := "" zZ := "" @@ -940,8 +345,8 @@ func parseMountOptionsForInspect(options []string, mount *InspectMount) { } // Generate the InspectContainerConfig struct for the Config field of Inspect. -func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*InspectContainerConfig, error) { - ctrConfig := new(InspectContainerConfig) +func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*define.InspectContainerConfig, error) { + ctrConfig := new(define.InspectContainerConfig) ctrConfig.Hostname = c.Hostname() ctrConfig.User = c.config.User @@ -992,14 +397,14 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*InspectCon // Generate the InspectContainerHostConfig struct for the HostConfig field of // Inspect. 
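Only the signature of parseMountOptionsForInspect changes in the hunk above; its body is elided. For readers without the file at hand, here is a minimal sketch of the kind of partitioning it performs - an assumption inferred from the InspectMount fields, not the literal upstream body:

    // Sketch: split raw mount options into the structured InspectMount fields.
    func parseMountOptionsSketch(options []string, mount *define.InspectMount) {
        isRW := true
        misc := []string{}
        for _, opt := range options {
            switch opt {
            case "rw":
                isRW = true
            case "ro":
                isRW = false
            case "z", "Z":
                // SELinux relabel options end up in Mode.
                mount.Mode = opt
            case "shared", "slave", "private", "rshared", "rslave", "rprivate":
                mount.Propagation = opt
            default:
                // Everything else is preserved in Options.
                misc = append(misc, opt)
            }
        }
        mount.RW = isRW
        mount.Options = misc
    }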
-func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) (*InspectContainerHostConfig, error) { - hostConfig := new(InspectContainerHostConfig) +func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) (*define.InspectContainerHostConfig, error) { + hostConfig := new(define.InspectContainerHostConfig) - logConfig := new(InspectLogConfig) + logConfig := new(define.InspectLogConfig) logConfig.Type = c.config.LogDriver hostConfig.LogConfig = logConfig - restartPolicy := new(InspectRestartPolicy) + restartPolicy := new(define.InspectRestartPolicy) restartPolicy.Name = c.config.RestartPolicy restartPolicy.MaximumRetryCount = c.config.RestartRetries hostConfig.RestartPolicy = restartPolicy @@ -1126,7 +531,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named if ctrSpec.Linux.Resources.BlockIO.Weight != nil { hostConfig.BlkioWeight = *ctrSpec.Linux.Resources.BlockIO.Weight } - hostConfig.BlkioWeightDevice = []InspectBlkioWeightDevice{} + hostConfig.BlkioWeightDevice = []define.InspectBlkioWeightDevice{} for _, dev := range ctrSpec.Linux.Resources.BlockIO.WeightDevice { key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor) // TODO: how do we handle LeafWeight vs @@ -1148,14 +553,14 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named logrus.Warnf("Could not locate weight device %s in system devices", key) continue } - weightDev := InspectBlkioWeightDevice{} + weightDev := define.InspectBlkioWeightDevice{} weightDev.Path = path weightDev.Weight = *dev.Weight hostConfig.BlkioWeightDevice = append(hostConfig.BlkioWeightDevice, weightDev) } - handleThrottleDevice := func(devs []spec.LinuxThrottleDevice) ([]InspectBlkioThrottleDevice, error) { - out := []InspectBlkioThrottleDevice{} + handleThrottleDevice := func(devs []spec.LinuxThrottleDevice) ([]define.InspectBlkioThrottleDevice, error) { + out := []define.InspectBlkioThrottleDevice{} for _, dev := range devs { key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor) if deviceNodes == nil { @@ -1170,7 +575,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named logrus.Warnf("Could not locate throttle device %s in system devices", key) continue } - throttleDev := InspectBlkioThrottleDevice{} + throttleDev := define.InspectBlkioThrottleDevice{} throttleDev.Path = path throttleDev.Rate = dev.Rate out = append(out, throttleDev) @@ -1272,15 +677,15 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named // Port bindings. // Only populate if we're using CNI to configure the network. - portBindings := make(map[string][]InspectHostPort) + portBindings := make(map[string][]define.InspectHostPort) if c.config.CreateNetNS { for _, port := range c.config.PortMappings { key := fmt.Sprintf("%d/%s", port.ContainerPort, port.Protocol) hostPorts := portBindings[key] if hostPorts == nil { - hostPorts = []InspectHostPort{} + hostPorts = []define.InspectHostPort{} } - hostPorts = append(hostPorts, InspectHostPort{ + hostPorts = append(hostPorts, define.InspectHostPort{ HostIP: port.HostIP, HostPort: fmt.Sprintf("%d", port.HostPort), }) @@ -1365,10 +770,10 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named // CGroup parent // Need to check if it's the default, and not print if so. 
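Stepping back to the port-binding conversion earlier in this hunk: the map key is "<container port>/<protocol>". For a hypothetical container publishing container port 80/tcp on host port 8080 (all host IPs), the loop above yields:

    portBindings := map[string][]define.InspectHostPort{
        "80/tcp": {
            {HostIP: "", HostPort: "8080"},
        },
    }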
defaultCgroupParent := "" - switch c.runtime.config.CgroupManager { - case define.CgroupfsCgroupsManager: + switch c.runtime.config.Engine.CgroupManager { + case config.CgroupfsCgroupsManager: defaultCgroupParent = CgroupfsDefaultCgroupParent - case define.SystemdCgroupsManager: + case config.SystemdCgroupsManager: defaultCgroupParent = SystemdDefaultCgroupParent } if c.config.CgroupParent != defaultCgroupParent { @@ -1449,7 +854,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named // Devices // Do not include if privileged - assumed that all devices will be // included. - hostConfig.Devices = []InspectDevice{} + hostConfig.Devices = []define.InspectDevice{} if ctrSpec.Linux != nil && !hostConfig.Privileged { for _, dev := range ctrSpec.Linux.Devices { key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor) @@ -1465,7 +870,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named logrus.Warnf("Could not locate device %s on host", key) continue } - newDev := InspectDevice{} + newDev := define.InspectDevice{} newDev.PathOnHost = path newDev.PathInContainer = dev.Path hostConfig.Devices = append(hostConfig.Devices, newDev) @@ -1473,10 +878,10 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named } // Ulimits - hostConfig.Ulimits = []InspectUlimit{} + hostConfig.Ulimits = []define.InspectUlimit{} if ctrSpec.Process != nil { for _, limit := range ctrSpec.Process.Rlimits { - newLimit := InspectUlimit{} + newLimit := define.InspectUlimit{} newLimit.Name = limit.Type newLimit.Soft = limit.Soft newLimit.Hard = limit.Hard diff --git a/libpod/container_internal.go b/libpod/container_internal.go index a0805c1fa..4e18819b8 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -142,92 +142,6 @@ func (c *Container) exitFilePath() (string, error) { return c.ociRuntime.ExitFilePath(c) } -// create a bundle path and associated files for an exec session -func (c *Container) createExecBundle(sessionID string) (err error) { - bundlePath := c.execBundlePath(sessionID) - if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil { - return createErr - } - defer func() { - if err != nil { - if err2 := os.RemoveAll(bundlePath); err != nil { - logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2) - } - } - }() - if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil { - // The directory is allowed to exist - if !os.IsExist(err2) { - err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID)) - } - } - return -} - -// cleanup an exec session after its done -func (c *Container) cleanupExecBundle(sessionID string) error { - if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { - return err - } - - return c.ociRuntime.ExecContainerCleanup(c, sessionID) -} - -// the path to a containers exec session bundle -func (c *Container) execBundlePath(sessionID string) string { - return filepath.Join(c.bundlePath(), sessionID) -} - -// Get PID file path for a container's exec session -func (c *Container) execPidPath(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exec_pid") -} - -// the log path for an exec session -func (c *Container) execLogPath(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exec_log") -} - -// the socket conmon creates for an exec session -func (c *Container) 
execAttachSocketPath(sessionID string) (string, error) { - return c.ociRuntime.ExecAttachSocketPath(c, sessionID) -} - -// execExitFileDir gets the path to the container's exit file -func (c *Container) execExitFileDir(sessionID string) string { - return filepath.Join(c.execBundlePath(sessionID), "exit") -} - -// execOCILog returns the file path for the exec sessions oci log -func (c *Container) execOCILog(sessionID string) string { - if !c.ociRuntime.SupportsJSONErrors() { - return "" - } - return filepath.Join(c.execBundlePath(sessionID), "oci-log") -} - -// readExecExitCode reads the exit file for an exec session and returns -// the exit code -func (c *Container) readExecExitCode(sessionID string) (int, error) { - exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID()) - chWait := make(chan error) - defer close(chWait) - - _, err := WaitForFile(exitFile, chWait, time.Second*5) - if err != nil { - return -1, err - } - ec, err := ioutil.ReadFile(exitFile) - if err != nil { - return -1, err - } - ecInt, err := strconv.Atoi(string(ec)) - if err != nil { - return -1, err - } - return ecInt, nil -} - // Wait for the container's exit file to appear. // When it does, update our state based on it. func (c *Container) waitForExitFileAndSync() error { @@ -556,9 +470,9 @@ func (c *Container) teardownStorage() error { return nil } -// Reset resets state fields to default values -// It is performed before a refresh and clears the state after a reboot -// It does not save the results - assumes the database will do that for us +// Reset resets state fields to default values. +// It is performed before a refresh and clears the state after a reboot. +// It does not save the results - assumes the database will do that for us. func resetState(state *ContainerState) error { state.PID = 0 state.ConmonPID = 0 @@ -568,7 +482,7 @@ func resetState(state *ContainerState) error { state.State = define.ContainerStateConfigured } state.ExecSessions = make(map[string]*ExecSession) - state.NetworkStatus = nil + state.LegacyExecSessions = nil state.BindMounts = make(map[string]string) state.StoppedByUser = false state.RestartPolicyMatch = false @@ -601,14 +515,14 @@ func (c *Container) refresh() error { c.state.RunDir = dir if len(c.config.IDMappings.UIDMap) != 0 || len(c.config.IDMappings.GIDMap) != 0 { - info, err := os.Stat(c.runtime.config.TmpDir) + info, err := os.Stat(c.runtime.config.Engine.TmpDir) if err != nil { - return errors.Wrapf(err, "cannot stat `%s`", c.runtime.config.TmpDir) + return errors.Wrapf(err, "cannot stat `%s`", c.runtime.config.Engine.TmpDir) } - if err := os.Chmod(c.runtime.config.TmpDir, info.Mode()|0111); err != nil { - return errors.Wrapf(err, "cannot chmod `%s`", c.runtime.config.TmpDir) + if err := os.Chmod(c.runtime.config.Engine.TmpDir, info.Mode()|0111); err != nil { + return errors.Wrapf(err, "cannot chmod `%s`", c.runtime.config.Engine.TmpDir) } - root := filepath.Join(c.runtime.config.TmpDir, "containers-root", c.ID()) + root := filepath.Join(c.runtime.config.Engine.TmpDir, "containers-root", c.ID()) if err := os.MkdirAll(root, 0755); err != nil { return errors.Wrapf(err, "error creating userNS tmpdir for container %s", c.ID()) } @@ -624,6 +538,18 @@ func (c *Container) refresh() error { } c.lock = lock + // Try to delete any lingering IP allocations. + // If this fails, just log and ignore. 
+ // I'm a little concerned that this is so far down in refresh() and we + // could fail before getting to it - but the worst that would happen is + // that Inspect() would return info on IPs we no longer own. + if len(c.state.NetworkStatus) > 0 { + if err := c.removeIPv4Allocations(); err != nil { + logrus.Errorf("Error removing IP allocations for container %s: %v", c.ID(), err) + } + } + c.state.NetworkStatus = nil + if err := c.save(); err != nil { return errors.Wrapf(err, "error refreshing state for container %s", c.ID()) } @@ -633,11 +559,58 @@ func (c *Container) refresh() error { return err } - if rootless.IsRootless() { + return nil +} + +// Try and remove IP address allocations. Presently IPv4 only. +// Should be safe as rootless because NetworkStatus should only be populated if +// CNI is running. +func (c *Container) removeIPv4Allocations() error { + cniNetworksDir, err := getCNINetworksDir() + if err != nil { + return err + } + + if len(c.state.NetworkStatus) == 0 { return nil } - return c.refreshCNI() + cniDefaultNetwork := "" + if c.runtime.netPlugin != nil { + cniDefaultNetwork = c.runtime.netPlugin.GetDefaultNetworkName() + } + + switch { + case len(c.config.Networks) > 0 && len(c.config.Networks) != len(c.state.NetworkStatus): + return errors.Wrapf(define.ErrInternal, "network mismatch: asked to join %d CNI networks but got %d CNI results", len(c.config.Networks), len(c.state.NetworkStatus)) + case len(c.config.Networks) == 0 && len(c.state.NetworkStatus) != 1: + return errors.Wrapf(define.ErrInternal, "network mismatch: did not specify CNI networks but joined more than one (%d)", len(c.state.NetworkStatus)) + case len(c.config.Networks) == 0 && cniDefaultNetwork == "": + return errors.Wrapf(define.ErrInternal, "could not retrieve name of CNI default network") + } + + for index, result := range c.state.NetworkStatus { + for _, ctrIP := range result.IPs { + if ctrIP.Version != "4" { + continue + } + candidate := "" + if len(c.config.Networks) > 0 { + // CNI returns networks in order we passed them. + // So our index into results should be our index + // into networks. + candidate = filepath.Join(cniNetworksDir, c.config.Networks[index], ctrIP.Address.IP.String()) + } else { + candidate = filepath.Join(cniNetworksDir, cniDefaultNetwork, ctrIP.Address.IP.String()) + } + logrus.Debugf("Going to try removing IP address reservation file %q for container %s", candidate, c.ID()) + if err := os.Remove(candidate); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing CNI IP reservation file %q for container %s", candidate, c.ID()) + } + } + } + + return nil } // Remove conmon attach socket and terminal resize FIFO @@ -925,7 +898,7 @@ func (c *Container) completeNetworkSetup() error { if err := c.syncContainer(); err != nil { return err } - if c.config.NetMode == "slirp4netns" { + if c.config.NetMode.IsSlirp4netns() { return c.runtime.setupRootlessNetNS(c) } if err := c.runtime.setupNetNS(c); err != nil { @@ -1716,7 +1689,7 @@ func (c *Container) saveSpec(spec *spec.Spec) error { // Warning: precreate hooks may alter 'config' in place. 
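removeIPv4Allocations above relies on host-local IPAM's on-disk layout: one file per reserved address, named for the IP, under <networks dir>/<network name>. Assuming the conventional directory /var/lib/cni/networks and the default network name "podman" (both are host-dependent; getCNINetworksDir and GetDefaultNetworkName supply the real values), releasing 10.88.0.5 reduces to the same candidate/os.Remove logic as the function above:

    // Hypothetical values; mirrors the reservation-file removal in refresh().
    candidate := filepath.Join("/var/lib/cni/networks", "podman", "10.88.0.5")
    if err := os.Remove(candidate); err != nil && !os.IsNotExist(err) {
        return errors.Wrapf(err, "error removing CNI IP reservation file %q", candidate)
    }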
func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (extensionStageHooks map[string][]spec.Hook, err error) { allHooks := make(map[string][]spec.Hook) - if c.runtime.config.HooksDir == nil { + if c.runtime.config.Engine.HooksDir == nil { if rootless.IsRootless() { return nil, nil } @@ -1740,7 +1713,7 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten } } } else { - manager, err := hooks.New(ctx, c.runtime.config.HooksDir, []string{"precreate", "poststop"}) + manager, err := hooks.New(ctx, c.runtime.config.Engine.HooksDir, []string{"precreate", "poststop"}) if err != nil { return nil, err } @@ -1814,12 +1787,12 @@ func (c *Container) checkReadyForRemoval() error { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String()) } - // Reap exec sessions - if err := c.reapExecSessions(); err != nil { + // Check exec sessions + sessions, err := c.getActiveExecSessions() + if err != nil { return err } - - if len(c.state.ExecSessions) != 0 { + if len(sessions) != 0 { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID()) } @@ -1926,41 +1899,6 @@ func (c *Container) checkExitFile() error { return c.handleExitFile(exitFile, info) } -// Reap dead exec sessions -func (c *Container) reapExecSessions() error { - // Instead of saving once per iteration, use a defer to do it once at - // the end. - var lastErr error - needSave := false - for id := range c.state.ExecSessions { - alive, err := c.ociRuntime.ExecUpdateStatus(c, id) - if err != nil { - if lastErr != nil { - logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) - } - lastErr = err - continue - } - if !alive { - // Clean up lingering files and remove the exec session - if err := c.ociRuntime.ExecContainerCleanup(c, id); err != nil { - return errors.Wrapf(err, "error cleaning up container %s exec session %s files", c.ID(), id) - } - delete(c.state.ExecSessions, id) - needSave = true - } - } - if needSave { - if err := c.save(); err != nil { - if lastErr != nil { - logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr) - } - lastErr = err - } - } - return lastErr -} - func (c *Container) hasNamespace(namespace spec.LinuxNamespaceType) bool { if c.config.Spec == nil || c.config.Spec.Linux == nil { return false diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 63968918c..a3f97f2a6 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -20,6 +20,7 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/containers/buildah/pkg/secrets" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/annotations" @@ -29,6 +30,7 @@ import ( "github.com/containers/libpod/pkg/lookup" "github.com/containers/libpod/pkg/resolvconf" "github.com/containers/libpod/pkg/rootless" + "github.com/containers/libpod/pkg/util" "github.com/containers/storage/pkg/archive" securejoin "github.com/cyphar/filepath-securejoin" "github.com/opencontainers/runc/libcontainer/user" @@ -92,7 +94,7 @@ func (c *Container) prepare() error { } // handle rootless network namespace setup - if c.state.NetNS != nil && c.config.NetMode == 
"slirp4netns" && !c.config.PostConfigureNetNS { + if c.state.NetNS != nil && c.config.NetMode.IsSlirp4netns() && !c.config.PostConfigureNetNS { createNetNSErr = c.runtime.setupRootlessNetNS(c) } }() @@ -1213,7 +1215,7 @@ func (c *Container) makeBindMounts() error { } // Add Secret Mounts - secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) + secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.Containers.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false) for _, mount := range secretMounts { if _, ok := c.state.BindMounts[mount.Destination]; !ok { c.state.BindMounts[mount.Destination] = mount.Source @@ -1274,11 +1276,21 @@ func (c *Container) generateResolvConf() (string, error) { } } + var dns []net.IP + for _, i := range c.runtime.config.Containers.DNSServers { + result := net.ParseIP(i) + if result == nil { + return "", errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i) + } + dns = append(dns, result) + } + dnsServers := append(dns, c.config.DNSServer...) // If the user provided dns, it trumps all; then dns masq; then resolv.conf switch { - case len(c.config.DNSServer) > 0: + case len(dnsServers) > 0: + // We store DNS servers as net.IP, so need to convert to string - for _, server := range c.config.DNSServer { + for _, server := range dnsServers { nameservers = append(nameservers, server.String()) } case len(cniNameServers) > 0: @@ -1292,14 +1304,22 @@ func (c *Container) generateResolvConf() (string, error) { } } - search := resolvconf.GetSearchDomains(resolv.Content) - if len(c.config.DNSSearch) > 0 { - search = c.config.DNSSearch + var search []string + if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 { + if !util.StringInSlice(".", c.config.DNSSearch) { + search = c.runtime.config.Containers.DNSSearches + search = append(search, c.config.DNSSearch...) + } + } else { + search = resolvconf.GetSearchDomains(resolv.Content) } - options := resolvconf.GetOptions(resolv.Content) - if len(c.config.DNSOption) > 0 { - options = c.config.DNSOption + var options []string + if len(c.config.DNSOption) > 0 || len(c.runtime.config.Containers.DNSOptions) > 0 { + options = c.runtime.config.Containers.DNSOptions + options = append(options, c.config.DNSOption...) + } else { + options = resolvconf.GetOptions(resolv.Content) } destPath := filepath.Join(c.state.RunDir, "resolv.conf") @@ -1436,13 +1456,6 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error { return nil } -// Teardown CNI config on refresh -func (c *Container) refreshCNI() error { - // Let's try and delete any lingering network config... 
- podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP, c.config.StaticMAC) - return c.runtime.netPlugin.TearDownPod(podNetwork) -} - // Get cgroup path in a format suitable for the OCI spec func (c *Container) getOCICgroupPath() (string, error) { unified, err := cgroups.IsCgroup2UnifiedMode() @@ -1452,14 +1465,14 @@ func (c *Container) getOCICgroupPath() (string, error) { switch { case (rootless.IsRootless() && !unified) || c.config.NoCgroups: return "", nil - case c.runtime.config.CgroupManager == define.SystemdCgroupsManager: + case c.runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager: // When runc is set to use Systemd as a cgroup manager, it // expects cgroups to be passed as follows: // slice:prefix:name systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID()) logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups) return systemdCgroups, nil - case c.runtime.config.CgroupManager == define.CgroupfsCgroupsManager: + case c.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager: cgroupPath, err := c.CGroupPath() if err != nil { return "", err @@ -1467,6 +1480,6 @@ func (c *Container) getOCICgroupPath() (string, error) { logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath) return cgroupPath, nil default: - return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", c.runtime.config.CgroupManager) + return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", c.runtime.config.Engine.CgroupManager) } } diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go index 4abaa6362..395271b2a 100644 --- a/libpod/container_internal_unsupported.go +++ b/libpod/container_internal_unsupported.go @@ -41,10 +41,6 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error { return nil } -func (c *Container) refreshCNI() error { - return define.ErrNotImplemented -} - func (c *Container) getOCICgroupPath() (string, error) { return "", define.ErrNotImplemented } diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go index 98edc340a..2a35a2ae9 100644 --- a/libpod/container_top_linux.go +++ b/libpod/container_top_linux.go @@ -134,7 +134,9 @@ func (c *Container) execPS(args []string) ([]string, error) { }() cmd := append([]string{"ps"}, args...) - ec, err := c.Exec(false, false, map[string]string{}, cmd, "", "", streams, 0, nil, "") + config := new(ExecConfig) + config.Command = cmd + ec, err := c.Exec(config, streams, nil) if err != nil { return nil, err } else if ec != 0 { diff --git a/libpod/container_top_unsupported.go b/libpod/container_top_unsupported.go index 382c98b54..12f6cbb6c 100644 --- a/libpod/container_top_unsupported.go +++ b/libpod/container_top_unsupported.go @@ -4,6 +4,12 @@ package libpod import "github.com/containers/libpod/libpod/define" +// Top gathers statistics about the running processes in a container. It returns a +// []string for output +func (c *Container) Top(descriptors []string) ([]string, error) { + return nil, define.ErrNotImplemented +} + // GetContainerPidInformation returns process-related data of all processes in // the container. 
The output data can be controlled via the `descriptors` // argument which expects format descriptors and supports all AIXformat diff --git a/libpod/define/config.go b/libpod/define/config.go index 8bd59be75..5598f97a3 100644 --- a/libpod/define/config.go +++ b/libpod/define/config.go @@ -1,25 +1,17 @@ package define var ( - // DefaultInitPath is the default path to the container-init binary - DefaultInitPath = "/usr/libexec/podman/catatonit" // DefaultInfraImage to use for infra container - DefaultInfraImage = "k8s.gcr.io/pause:3.1" + DefaultInfraImage = "k8s.gcr.io/pause:3.2" // DefaultInfraCommand to be run in an infra container DefaultInfraCommand = "/pause" // DefaultSHMLockPath is the default path for SHM locks DefaultSHMLockPath = "/libpod_lock" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks DefaultRootlessSHMLockPath = "/libpod_rootless_lock" - // DefaultDetachKeys is the default keys sequence for detaching a - // container - DefaultDetachKeys = "ctrl-p,ctrl-q" ) const ( - // CtrRemoveTimeout is the default number of seconds to wait after stopping a container - // before sending the kill signal - CtrRemoveTimeout = 10 // DefaultTransport is a prefix that we apply to an image name // to check docker hub first for the image DefaultTransport = "docker://" diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go new file mode 100644 index 000000000..e6a19e5b4 --- /dev/null +++ b/libpod/define/container_inspect.go @@ -0,0 +1,624 @@ +package define + +import ( + "time" + + "github.com/containers/image/v5/manifest" + "github.com/containers/libpod/libpod/driver" + "github.com/cri-o/ocicni/pkg/ocicni" +) + +// InspectContainerConfig holds further data about how a container was initially +// configured. +type InspectContainerConfig struct { + // Container hostname + Hostname string `json:"Hostname"` + // Container domain name - unused at present + DomainName string `json:"Domainname"` + // User the container was launched with + User string `json:"User"` + // Unused, at present + AttachStdin bool `json:"AttachStdin"` + // Unused, at present + AttachStdout bool `json:"AttachStdout"` + // Unused, at present + AttachStderr bool `json:"AttachStderr"` + // Whether the container creates a TTY + Tty bool `json:"Tty"` + // Whether the container leaves STDIN open + OpenStdin bool `json:"OpenStdin"` + // Whether STDIN is only left open once. + // Presently not supported by Podman, unused. + StdinOnce bool `json:"StdinOnce"` + // Container environment variables + Env []string `json:"Env"` + // Container command + Cmd []string `json:"Cmd"` + // Container image + Image string `json:"Image"` + // Unused, at present. I've never seen this field populated. + Volumes map[string]struct{} `json:"Volumes"` + // Container working directory + WorkingDir string `json:"WorkingDir"` + // Container entrypoint + Entrypoint string `json:"Entrypoint"` + // On-build arguments - presently unused. More of Buildah's domain. + OnBuild *string `json:"OnBuild"` + // Container labels + Labels map[string]string `json:"Labels"` + // Container annotations + Annotations map[string]string `json:"Annotations"` + // Container stop signal + StopSignal uint `json:"StopSignal"` + // Configured healthcheck for the container + Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` + // CreateCommand is the full command plus arguments of the process the + // container has been created with. 
+ CreateCommand []string `json:"CreateCommand,omitempty"` +} + +// InspectRestartPolicy holds information about the container's restart policy. +type InspectRestartPolicy struct { + // Name contains the container's restart policy. + // Allowable values are "no" or "" (take no action), + // "on-failure" (restart on non-zero exit code, with an optional max + // retry count), and "always" (always restart on container stop, unless + // explicitly requested by API). + // Note that this is NOT actually a name of any sort - the poor naming + // is for Docker compatibility. + Name string `json:"Name"` + // MaximumRetryCount is the maximum number of retries allowed if the + // "on-failure" restart policy is in use. Not used if "on-failure" is + // not set. + MaximumRetryCount uint `json:"MaximumRetryCount"` +} + +// InspectLogConfig holds information about a container's configured log driver +// and is presently unused. It is retained for Docker compatibility. +type InspectLogConfig struct { + Type string `json:"Type"` + Config map[string]string `json:"Config"` //idk type, TODO +} + +// InspectBlkioWeightDevice holds information about the relative weight +// of an individual device node. Weights are used in the I/O scheduler to give +// relative priority to some accesses. +type InspectBlkioWeightDevice struct { + // Path is the path to the device this applies to. + Path string `json:"Path"` + // Weight is the relative weight the scheduler will use when scheduling + // I/O. + Weight uint16 `json:"Weight"` +} + +// InspectBlkioThrottleDevice holds information about a speed cap for a device +// node. This cap applies to a specific operation (read, write, etc) on the given +// node. +type InspectBlkioThrottleDevice struct { + // Path is the path to the device this applies to. + Path string `json:"Path"` + // Rate is the maximum rate. It is in either bytes per second or iops + // per second, determined by where it is used - documentation will + // indicate which is appropriate. + Rate uint64 `json:"Rate"` +} + +// InspectUlimit is a ulimit that will be applied to the container. +type InspectUlimit struct { + // Name is the name (type) of the ulimit. + Name string `json:"Name"` + // Soft is the soft limit that will be applied. + Soft uint64 `json:"Soft"` + // Hard is the hard limit that will be applied. + Hard uint64 `json:"Hard"` +} + +// InspectDevice is a single device that will be mounted into the container. +type InspectDevice struct { + // PathOnHost is the path of the device on the host. + PathOnHost string `json:"PathOnHost"` + // PathInContainer is the path of the device within the container. + PathInContainer string `json:"PathInContainer"` + // CgroupPermissions is the permissions of the mounted device. + // Presently not populated. + // TODO. + CgroupPermissions string `json:"CgroupPermissions"` +} + +// InspectHostPort provides information on a port on the host that a container's +// port is bound to. +type InspectHostPort struct { + // IP on the host we are bound to. "" if not specified (binding to all + // IPs). + HostIP string `json:"HostIp"` + // Port on the host we are bound to. No special formatting - just an + // integer stuffed into a string. + HostPort string `json:"HostPort"` +} + +// InspectMount provides a record of a single mount in a container. It contains +// fields for both named and normal volumes. Only user-specified volumes will be +// included, and tmpfs volumes are not included even if the user specified them. 
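Several of the structs that follow (weight and throttle devices) carry a Path that the OCI spec does not store; as the field comments below explain, it is recovered from the kernel's /sys/dev tree by major:minor number. A minimal sketch of that style of lookup, assuming a Linux host (the helper libpod actually uses may differ):

    // Resolve a block device path from major:minor via /sys/dev/block.
    func blockDevicePathSketch(major, minor int64) (string, error) {
        link := fmt.Sprintf("/sys/dev/block/%d:%d", major, minor)
        target, err := os.Readlink(link)
        if err != nil {
            return "", err
        }
        // The symlink target ends in the device name, e.g. ".../block/sda".
        return filepath.Join("/dev", filepath.Base(target)), nil
    }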
+type InspectMount struct { + // Whether the mount is a volume or bind mount. Allowed values are + // "volume" and "bind". + Type string `json:"Type"` + // The name of the volume. Empty for bind mounts. + Name string `json:"Name,omitempty"` + // The source directory for the volume. + Source string `json:"Source"` + // The destination directory for the volume. Specified as a path within + // the container, as it would be passed into the OCI runtime. + Destination string `json:"Destination"` + // The driver used for the named volume. Empty for bind mounts. + Driver string `json:"Driver"` + // Contains SELinux :z/:Z mount options. Unclear what, if anything, else + // goes in here. + Mode string `json:"Mode"` + // All remaining mount options. Additional data, not present in the + // original output. + Options []string `json:"Options"` + // Whether the volume is read-write + RW bool `json:"RW"` + // Mount propagation for the mount. Can be empty if not specified, but + // is always printed - no omitempty. + Propagation string `json:"Propagation"` +} + +// InspectContainerState provides a detailed record of a container's current +// state. It is returned as part of InspectContainerData. +// As with InspectContainerData, many portions of this struct are matched to +// Docker, but here we see more fields that are unused (nonsensical in the +// context of Libpod). +type InspectContainerState struct { + OciVersion string `json:"OciVersion"` + Status string `json:"Status"` + Running bool `json:"Running"` + Paused bool `json:"Paused"` + Restarting bool `json:"Restarting"` // TODO + OOMKilled bool `json:"OOMKilled"` + Dead bool `json:"Dead"` + Pid int `json:"Pid"` + ConmonPid int `json:"ConmonPid,omitempty"` + ExitCode int32 `json:"ExitCode"` + Error string `json:"Error"` // TODO + StartedAt time.Time `json:"StartedAt"` + FinishedAt time.Time `json:"FinishedAt"` + Healthcheck HealthCheckResults `json:"Healthcheck,omitempty"` +} + +// HealthCheckResults describes the results/logs from a healthcheck +type HealthCheckResults struct { + // Status healthy or unhealthy + Status string `json:"Status"` + // FailingStreak is the number of consecutive failed healthchecks + FailingStreak int `json:"FailingStreak"` + // Log describes healthcheck attempts and results + Log []HealthCheckLog `json:"Log"` +} + +// HealthCheckLog describes the results of a single healthcheck +type HealthCheckLog struct { + // Start time as string + Start string `json:"Start"` + // End time as a string + End string `json:"End"` + // Exitcode is 0 or 1 + ExitCode int `json:"ExitCode"` + // Output is the stdout/stderr from the healthcheck command + Output string `json:"Output"` +} + +// InspectContainerHostConfig holds information used when the container was +// created. +// It's very much a Docker-specific struct, retained (mostly) as-is for +// compatibility. We fill individual fields as best as we can, inferring as much +// as possible from the spec and container config. +// Some things cannot be inferred. These will be populated by spec annotations +// (if available). +// Field names are fixed for compatibility and cannot be changed. +// As such, silence lint warnings about them. +//nolint +type InspectContainerHostConfig struct { + // Binds contains an array of user-added mounts. + // Both volume mounts and named volumes are included. + // Tmpfs mounts are NOT included. + // In 'docker inspect' this is separated into 'Binds' and 'Mounts' based + // on how a mount was added.
We do not make this distinction and do not + // include a Mounts field in inspect. + // Format: <src>:<destination>[:<comma-separated options>] + Binds []string `json:"Binds"` + // ContainerIDFile is a file created during container creation to hold + // the ID of the created container. + // This is not handled within libpod and is stored in an annotation. + ContainerIDFile string `json:"ContainerIDFile"` + // LogConfig contains information on the container's logging backend + LogConfig *InspectLogConfig `json:"LogConfig"` + // NetworkMode is the configuration of the container's network + // namespace. + // Populated as follows: + // default - A network namespace is being created and configured via CNI + // none - A network namespace is being created, not configured via CNI + // host - No network namespace created + // container:<id> - Using another container's network namespace + // ns:<path> - A path to a network namespace has been specified + NetworkMode string `json:"NetworkMode"` + // PortBindings contains the container's port bindings. + // It is formatted as map[string][]InspectHostPort. + // The string key here is formatted as <integer port number>/<protocol> + // and represents the container port. A single container port may be + // bound to multiple host ports (on different IPs). + PortBindings map[string][]InspectHostPort `json:"PortBindings"` + // RestartPolicy contains the container's restart policy. + RestartPolicy *InspectRestartPolicy `json:"RestartPolicy"` + // AutoRemove is whether the container will be automatically removed on + // exiting. + // It is not handled directly within libpod and is stored in an + // annotation. + AutoRemove bool `json:"AutoRemove"` + // VolumeDriver is presently unused and is retained for Docker + // compatibility. + VolumeDriver string `json:"VolumeDriver"` + // VolumesFrom is a list of containers which this container uses volumes + // from. This is not handled directly within libpod and is stored in an + // annotation. + // It is formatted as an array of container names and IDs. + VolumesFrom []string `json:"VolumesFrom"` + // CapAdd is a list of capabilities added to the container. + // It is not directly stored by Libpod, and instead computed from the + // capabilities listed in the container's spec, compared against a set + // of default capabilities. + CapAdd []string `json:"CapAdd"` + // CapDrop is a list of capabilities removed from the container. + // It is not directly stored by libpod, and instead computed from the + // capabilities listed in the container's spec, compared against a set + // of default capabilities. + CapDrop []string `json:"CapDrop"` + // Dns is a list of DNS nameservers that will be added to the + // container's resolv.conf + Dns []string `json:"Dns"` + // DnsOptions is a list of DNS options that will be set in the + // container's resolv.conf + DnsOptions []string `json:"DnsOptions"` + // DnsSearch is a list of DNS search domains that will be set in the + // container's resolv.conf + DnsSearch []string `json:"DnsSearch"` + // ExtraHosts contains hosts that will be added to the container's + // /etc/hosts. + ExtraHosts []string `json:"ExtraHosts"` + // GroupAdd contains groups that the user inside the container will be + // added to. + GroupAdd []string `json:"GroupAdd"` + // IpcMode represents the configuration of the container's IPC + // namespace.
+ // Populated as follows: + // "" (empty string) - Default, an IPC namespace will be created + // host - No IPC namespace created + // container:<id> - Using another container's IPC namespace + // ns:<path> - A path to an IPC namespace has been specified + IpcMode string `json:"IpcMode"` + // Cgroup contains the container's cgroup. It is presently not + // populated. + // TODO. + Cgroup string `json:"Cgroup"` + // Cgroups contains the container's CGroup mode. + // Allowed values are "default" (container is creating CGroups) and + // "disabled" (container is not creating CGroups). + // This is Libpod-specific and not included in `docker inspect`. + Cgroups string `json:"Cgroups"` + // Links is unused, and provided purely for Docker compatibility. + Links []string `json:"Links"` + // OomScoreAdj is an adjustment that will be made to the container's OOM + // score. + OomScoreAdj int `json:"OomScoreAdj"` + // PidMode represents the configuration of the container's PID + // namespace. + // Populated as follows: + // "" (empty string) - Default, a PID namespace will be created + // host - No PID namespace created + // container:<id> - Using another container's PID namespace + // ns:<path> - A path to a PID namespace has been specified + PidMode string `json:"PidMode"` + // Privileged indicates whether the container is running with elevated + // privileges. + // This has a very specific meaning in the Docker sense, so it's very + // difficult to decode from the spec and config, and so is stored as an + // annotation. + Privileged bool `json:"Privileged"` + // PublishAllPorts indicates whether image ports are being published. + // This is not directly stored in libpod and is saved as an annotation. + PublishAllPorts bool `json:"PublishAllPorts"` + // ReadonlyRootfs is whether the container will be mounted read-only. + ReadonlyRootfs bool `json:"ReadonlyRootfs"` + // SecurityOpt is a list of security-related options that are set in the + // container. + SecurityOpt []string `json:"SecurityOpt"` + // Tmpfs is a list of tmpfs filesystems that will be mounted into the + // container. + // It is a map of destination path to options for the mount. + Tmpfs map[string]string `json:"Tmpfs"` + // UTSMode represents the configuration of the container's UTS + // namespace. + // Populated as follows: + // "" (empty string) - Default, a UTS namespace will be created + // host - no UTS namespace created + // container:<id> - Using another container's UTS namespace + // ns:<path> - A path to a UTS namespace has been specified + UTSMode string `json:"UTSMode"` + // UsernsMode represents the configuration of the container's user + // namespace. + // When running rootless, a user namespace is created outside of libpod + // to allow some privileged operations. This will not be reflected here. + // Populated as follows: + // "" (empty string) - No user namespace will be created + // private - The container will be run in a user namespace + // container:<id> - Using another container's user namespace + // ns:<path> - A path to a user namespace has been specified + // TODO Rootless has an additional 'keep-id' option, presently not + // reflected here. + UsernsMode string `json:"UsernsMode"` + // ShmSize is the size of the container's SHM device. + ShmSize int64 `json:"ShmSize"` + // Runtime is provided purely for Docker compatibility. + // It is set unconditionally to "oci" as Podman does not presently + // support non-OCI runtimes.
+ Runtime string `json:"Runtime"` + // ConsoleSize is an array of 2 integers showing the size of the + // container's console. + // It is only set if the container is creating a terminal. + // TODO. + ConsoleSize []uint `json:"ConsoleSize"` + // Isolation is presently unused and provided solely for Docker + // compatibility. + Isolation string `json:"Isolation"` + // CpuShares indicates the CPU resources allocated to the container. + // It is a relative weight in the scheduler for assigning CPU time + // versus other CGroups. + CpuShares uint64 `json:"CpuShares"` + // Memory indicates the memory resources allocated to the container. + // This is the limit (in bytes) of RAM the container may use. + Memory int64 `json:"Memory"` + // NanoCpus indicates number of CPUs allocated to the container. + // It is an integer where one full CPU is indicated by 1000000000 (one + // billion). + // Thus, 2.5 CPUs (fractional portions of CPUs are allowed) would be + // 2500000000 (2.5 billion). + // In 'docker inspect' this is set exclusively of two further options in + // the output (CpuPeriod and CpuQuota) which are both used to implement + // this functionality. + // We can't distinguish here, so if CpuQuota is set to the default of + // 100000, we will set both CpuQuota, CpuPeriod, and NanoCpus. If + // CpuQuota is not the default, we will not set NanoCpus. + NanoCpus int64 `json:"NanoCpus"` + // CgroupParent is the CGroup parent of the container. + // Only set if not default. + CgroupParent string `json:"CgroupParent"` + // BlkioWeight indicates the I/O resources allocated to the container. + // It is a relative weight in the scheduler for assigning I/O time + // versus other CGroups. + BlkioWeight uint16 `json:"BlkioWeight"` + // BlkioWeightDevice is an array of I/O resource priorities for + // individual device nodes. + // Unfortunately, the spec only stores the device's Major/Minor numbers + // and not the path, which is used here. + // Fortunately, the kernel provides an interface for retrieving the path + // of a given node by major:minor at /sys/dev/. However, the exact path + // in use may not be what was used in the original CLI invocation - + // though it is guaranteed that the device node will be the same, and + // using the given path will be functionally identical. + BlkioWeightDevice []InspectBlkioWeightDevice `json:"BlkioWeightDevice"` + // BlkioDeviceReadBps is an array of I/O throttle parameters for + // individual device nodes. + // This specifically sets read rate cap in bytes per second for device + // nodes. + // As with BlkioWeightDevice, we pull the path from /sys/dev, and we + // don't guarantee the path will be identical to the original (though + // the node will be). + BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadBps"` + // BlkioDeviceWriteBps is an array of I/O throttle parameters for + // individual device nodes. + // this specifically sets write rate cap in bytes per second for device + // nodes. + // as with BlkioWeightDevice, we pull the path from /sys/dev, and we + // don't guarantee the path will be identical to the original (though + // the node will be). + BlkioDeviceWriteBps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteBps"` + // BlkioDeviceReadIOps is an array of I/O throttle parameters for + // individual device nodes. + // This specifically sets the read rate cap in iops per second for + // device nodes. 
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we + // don't guarantee the path will be identical to the original (though + // the node will be). + BlkioDeviceReadIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadIOps"` + // BlkioDeviceWriteIOps is an array of I/O throttle parameters for + // individual device nodes. + // This specifically sets the write rate cap in iops per second for + // device nodes. + // As with BlkioWeightDevice, we pull the path from /sys/dev, and we + // don't guarantee the path will be identical to the original (though + // the node will be). + BlkioDeviceWriteIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteIOps"` + // CpuPeriod is the length of a CPU period in microseconds. + // It relates directly to CpuQuota. + CpuPeriod uint64 `json:"CpuPeriod"` + // CpuQuota is the amount of time (in microseconds) that a container + // can use the CPU in every CpuPeriod. + CpuQuota int64 `json:"CpuQuota"` + // CpuRealtimePeriod is the length of time (in microseconds) of the CPU + // realtime period. If set to 0, no time will be allocated to realtime + // tasks. + CpuRealtimePeriod uint64 `json:"CpuRealtimePeriod"` + // CpuRealtimeRuntime is the length of time (in microseconds) allocated + // for realtime tasks within every CpuRealtimePeriod. + CpuRealtimeRuntime int64 `json:"CpuRealtimeRuntime"` + // CpusetCpus is the set of CPUs that the container will execute + // on. Formatted as `0-3` or `0,2`. Default (if unset) is all CPUs. + CpusetCpus string `json:"CpusetCpus"` + // CpusetMems is the set of memory nodes the container will use. + // Formatted as `0-3` or `0,2`. Default (if unset) is all memory nodes. + CpusetMems string `json:"CpusetMems"` + // Devices is a list of device nodes that will be added to the + // container. + // These are stored in the OCI spec only as type, major, minor while we + // display the host path. We convert this with /sys/dev, but we cannot + // guarantee that the host path will be identical - only that the actual + // device will be. + Devices []InspectDevice `json:"Devices"` + // DiskQuota is the maximum amount of disk space the container may use + // (in bytes). + // Presently not populated. + // TODO. + DiskQuota uint64 `json:"DiskQuota"` + // KernelMemory is the maximum amount of memory the kernel will devote + // to the container. + KernelMemory int64 `json:"KernelMemory"` + // MemoryReservation is the reservation (soft limit) of memory available + // to the container. Soft limits are warnings only and can be exceeded. + MemoryReservation int64 `json:"MemoryReservation"` + // MemorySwap is the total limit for all memory available to the + // container, including swap. 0 indicates that there is no limit to the + // amount of memory available. + MemorySwap int64 `json:"MemorySwap"` + // MemorySwappiness is the willingness of the kernel to page container + // memory to swap. It is an integer from 0 to 100, with low numbers + // being more likely to be put into swap. + // -1, the default, will not set swappiness and use the system defaults. + MemorySwappiness int64 `json:"MemorySwappiness"` + // OomKillDisable indicates whether the kernel OOM killer is disabled + // for the container. + OomKillDisable bool `json:"OomKillDisable"` + // Init indicates whether the container has an init mounted into it. + Init bool `json:"Init,omitempty"` + // PidsLimit is the maximum number of PIDs that may be created within + // the container. 0, the default, indicates no limit.
+ PidsLimit int64 `json:"PidsLimit"` + // Ulimits is a set of ulimits that will be set within the container. + Ulimits []InspectUlimit `json:"Ulimits"` + // CpuCount is Windows-only and not presently implemented. + CpuCount uint64 `json:"CpuCount"` + // CpuPercent is Windows-only and not presently implemented. + CpuPercent uint64 `json:"CpuPercent"` + // IOMaximumIOps is Windows-only and not presently implemented. + IOMaximumIOps uint64 `json:"IOMaximumIOps"` + // IOMaximumBandwidth is Windows-only and not presently implemented. + IOMaximumBandwidth uint64 `json:"IOMaximumBandwidth"` +} + +// InspectBasicNetworkConfig holds basic configuration information (e.g. IP +// addresses, MAC address, subnet masks, etc) that are common for all networks +// (both additional and main). +type InspectBasicNetworkConfig struct { + // EndpointID is unused, maintained exclusively for compatibility. + EndpointID string `json:"EndpointID"` + // Gateway is the IP address of the gateway this network will use. + Gateway string `json:"Gateway"` + // IPAddress is the IP address for this network. + IPAddress string `json:"IPAddress"` + // IPPrefixLen is the length of the subnet mask of this network. + IPPrefixLen int `json:"IPPrefixLen"` + // SecondaryIPAddresses is a list of extra IP Addresses that the + // container has been assigned in this network. + SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty"` + // IPv6Gateway is the IPv6 gateway this network will use. + IPv6Gateway string `json:"IPv6Gateway"` + // GlobalIPv6Address is the global-scope IPv6 Address for this network. + GlobalIPv6Address string `json:"GlobalIPv6Address"` + // GlobalIPv6PrefixLen is the length of the subnet mask of this network. + GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen"` + // SecondaryIPv6Addresses is a list of extra IPv6 Addresses that the + // container has been assigned in this network. + SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty"` + // MacAddress is the MAC address for the interface in this network. + MacAddress string `json:"MacAddress"` + // AdditionalMacAddresses is a set of additional MAC Addresses beyond + // the first. CNI may configure more than one interface for a single + // network, which can cause this. + AdditionalMacAddresses []string `json:"AdditionalMACAddresses,omitempty"` +} + +// InspectAdditionalNetwork holds information about non-default CNI networks the +// container has been connected to. +// As with InspectNetworkSettings, many fields are unused and maintained only +// for compatibility with Docker. +type InspectAdditionalNetwork struct { + InspectBasicNetworkConfig + + // Name of the network we're connecting to. + NetworkID string `json:"NetworkID,omitempty"` + // DriverOpts is presently unused and maintained exclusively for + // compatibility. + DriverOpts map[string]string `json:"DriverOpts"` + // IPAMConfig is presently unused and maintained exclusively for + // compatibility. + IPAMConfig map[string]string `json:"IPAMConfig"` + // Links is presently unused and maintained exclusively for + // compatibility. + Links []string `json:"Links"` +} + +// InspectNetworkSettings holds information about the network settings of the +// container. +// Many fields are maintained only for compatibility with `docker inspect` and +// are unused within Libpod.
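To make the NanoCpus comment earlier in this struct concrete: with the default CpuPeriod of 100000 microseconds, a CpuQuota of 250000 corresponds to 2.5 CPUs, so NanoCpus = 2500000000. A sketch of that conversion, valid only under the stated assumption that the period is the default (mirroring the cutover rule the comment describes, not the literal upstream code):

    // Sketch: derive NanoCpus from quota when the period is the 100000us default.
    func nanoCPUsSketch(quota int64, period uint64) int64 {
        const defaultPeriod = 100000
        if period != defaultPeriod || quota <= 0 {
            return 0 // cannot distinguish; leave NanoCpus unset
        }
        // quota/period CPUs, scaled so one full CPU == 1000000000.
        return quota * 1000000000 / defaultPeriod
    }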
+type InspectNetworkSettings struct { + InspectBasicNetworkConfig + + Bridge string `json:"Bridge"` + SandboxID string `json:"SandboxID"` + HairpinMode bool `json:"HairpinMode"` + LinkLocalIPv6Address string `json:"LinkLocalIPv6Address"` + LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen"` + Ports []ocicni.PortMapping `json:"Ports"` + SandboxKey string `json:"SandboxKey"` + // Networks contains information on non-default CNI networks this + // container has joined. + // It is a map of network name to network information. + Networks map[string]*InspectAdditionalNetwork `json:"Networks,omitempty"` +} + +// InspectContainerData provides a detailed record of a container's configuration +// and state as viewed by Libpod. +// Large portions of this structure are defined such that the output is +// compatible with `docker inspect` JSON, but additional fields have been added +// as required to share information not in the original output. +type InspectContainerData struct { + ID string `json:"Id"` + Created time.Time `json:"Created"` + Path string `json:"Path"` + Args []string `json:"Args"` + State *InspectContainerState `json:"State"` + Image string `json:"Image"` + ImageName string `json:"ImageName"` + Rootfs string `json:"Rootfs"` + Pod string `json:"Pod"` + ResolvConfPath string `json:"ResolvConfPath"` + HostnamePath string `json:"HostnamePath"` + HostsPath string `json:"HostsPath"` + StaticDir string `json:"StaticDir"` + OCIConfigPath string `json:"OCIConfigPath,omitempty"` + OCIRuntime string `json:"OCIRuntime,omitempty"` + LogPath string `json:"LogPath"` + LogTag string `json:"LogTag"` + ConmonPidFile string `json:"ConmonPidFile"` + Name string `json:"Name"` + RestartCount int32 `json:"RestartCount"` + Driver string `json:"Driver"` + MountLabel string `json:"MountLabel"` + ProcessLabel string `json:"ProcessLabel"` + AppArmorProfile string `json:"AppArmorProfile"` + EffectiveCaps []string `json:"EffectiveCaps"` + BoundingCaps []string `json:"BoundingCaps"` + ExecIDs []string `json:"ExecIDs"` + GraphDriver *driver.Data `json:"GraphDriver"` + SizeRw *int64 `json:"SizeRw,omitempty"` + SizeRootFs int64 `json:"SizeRootFs,omitempty"` + Mounts []InspectMount `json:"Mounts"` + Dependencies []string `json:"Dependencies"` + NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO + ExitCommand []string `json:"ExitCommand"` + Namespace string `json:"Namespace"` + IsInfra bool `json:"IsInfra"` + Config *InspectContainerConfig `json:"Config"` + HostConfig *InspectContainerHostConfig `json:"HostConfig"` +} diff --git a/libpod/define/containerstate.go b/libpod/define/containerstate.go index e7d258e21..6da49a594 100644 --- a/libpod/define/containerstate.go +++ b/libpod/define/containerstate.go @@ -78,3 +78,37 @@ func StringToContainerStatus(status string) (ContainerStatus, error) { return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) } } + +// ContainerExecStatus is the status of an exec session within a container. +type ContainerExecStatus int + +const ( + // ExecStateUnknown indicates that the state of the exec session is not + // known. + ExecStateUnknown ContainerExecStatus = iota + // ExecStateCreated indicates that the exec session has been created but + // not yet started + ExecStateCreated ContainerExecStatus = iota + // ExecStateRunning indicates that the exec session has been started but + // has not yet exited. 
+ ExecStateRunning ContainerExecStatus = iota + // ExecStateStopped indicates that the exec session has stopped and is + // no longer running. + ExecStateStopped ContainerExecStatus = iota +) + +// String returns a string representation of a given exec state. +func (s ContainerExecStatus) String() string { + switch s { + case ExecStateUnknown: + return "unknown" + case ExecStateCreated: + return "created" + case ExecStateRunning: + return "running" + case ExecStateStopped: + return "stopped" + default: + return "bad state" + } +} diff --git a/libpod/define/errors.go b/libpod/define/errors.go index b79cf08dc..3ba343789 100644 --- a/libpod/define/errors.go +++ b/libpod/define/errors.go @@ -20,6 +20,10 @@ var ( // ErrNoSuchVolume indicates the requested volume does not exist ErrNoSuchVolume = errors.New("no such volume") + // ErrNoSuchExecSession indicates that the requested exec session does + // not exist. + ErrNoSuchExecSession = errors.New("no such exec session") + // ErrCtrExists indicates a container with the same name or ID already // exists ErrCtrExists = errors.New("container already exists") @@ -29,10 +33,16 @@ var ( ErrImageExists = errors.New("image already exists") // ErrVolumeExists indicates a volume with the same name already exists ErrVolumeExists = errors.New("volume already exists") + // ErrExecSessionExists indicates an exec session with the same ID + // already exists. + ErrExecSessionExists = errors.New("exec session already exists") // ErrCtrStateInvalid indicates a container is in an improper state for // the requested operation ErrCtrStateInvalid = errors.New("container state improper") + // ErrExecSessionStateInvalid indicates that an exec session is in an + // improper state for the requested operation + ErrExecSessionStateInvalid = errors.New("exec session state improper") // ErrVolumeBeingUsed indicates that a volume is being used by at least one container ErrVolumeBeingUsed = errors.New("volume is being used") @@ -90,6 +100,9 @@ var ( // ErrVolumeRemoved indicates that the volume has already been removed and // no further operations can be performed on it ErrVolumeRemoved = errors.New("volume has already been removed") + // ErrExecSessionRemoved indicates that the exec session has already + // been removed and no further operations can be performed on it. + ErrExecSessionRemoved = errors.New("exec session has already been removed") // ErrDBClosed indicates that the connection to the state database has // already been closed diff --git a/libpod/define/inspect.go b/libpod/define/inspect.go new file mode 100644 index 000000000..b7cd13f82 --- /dev/null +++ b/libpod/define/inspect.go @@ -0,0 +1,54 @@ +package define + +// InspectExecSession contains information about a given exec session. +type InspectExecSession struct { + // CanRemove is legacy and used purely for compatibility reasons. + // Will always be set to true, unless the exec session is running. + CanRemove bool `json:"CanRemove"` + // ContainerID is the ID of the container this exec session is attached + // to. + ContainerID string `json:"ContainerID"` + // DetachKeys are the detach keys used by the exec session. + // If set to "" the default keys are being used. + // Will show "<none>" if no detach keys are set. + DetachKeys string `json:"DetachKeys"` + // ExitCode is the exit code of the exec session. Will be set to 0 if + // the exec session has not yet exited. + ExitCode int `json:"ExitCode"` + // ID is the ID of the exec session. 
+ ID string `json:"ID"`
+ // OpenStderr is whether the container's STDERR stream will be attached.
+ // Always set to true if the exec session created a TTY.
+ OpenStderr bool `json:"OpenStderr"`
+ // OpenStdin is whether the container's STDIN stream will be attached.
+ OpenStdin bool `json:"OpenStdin"`
+ // OpenStdout is whether the container's STDOUT stream will be attached.
+ // Always set to true if the exec session created a TTY.
+ OpenStdout bool `json:"OpenStdout"`
+ // Running is whether the exec session is running.
+ Running bool `json:"Running"`
+ // Pid is the PID of the exec session's process.
+ // Will be set to 0 if the exec session is not running.
+ Pid int `json:"Pid"`
+ // ProcessConfig contains information about the exec session's process.
+ ProcessConfig *InspectExecProcess `json:"ProcessConfig"`
+}
+
+// InspectExecProcess contains information about the process in a given exec
+// session.
+type InspectExecProcess struct {
+ // Arguments are the arguments to the entrypoint command of the exec
+ // session.
+ Arguments []string `json:"arguments"`
+ // Entrypoint is the entrypoint for the exec session (the command that
+ // will be executed in the container).
+ Entrypoint string `json:"entrypoint"`
+ // Privileged is whether the exec session will be started with elevated
+ // privileges.
+ Privileged bool `json:"privileged"`
+ // Tty is whether the exec session created a terminal.
+ Tty bool `json:"tty"`
+ // User is the user the exec session was started as.
+ User string `json:"user"`
+}
diff --git a/libpod/define/runtime.go b/libpod/define/runtime.go
index 4d8c6cb4d..1539e19ee 100644
--- a/libpod/define/runtime.go
+++ b/libpod/define/runtime.go
@@ -18,10 +18,6 @@ const (
 SQLiteStateStore RuntimeStateStore = iota
 // BoltDBStateStore is a state backed by a BoltDB database
 BoltDBStateStore RuntimeStateStore = iota
- // CgroupfsCgroupsManager represents cgroupfs native cgroup manager
- CgroupfsCgroupsManager = "cgroupfs"
- // SystemdCgroupsManager represents systemd native cgroup manager
- SystemdCgroupsManager = "systemd"
 // ContainerCreateTimeout is the timeout before we decide we've failed
 // to create a container.
 // TODO: Make this generic - all OCI runtime operations should use the
@@ -29,9 +25,4 @@ const (
 // TODO: Consider dropping from 240 to 60 seconds. I don't think waiting
 // 4 minutes versus 1 minute makes a real difference.
 ContainerCreateTimeout = 240 * time.Second
- // DefaultShmSize is the default shm size
- DefaultShmSize = 64 * 1024 * 1024
- // NsRunDir is the default directory in which running network namespaces
- // are stored
- NsRunDir = "/var/run/netns"
 )
diff --git a/libpod/events.go b/libpod/events.go
index be21e510a..20ebecc66 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -11,8 +11,8 @@ import (
 // newEventer returns an eventer that can be used to read/write events
 func (r *Runtime) newEventer() (events.Eventer, error) {
 options := events.EventerOptions{
- EventerType: r.config.EventsLogger,
- LogFilePath: r.config.EventsLogFilePath,
+ EventerType: r.config.Engine.EventsLogger,
+ LogFilePath: r.config.Engine.EventsLogFilePath,
 }
 return events.NewEventer(options)
}
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 20c01baff..8fe551c5d 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -98,6 +98,8 @@ const (
 // Attach ...
 Attach Status = "attach"
+ // AutoUpdate ...
+ AutoUpdate Status = "auto-update"
 // Checkpoint ...
 Checkpoint Status = "checkpoint"
 // Cleanup ...
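
As a rough illustration of how the exec-session states and errors introduced above are meant to compose, a caller can gate operations on define.ContainerExecStatus. This sketch is not part of the commit, and the canStartSession helper is hypothetical:

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/define"
	"github.com/pkg/errors"
)

// canStartSession is a hypothetical helper: only sessions in the
// Created state may be started, mirroring the state machine above.
func canStartSession(state define.ContainerExecStatus) error {
	if state != define.ExecStateCreated {
		return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot start session in state %q", state.String())
	}
	return nil
}

func main() {
	states := []define.ContainerExecStatus{
		define.ExecStateCreated,
		define.ExecStateRunning,
		define.ExecStateStopped,
	}
	for _, s := range states {
		if err := canStartSession(s); err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("session in state %q may be started\n", s.String())
	}
}
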
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 9c274c4f3..08a613dfe 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -53,28 +53,6 @@ const ( HealthCheckStarting string = "starting" ) -// HealthCheckResults describes the results/logs from a healthcheck -type HealthCheckResults struct { - // Status healthy or unhealthy - Status string `json:"Status"` - // FailingStreak is the number of consecutive failed healthchecks - FailingStreak int `json:"FailingStreak"` - // Log describes healthcheck attempts and results - Log []HealthCheckLog `json:"Log"` -} - -// HealthCheckLog describes the results of a single healthcheck -type HealthCheckLog struct { - // Start time as string - Start string `json:"Start"` - // End time as a string - End string `json:"End"` - // Exitcode is 0 or 1 - ExitCode int `json:"ExitCode"` - // Output is the stdout/stderr from the healthcheck command - Output string `json:"Output"` -} - // hcWriteCloser allows us to use bufio as a WriteCloser type hcWriteCloser struct { *bufio.Writer @@ -143,7 +121,9 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) { logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID()) timeStart := time.Now() hcResult := HealthCheckSuccess - _, hcErr := c.Exec(false, false, map[string]string{}, newCommand, "", "", streams, 0, nil, "") + config := new(ExecConfig) + config.Command = newCommand + _, hcErr := c.Exec(config, streams, nil) if hcErr != nil { errCause := errors.Cause(hcErr) hcResult = HealthCheckFailure @@ -198,8 +178,8 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) { return HealthCheckDefined, nil } -func newHealthCheckLog(start, end time.Time, exitCode int, log string) HealthCheckLog { - return HealthCheckLog{ +func newHealthCheckLog(start, end time.Time, exitCode int, log string) define.HealthCheckLog { + return define.HealthCheckLog{ Start: start.Format(time.RFC3339Nano), End: end.Format(time.RFC3339Nano), ExitCode: exitCode, @@ -223,7 +203,7 @@ func (c *Container) updateHealthStatus(status string) error { } // UpdateHealthCheckLog parses the health check results and writes the log -func (c *Container) updateHealthCheckLog(hcl HealthCheckLog, inStartPeriod bool) error { +func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod bool) error { healthCheck, err := c.GetHealthCheckLog() if err != nil { return err @@ -264,8 +244,8 @@ func (c *Container) healthCheckLogPath() string { // GetHealthCheckLog returns HealthCheck results by reading the container's // health check log file. 
If the health check log file does not exist, then
// an empty healthcheck struct is returned
-func (c *Container) GetHealthCheckLog() (HealthCheckResults, error) {
- var healthCheck HealthCheckResults
+func (c *Container) GetHealthCheckLog() (define.HealthCheckResults, error) {
+ var healthCheck define.HealthCheckResults
 if _, err := os.Stat(c.healthCheckLogPath()); os.IsNotExist(err) {
 return healthCheck, nil
 }
diff --git a/libpod/healthcheck_linux.go b/libpod/healthcheck_linux.go
index 5da2d311b..42dba6610 100644
--- a/libpod/healthcheck_linux.go
+++ b/libpod/healthcheck_linux.go
@@ -4,50 +4,14 @@ import (
 "fmt"
 "os"
 "os/exec"
- "path/filepath"
- "strconv"
 "strings"
 "github.com/containers/libpod/pkg/rootless"
- "github.com/coreos/go-systemd/v22/dbus"
- godbus "github.com/godbus/dbus/v5"
+ "github.com/containers/libpod/pkg/systemd"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
)
-func dbusAuthRootlessConnection(createBus func(opts ...godbus.ConnOption) (*godbus.Conn, error)) (*godbus.Conn, error) {
- conn, err := createBus()
- if err != nil {
- return nil, err
- }
-
- methods := []godbus.Auth{godbus.AuthExternal(strconv.Itoa(rootless.GetRootlessUID()))}
-
- err = conn.Auth(methods)
- if err != nil {
- conn.Close()
- return nil, err
- }
-
- return conn, nil
-}
-
-func newRootlessConnection() (*dbus.Conn, error) {
- return dbus.NewConnection(func() (*godbus.Conn, error) {
- return dbusAuthRootlessConnection(func(opts ...godbus.ConnOption) (*godbus.Conn, error) {
- path := filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "systemd/private")
- return godbus.Dial(fmt.Sprintf("unix:path=%s", path))
- })
- })
-}
-
-func getConnection() (*dbus.Conn, error) {
- if rootless.IsRootless() {
- return newRootlessConnection()
- }
- return dbus.NewSystemdConnection()
-}
-
// createTimer creates systemd timers for the healthchecks of a container
func (c *Container) createTimer() error {
 if c.disableHealthCheckSystemd() {
 return nil
 }
@@ -64,7 +28,7 @@ func (c *Container) createTimer() error {
 }
 cmd = append(cmd, "--unit", c.ID(), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID())
- conn, err := getConnection()
+ conn, err := systemd.ConnectToDBUS()
 if err != nil {
 return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
 }
@@ -83,7 +47,7 @@ func (c *Container) startTimer() error {
 if c.disableHealthCheckSystemd() {
 return nil
 }
- conn, err := getConnection()
+ conn, err := systemd.ConnectToDBUS()
 if err != nil {
 return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
 }
@@ -98,7 +62,7 @@ func (c *Container) removeTimer() error {
 if c.disableHealthCheckSystemd() {
 return nil
 }
- conn, err := getConnection()
+ conn, err := systemd.ConnectToDBUS()
 if err != nil {
 return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
 }
diff --git a/libpod/image/manifests.go b/libpod/image/manifests.go
new file mode 100644
index 000000000..9dbeb4cc5
--- /dev/null
+++ b/libpod/image/manifests.go
@@ -0,0 +1,154 @@
+package image
+
+import (
+ "context"
+
+ "github.com/containers/buildah/manifests"
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Options for adding a manifest
+// swagger:model ManifestAddOpts
+type ManifestAddOpts struct {
+ All bool `json:"all"`
+ Annotation map[string]string
`json:"annotation"` + Arch string `json:"arch"` + Features []string `json:"features"` + Images []string `json:"images"` + OSVersion string `json:"os_version"` + Variant string `json:"variant"` +} + +// InspectManifest returns a dockerized version of the manifest list +func (i *Image) InspectManifest() (*manifest.Schema2List, error) { + list, err := i.getManifestList() + if err != nil { + return nil, err + } + return list.Docker(), nil +} + +// RemoveManifest removes the given digest from the manifest list. +func (i *Image) RemoveManifest(d digest.Digest) (string, error) { + list, err := i.getManifestList() + if err != nil { + return "", err + } + if err := list.Remove(d); err != nil { + return "", err + } + return list.SaveToImage(i.imageruntime.store, i.ID(), nil, "") +} + +// getManifestList is a helper to obtain a manifest list +func (i *Image) getManifestList() (manifests.List, error) { + _, list, err := manifests.LoadFromImage(i.imageruntime.store, i.ID()) + return list, err +} + +// CreateManifestList creates a new manifest list and can optionally add given images +// to the list +func CreateManifestList(rt *Runtime, systemContext types.SystemContext, names []string, imgs []string, all bool) (string, error) { + list := manifests.Create() + opts := ManifestAddOpts{Images: names, All: all} + for _, img := range imgs { + var ref types.ImageReference + newImage, err := rt.NewFromLocal(img) + if err == nil { + ir, err := newImage.toImageRef(context.Background()) + if err != nil { + return "", err + } + if ir == nil { + return "", errors.New("unable to convert image to ImageReference") + } + ref = ir.Reference() + } else { + ref, err = alltransports.ParseImageName(img) + if err != nil { + return "", err + } + } + list, err = addManifestToList(ref, list, systemContext, opts) + if err != nil { + return "", err + } + } + return list.SaveToImage(rt.store, "", names, manifest.DockerV2ListMediaType) +} + +func addManifestToList(ref types.ImageReference, list manifests.List, systemContext types.SystemContext, opts ManifestAddOpts) (manifests.List, error) { + d, err := list.Add(context.Background(), &systemContext, ref, opts.All) + if err != nil { + return nil, err + } + if len(opts.OSVersion) > 0 { + if err := list.SetOSVersion(d, opts.OSVersion); err != nil { + return nil, err + } + } + if len(opts.Features) > 0 { + if err := list.SetFeatures(d, opts.Features); err != nil { + return nil, err + } + } + if len(opts.Arch) > 0 { + if err := list.SetArchitecture(d, opts.Arch); err != nil { + return nil, err + } + } + if len(opts.Variant) > 0 { + if err := list.SetVariant(d, opts.Variant); err != nil { + return nil, err + } + } + if len(opts.Annotation) > 0 { + if err := list.SetAnnotations(&d, opts.Annotation); err != nil { + return nil, err + } + } + return list, err +} + +// AddManifest adds a manifest to a given manifest list. 
+func (i *Image) AddManifest(systemContext types.SystemContext, opts ManifestAddOpts) (string, error) { + var ( + ref types.ImageReference + ) + newImage, err := i.imageruntime.NewFromLocal(opts.Images[0]) + if err == nil { + ir, err := newImage.toImageRef(context.Background()) + if err != nil { + return "", err + } + ref = ir.Reference() + } else { + ref, err = alltransports.ParseImageName(opts.Images[0]) + if err != nil { + return "", err + } + } + list, err := i.getManifestList() + if err != nil { + return "", err + } + list, err = addManifestToList(ref, list, systemContext, opts) + if err != nil { + return "", err + } + return list.SaveToImage(i.imageruntime.store, i.ID(), nil, "") +} + +// PushManifest pushes a manifest to a destination +func (i *Image) PushManifest(dest types.ImageReference, opts manifests.PushOptions) (digest.Digest, error) { + list, err := i.getManifestList() + if err != nil { + return "", err + } + _, d, err := list.Push(context.Background(), dest, opts) + return d, err +} diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go index 2144671a5..2f802f333 100644 --- a/libpod/in_memory_state.go +++ b/libpod/in_memory_state.go @@ -3,7 +3,6 @@ package libpod import ( "strings" - "github.com/containers/libpod/libpod/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/pkg/registrar" "github.com/containers/storage/pkg/truncindex" @@ -20,10 +19,16 @@ type InMemoryState struct { pods map[string]*Pod // Maps container ID to container struct. containers map[string]*Container - volumes map[string]*Volume + // Maps volume ID to volume struct + volumes map[string]*Volume + // Maps exec session ID to ID of associated container + execSessions map[string]string // Maps container ID to a list of IDs of dependencies. - ctrDepends map[string][]string + ctrDepends map[string][]string + // Maps volume ID to IDs of dependencies volumeDepends map[string][]string + // Maps container ID to IDs of associated exec sessions. + ctrExecSessions map[string][]string // Maps pod ID to a map of container ID to container struct. podContainers map[string]map[string]*Container // Global name registry - ensures name uniqueness and performs lookups. @@ -51,10 +56,13 @@ func NewInMemoryState() (State, error) { state.pods = make(map[string]*Pod) state.containers = make(map[string]*Container) state.volumes = make(map[string]*Volume) + state.execSessions = make(map[string]string) state.ctrDepends = make(map[string][]string) state.volumeDepends = make(map[string][]string) + state.ctrExecSessions = make(map[string][]string) + state.podContainers = make(map[string]map[string]*Container) state.nameIndex = registrar.NewRegistrar() @@ -81,8 +89,8 @@ func (s *InMemoryState) Refresh() error { // GetDBConfig is not implemented for in-memory state. // As we do not store a config, return an empty one. -func (s *InMemoryState) GetDBConfig() (*config.DBConfig, error) { - return &config.DBConfig{}, nil +func (s *InMemoryState) GetDBConfig() (*DBConfig, error) { + return &DBConfig{}, nil } // ValidateDBConfig is not implemented for the in-memory state. 
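
The in_memory_state.go hunks that follow track exec sessions with two maps kept in lockstep: execSessions (session ID → container ID) and ctrExecSessions (container ID → list of session IDs). A standalone sketch of that bookkeeping, using illustrative names rather than the libpod ones:

package main

import (
	"errors"
	"fmt"
)

// execIndex mirrors the two maps the in-memory state keeps: a forward
// map from session ID to container ID, and a reverse map from container
// ID to all of its session IDs. Both must be updated together.
type execIndex struct {
	sessionToCtr  map[string]string
	ctrToSessions map[string][]string
}

func newExecIndex() *execIndex {
	return &execIndex{
		sessionToCtr:  make(map[string]string),
		ctrToSessions: make(map[string][]string),
	}
}

// add registers a session, rejecting duplicate session IDs.
func (i *execIndex) add(ctrID, sessionID string) error {
	if _, ok := i.sessionToCtr[sessionID]; ok {
		return errors.New("exec session already exists")
	}
	i.sessionToCtr[sessionID] = ctrID
	i.ctrToSessions[ctrID] = append(i.ctrToSessions[ctrID], sessionID)
	return nil
}

// remove deletes a session from both maps; a missing session is a no-op.
func (i *execIndex) remove(sessionID string) {
	ctrID, ok := i.sessionToCtr[sessionID]
	if !ok {
		return
	}
	kept := i.ctrToSessions[ctrID][:0]
	for _, s := range i.ctrToSessions[ctrID] {
		if s != sessionID {
			kept = append(kept, s)
		}
	}
	i.ctrToSessions[ctrID] = kept
	delete(i.sessionToCtr, sessionID)
}

func main() {
	idx := newExecIndex()
	_ = idx.add("ctr1", "sess1")
	_ = idx.add("ctr1", "sess2")
	idx.remove("sess1")
	fmt.Println(idx.ctrToSessions["ctr1"]) // [sess2]
}
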
@@ -316,6 +324,13 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
 return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
 }
+ // Ensure we don't have active exec sessions
+ ctrSessions := s.ctrExecSessions[ctr.ID()]
+ if len(ctrSessions) > 0 {
+ sessStr := strings.Join(ctrSessions, ", ")
+ return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr)
+ }
+
 if _, ok := s.containers[ctr.ID()]; !ok {
 ctr.valid = false
 return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID())
@@ -437,6 +452,117 @@ func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error)
 return ctr.Config(), nil
}
+// Add an exec session to the database
+func (s *InMemoryState) AddExecSession(ctr *Container, session *ExecSession) error {
+ if !ctr.valid {
+ return define.ErrCtrRemoved
+ }
+ if session.ContainerID() != ctr.ID() {
+ return errors.Wrapf(define.ErrInvalidArg, "container ID and exec session's container ID must match")
+ }
+ if _, ok := s.containers[ctr.ID()]; !ok {
+ return define.ErrNoSuchCtr
+ }
+
+ if _, ok := s.execSessions[session.ID()]; ok {
+ return define.ErrExecSessionExists
+ }
+
+ s.execSessions[session.ID()] = ctr.ID()
+
+ ctrSessions, ok := s.ctrExecSessions[ctr.ID()]
+ if !ok {
+ ctrSessions = []string{}
+ }
+
+ ctrSessions = append(ctrSessions, session.ID())
+ s.ctrExecSessions[ctr.ID()] = ctrSessions
+
+ return nil
+}
+
+// Get the ID of the container an exec session is associated with. Lookup is
+// by exact session ID only.
+func (s *InMemoryState) GetExecSession(id string) (string, error) {
+ if id == "" {
+ return "", define.ErrEmptyID
+ }
+
+ session, ok := s.execSessions[id]
+ if !ok {
+ return "", define.ErrNoSuchExecSession
+ }
+
+ return session, nil
+}
+
+// RemoveExecSession removes an exec session from the database.
+func (s *InMemoryState) RemoveExecSession(session *ExecSession) error {
+ if _, ok := s.execSessions[session.ID()]; !ok {
+ return define.ErrNoSuchExecSession
+ }
+
+ ctrSessions, ok := s.ctrExecSessions[session.ContainerID()]
+ // If !ok - internal state seems inconsistent, but the thing we wanted
+ // to remove is gone. Continue.
+ if ok {
+ newSessions := []string{}
+ for _, sess := range ctrSessions {
+ if sess != session.ID() {
+ newSessions = append(newSessions, sess)
+ }
+ }
+ s.ctrExecSessions[session.ContainerID()] = newSessions
+ }
+
+ delete(s.execSessions, session.ID())
+
+ return nil
+}
+
+// GetContainerExecSessions retrieves all exec sessions for the given container.
+func (s *InMemoryState) GetContainerExecSessions(ctr *Container) ([]string, error) {
+ if !ctr.valid {
+ return nil, define.ErrCtrRemoved
+ }
+ if _, ok := s.containers[ctr.ID()]; !ok {
+ ctr.valid = false
+ return nil, define.ErrNoSuchCtr
+ }
+
+ ctrSessions := s.ctrExecSessions[ctr.ID()]
+
+ return ctrSessions, nil
+}
+
+// RemoveContainerExecSessions removes all exec sessions for the given
+// container.
+func (s *InMemoryState) RemoveContainerExecSessions(ctr *Container) error { + if !ctr.valid { + return define.ErrCtrRemoved + } + if _, ok := s.containers[ctr.ID()]; !ok { + ctr.valid = false + return define.ErrNoSuchCtr + } + + ctrSessions, ok := s.ctrExecSessions[ctr.ID()] + if !ok { + return nil + } + + for _, sess := range ctrSessions { + if _, ok := s.execSessions[sess]; !ok { + // We have an internal state inconsistency + // Error out + return errors.Wrapf(define.ErrInternal, "inconsistent database state: exec session %s is missing", sess) + } + delete(s.execSessions, sess) + } + delete(s.ctrExecSessions, ctr.ID()) + + return nil +} + // RewriteContainerConfig rewrites a container's configuration. // This function is DANGEROUS, even with an in-memory state. // Please read the full comment on it in state.go before using it. @@ -1056,6 +1182,13 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error { return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr) } + // Ensure we don't have active exec sessions + ctrSessions := s.ctrExecSessions[ctr.ID()] + if len(ctrSessions) > 0 { + sessStr := strings.Join(ctrSessions, ", ") + return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr) + } + // Retrieve pod containers podCtrs, ok := s.podContainers[pod.ID()] if !ok { diff --git a/libpod/info.go b/libpod/info.go index e5c075d97..8d411f0d4 100644 --- a/libpod/info.go +++ b/libpod/info.go @@ -165,7 +165,7 @@ func (r *Runtime) storeInfo() (map[string]interface{}, error) { } } info["GraphOptions"] = graphOptions - info["VolumePath"] = r.config.VolumePath + info["VolumePath"] = r.config.Engine.VolumePath configFile, err := storage.DefaultConfigFile(rootless.IsRootless()) if err != nil { diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go index 830035881..41a150c59 100644 --- a/libpod/lock/shm/shm_lock_test.go +++ b/libpod/lock/shm/shm_lock_test.go @@ -1,3 +1,5 @@ +// +build linux + package shm import ( diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index 5a27a2abb..c3a90f481 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -148,24 +148,36 @@ func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, logrus.Debugf("Made network namespace at %s for container %s", ctrNS.Path(), ctr.ID()) networkStatus := []*cnitypes.Result{} - if !rootless.IsRootless() && ctr.config.NetMode != "slirp4netns" { + if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { networkStatus, err = r.configureNetNS(ctr, ctrNS) } return ctrNS, networkStatus, err } -func checkSlirpFlags(path string) (bool, bool, bool, error) { +type slirpFeatures struct { + HasDisableHostLoopback bool + HasMTU bool + HasEnableSandbox bool + HasEnableSeccomp bool +} + +func checkSlirpFlags(path string) (*slirpFeatures, error) { cmd := exec.Command(path, "--help") out, err := cmd.CombinedOutput() if err != nil { - return false, false, false, errors.Wrapf(err, "slirp4netns %q", out) - } - return strings.Contains(string(out), "--disable-host-loopback"), strings.Contains(string(out), "--mtu"), strings.Contains(string(out), "--enable-sandbox"), nil + return nil, errors.Wrapf(err, "slirp4netns %q", out) + } + return &slirpFeatures{ + HasDisableHostLoopback: strings.Contains(string(out), "--disable-host-loopback"), + HasMTU: strings.Contains(string(out), "--mtu"), + HasEnableSandbox: 
strings.Contains(string(out), "--enable-sandbox"),
+ HasEnableSeccomp: strings.Contains(string(out), "--enable-seccomp"),
+ }, nil
}
// Configure the network namespace for a rootless container
func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
- path := r.config.NetworkCmdPath
+ path := r.config.Engine.NetworkCmdPath
 if path == "" {
 var err error
@@ -184,22 +196,25 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 defer errorhandling.CloseQuiet(syncW)
 havePortMapping := len(ctr.Config().PortMappings) > 0
- logPath := filepath.Join(ctr.runtime.config.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID))
+ logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID))
 cmdArgs := []string{}
- dhp, mtu, sandbox, err := checkSlirpFlags(path)
+ slirpFeatures, err := checkSlirpFlags(path)
 if err != nil {
 return errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
 }
- if dhp {
+ if slirpFeatures.HasDisableHostLoopback {
 cmdArgs = append(cmdArgs, "--disable-host-loopback")
 }
- if mtu {
+ if slirpFeatures.HasMTU {
 cmdArgs = append(cmdArgs, "--mtu", "65520")
 }
- if sandbox {
+ if slirpFeatures.HasEnableSandbox {
 cmdArgs = append(cmdArgs, "--enable-sandbox")
 }
+ if slirpFeatures.HasEnableSeccomp {
+ cmdArgs = append(cmdArgs, "--enable-seccomp")
+ }
 // the slirp4netns arguments being passed are described as follows:
 // from the slirp4netns documentation: https://github.com/rootless-containers/slirp4netns
@@ -230,7 +245,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 }
 // workaround for https://github.com/rootless-containers/slirp4netns/pull/153
- if sandbox {
+ if slirpFeatures.HasEnableSandbox {
 cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNS
 cmd.SysProcAttr.Unshareflags = syscall.CLONE_NEWNS
 }
@@ -323,7 +338,7 @@ func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) (er
 defer errorhandling.CloseQuiet(syncR)
 defer errorhandling.CloseQuiet(syncW)
- logPath := filepath.Join(ctr.runtime.config.TmpDir, fmt.Sprintf("rootlessport-%s.log", ctr.config.ID))
+ logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("rootlessport-%s.log", ctr.config.ID))
 logFile, err := os.Create(logPath)
 if err != nil {
 return errors.Wrapf(err, "failed to open rootlessport log file %s", logPath)
@@ -347,7 +362,7 @@ func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) (er
 NetNSPath: netnsPath,
 ExitFD: 3,
 ReadyFD: 4,
- TmpDir: ctr.runtime.config.TmpDir,
+ TmpDir: ctr.runtime.config.Engine.TmpDir,
 }
 cfgJSON, err := json.Marshal(cfg)
 if err != nil {
@@ -470,7 +485,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
 logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID())
 // rootless containers do not use the CNI plugin
- if !rootless.IsRootless() && ctr.config.NetMode != "slirp4netns" {
+ if !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() {
 var requestedIP net.IP
 if ctr.requestedIP != nil {
 requestedIP = ctr.requestedIP
@@ -558,8 +573,8 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) {
// Produce an InspectNetworkSettings containing information on the container
// network.
-func (c *Container) getContainerNetworkInfo() (*InspectNetworkSettings, error) { - settings := new(InspectNetworkSettings) +func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, error) { + settings := new(define.InspectNetworkSettings) settings.Ports = []ocicni.PortMapping{} if c.config.PortMappings != nil { // TODO: This may not be safe. @@ -585,13 +600,13 @@ func (c *Container) getContainerNetworkInfo() (*InspectNetworkSettings, error) { return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d CNI networks but have information on %d networks", len(c.config.Networks), len(c.state.NetworkStatus)) } - settings.Networks = make(map[string]*InspectAdditionalNetwork) + settings.Networks = make(map[string]*define.InspectAdditionalNetwork) // CNI results should be in the same order as the list of // networks we pass into CNI. for index, name := range c.config.Networks { cniResult := c.state.NetworkStatus[index] - addedNet := new(InspectAdditionalNetwork) + addedNet := new(define.InspectAdditionalNetwork) addedNet.NetworkID = name basicConfig, err := resultToBasicNetworkConfig(cniResult) @@ -625,8 +640,8 @@ func (c *Container) getContainerNetworkInfo() (*InspectNetworkSettings, error) { // resultToBasicNetworkConfig produces an InspectBasicNetworkConfig from a CNI // result -func resultToBasicNetworkConfig(result *cnitypes.Result) (InspectBasicNetworkConfig, error) { - config := InspectBasicNetworkConfig{} +func resultToBasicNetworkConfig(result *cnitypes.Result) (define.InspectBasicNetworkConfig, error) { + config := define.InspectBasicNetworkConfig{} for _, ctrIP := range result.IPs { size, _ := ctrIP.Address.Mask.Size() @@ -657,6 +672,13 @@ func resultToBasicNetworkConfig(result *cnitypes.Result) (InspectBasicNetworkCon return config, nil } +// This is a horrible hack, necessary because CNI does not properly clean up +// after itself on an unclean reboot. Return what we're pretty sure is the path +// to CNI's internal files (it's not really exposed to us). +func getCNINetworksDir() (string, error) { + return "/var/lib/cni/networks", nil +} + type logrusDebugWriter struct { prefix string } diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go index 7f343cf35..1ef8fe2dc 100644 --- a/libpod/networking_unsupported.go +++ b/libpod/networking_unsupported.go @@ -20,6 +20,10 @@ func (r *Runtime) createNetNS(ctr *Container) (err error) { return define.ErrNotImplemented } -func (c *Container) getContainerNetworkInfo() (*InspectNetworkSettings, error) { +func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, error) { return nil, define.ErrNotImplemented } + +func getCNINetworksDir() (string, error) { + return "", define.ErrNotImplemented +} diff --git a/libpod/oci.go b/libpod/oci.go index 27edebefc..ef46cf5c3 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -71,6 +71,9 @@ type OCIRuntime interface { // Returns an int (exit code), error channel (errors from attach), and // error (errors that occurred attempting to start the exec session). ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error) + // ExecAttachResize resizes the terminal of a running exec session. Only + // allowed with sessions that were created with a TTY. + ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error // ExecStopContainer stops a given exec session in a running container. 
// SIGTERM will be sent initially, then SIGKILL after the given timeout.
// If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will
@@ -143,12 +146,12 @@ type ExecOptions struct {
 // to 0, 1, 2) that will be passed to the executed process. The total FDs
 // passed will be 3 + PreserveFDs.
 PreserveFDs uint
- // Resize is a channel where terminal resize events are sent to be
- // handled.
- Resize chan remotecommand.TerminalSize
 // DetachKeys is a set of keys that, when pressed in sequence, will
 // detach from the container.
- DetachKeys string
+ // If not provided, the default keys will be used.
+ // If provided but set to "", detaching from the container will be
+ // disabled.
+ DetachKeys *string
}
// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index 46c70e7eb..433993edb 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -9,6 +9,7 @@ import (
 "os"
 "path/filepath"
+ "github.com/containers/common/pkg/config"
 "github.com/containers/libpod/libpod/define"
 "github.com/containers/libpod/pkg/errorhandling"
 "github.com/containers/libpod/pkg/kubeutils"
@@ -93,7 +94,7 @@ func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan re
 // 4. attachToExec sends on startFd, signalling it has attached to the socket and child is ready to go
 // 5. child receives on startFd, runs the runtime exec command
 // attachToExec is responsible for closing startFd and attachFd
-func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd, attachFd *os.File) error {
+func (c *Container) attachToExec(streams *AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File) error {
 if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
 return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
 }
@@ -104,7 +105,11 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c
 defer errorhandling.CloseQuiet(startFd)
 defer errorhandling.CloseQuiet(attachFd)
- detachKeys, err := processDetachKeys(keys)
+ detachString := config.DefaultDetachKeys
+ if keys != nil {
+ detachString = *keys
+ }
+ detachKeys, err := processDetachKeys(detachString)
 if err != nil {
 return err
 }
@@ -134,10 +139,6 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c
 }
 }()
- // Register the resize func after we've read the attach socket, as we know at this point the
- // 'ctl' file has been created in conmon
- registerResizeFunc(resize, c.execBundlePath(sessionID))
-
 // start listening on stdio of the process
 receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys)
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index a5530e448..ce888c690 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -20,8 +20,8 @@ import (
 "text/template"
 "time"
+ "github.com/containers/common/pkg/config"
 conmonConfig "github.com/containers/conmon/runner/config"
- "github.com/containers/libpod/libpod/config"
 "github.com/containers/libpod/libpod/define"
 "github.com/containers/libpod/pkg/cgroups"
 "github.com/containers/libpod/pkg/errorhandling"
@@ -80,13 +80,13 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
 runtime.name = name
 runtime.conmonPath = conmonPath
- runtime.conmonEnv = runtimeCfg.ConmonEnvVars
- runtime.cgroupManager =
runtimeCfg.CgroupManager - runtime.tmpDir = runtimeCfg.TmpDir - runtime.logSizeMax = runtimeCfg.MaxLogSize - runtime.noPivot = runtimeCfg.NoPivotRoot - runtime.reservePorts = runtimeCfg.EnablePortReservation - runtime.sdNotify = runtimeCfg.SDNotify + runtime.conmonEnv = runtimeCfg.Engine.ConmonEnvVars + runtime.cgroupManager = runtimeCfg.Engine.CgroupManager + runtime.tmpDir = runtimeCfg.Engine.TmpDir + runtime.logSizeMax = runtimeCfg.Containers.LogSizeMax + runtime.noPivot = runtimeCfg.Engine.NoPivotRoot + runtime.reservePorts = runtimeCfg.Engine.EnablePortReservation + runtime.sdNotify = runtimeCfg.Engine.SDNotify // TODO: probe OCI runtime for feature and enable automatically if // available. @@ -127,7 +127,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") - if runtime.cgroupManager != define.CgroupfsCgroupsManager && runtime.cgroupManager != define.SystemdCgroupsManager { + if runtime.cgroupManager != config.CgroupfsCgroupsManager && runtime.cgroupManager != config.SystemdCgroupsManager { return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) } @@ -177,7 +177,7 @@ func hasCurrentUserMapped(ctr *Container) bool { // CreateContainer creates a container. func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) { if !hasCurrentUserMapped(ctr) { - for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.VolumePath} { + for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} { if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil { return err } @@ -519,7 +519,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath) - detachString := define.DefaultDetachKeys + detachString := ctr.runtime.config.Engine.DetachKeys if detachKeys != nil { detachString = *detachKeys } @@ -769,47 +769,67 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options attachChan := make(chan error) go func() { // attachToExec is responsible for closing pipes - attachChan <- c.attachToExec(options.Streams, options.DetachKeys, options.Resize, sessionID, parentStartPipe, parentAttachPipe) + attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe) close(attachChan) }() attachToExecCalled = true + if err := execCmd.Wait(); err != nil { + return -1, nil, errors.Wrapf(err, "cannot run conmon") + } + pid, err := readConmonPipeData(parentSyncPipe, ociLog) return pid, attachChan, err } +// ExecAttachResize resizes the TTY of the given exec session. +func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + // TODO: probably want a dedicated function to get ctl file path? 
+ controlPath := filepath.Join(ctr.execBundlePath(sessionID), "ctl")
+ controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
+ if err != nil {
+ return errors.Wrapf(err, "could not open ctl file for terminal resize for container %s exec session %s", ctr.ID(), sessionID)
+ }
+ defer controlFile.Close()
+
+ if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
+ return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
+ }
+
+ return nil
+}
+
// ExecStopContainer stops a given exec session in a running container.
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
- session, ok := ctr.state.ExecSessions[sessionID]
- if !ok {
- // TODO This should probably be a separate error
- return errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
+ pid, err := ctr.getExecSessionPID(sessionID)
+ if err != nil {
+ return err
 }
 logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
 // Is the session dead?
 // Ping the PID with signal 0 to see if it still exists.
- if err := unix.Kill(session.PID, 0); err != nil {
+ if err := unix.Kill(pid, 0); err != nil {
 if err == unix.ESRCH {
 return nil
 }
- return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
+ return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
 }
 if timeout > 0 {
 // Use SIGTERM by default, then SIGKILL after timeout.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, session.PID, ctr.ID())
- if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
+ logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
+ if err := unix.Kill(pid, unix.SIGTERM); err != nil {
 if err == unix.ESRCH {
 return nil
 }
- return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, session.PID)
+ return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
 }
 // Wait for the PID to stop
- if err := waitPidStop(session.PID, time.Duration(timeout)*time.Second); err != nil {
+ if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
 logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
 } else {
 // No error, container is dead
@@ -818,17 +838,17 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
 }
 // SIGTERM did not work. On to SIGKILL.
- logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, session.PID, ctr.ID()) - if err := unix.Kill(session.PID, unix.SIGTERM); err != nil { + logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID()) + if err := unix.Kill(pid, unix.SIGTERM); err != nil { if err == unix.ESRCH { return nil } - return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, session.PID) + return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid) } // Wait for the PID to stop - if err := waitPidStop(session.PID, killContainerTimeout*time.Second); err != nil { - return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, session.PID) + if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil { + return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid) } return nil @@ -836,21 +856,20 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t // ExecUpdateStatus checks if the given exec session is still running. func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) { - session, ok := ctr.state.ExecSessions[sessionID] - if !ok { - // TODO This should probably be a separate error - return false, errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID()) + pid, err := ctr.getExecSessionPID(sessionID) + if err != nil { + return false, err } logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID) // Is the session dead? // Ping the PID with signal 0 to see if it still exists. 
- if err := unix.Kill(session.PID, 0); err != nil { + if err := unix.Kill(pid, 0); err != nil { if err == unix.ESRCH { return false, nil } - return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID) + return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid) } return true, nil @@ -1346,7 +1365,7 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*o func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logTag string) []string { // set the conmon API version to be able to use the correct sync struct keys args := []string{"--api-version", "1"} - if r.cgroupManager == define.SystemdCgroupsManager && !ctr.config.NoCgroups { + if r.cgroupManager == config.SystemdCgroupsManager && !ctr.config.NoCgroups { args = append(args, "-s") } args = append(args, "-c", ctr.ID()) @@ -1464,7 +1483,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec if mustCreateCgroup { cgroupParent := ctr.CgroupParent() - if r.cgroupManager == define.SystemdCgroupsManager { + if r.cgroupManager == config.SystemdCgroupsManager { unitName := createUnitName("libpod-conmon", ctr.ID()) realCgroupParent := cgroupParent diff --git a/libpod/oci_conmon_unsupported.go b/libpod/oci_conmon_unsupported.go index a2f52f527..395b6f6d9 100644 --- a/libpod/oci_conmon_unsupported.go +++ b/libpod/oci_conmon_unsupported.go @@ -3,7 +3,8 @@ package libpod import ( - "github.com/containers/libpod/libpod/config" + "github.com/containers/common/pkg/config" + "github.com/containers/libpod/libpod/define" ) diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go index ff7eea625..a5d589255 100644 --- a/libpod/oci_missing.go +++ b/libpod/oci_missing.go @@ -50,7 +50,7 @@ func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) { newRuntime := new(MissingRuntime) newRuntime.name = name - newRuntime.exitsDir = filepath.Join(r.config.TmpDir, "exits") + newRuntime.exitsDir = filepath.Join(r.config.Engine.TmpDir, "exits") missingRuntimes[name] = newRuntime @@ -125,6 +125,11 @@ func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options return -1, nil, r.printError() } +// ExecAttachResize is not available as the runtime is missing. +func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error { + return r.printError() +} + // ExecStopContainer is not available as the runtime is missing. // TODO: We can also investigate using unix.Kill() on the PID of the exec // session here if we want to make stopping containers possible. Won't be diff --git a/libpod/options.go b/libpod/options.go index 98de71af2..dfbec364a 100644 --- a/libpod/options.go +++ b/libpod/options.go @@ -7,6 +7,7 @@ import ( "regexp" "syscall" + "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" @@ -21,6 +22,8 @@ import ( var ( // NameRegex is a regular expression to validate container/pod names. + // This must NOT be changed from outside of Libpod. It should be a + // constant, but Go won't let us do that. NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") // RegexError is thrown in presence of an invalid container/pod name. 
RegexError = errors.Wrapf(define.ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") @@ -40,48 +43,48 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption { setField := false if config.RunRoot != "" { - rt.config.StorageConfig.RunRoot = config.RunRoot - rt.config.StorageConfigRunRootSet = true + rt.storageConfig.RunRoot = config.RunRoot + rt.storageSet.RunRootSet = true setField = true } if config.GraphRoot != "" { - rt.config.StorageConfig.GraphRoot = config.GraphRoot - rt.config.StorageConfigGraphRootSet = true + rt.storageConfig.GraphRoot = config.GraphRoot + rt.storageSet.GraphRootSet = true // Also set libpod static dir, so we are a subdirectory // of the c/storage store by default - rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod") - rt.config.StaticDirSet = true + rt.config.Engine.StaticDir = filepath.Join(config.GraphRoot, "libpod") + rt.storageSet.StaticDirSet = true // Also set libpod volume path, so we are a subdirectory // of the c/storage store by default - rt.config.VolumePath = filepath.Join(config.GraphRoot, "volumes") - rt.config.VolumePathSet = true + rt.config.Engine.VolumePath = filepath.Join(config.GraphRoot, "volumes") + rt.storageSet.VolumePathSet = true setField = true } if config.GraphDriverName != "" { - rt.config.StorageConfig.GraphDriverName = config.GraphDriverName - rt.config.StorageConfigGraphDriverNameSet = true + rt.storageConfig.GraphDriverName = config.GraphDriverName + rt.storageSet.GraphDriverNameSet = true setField = true } if config.GraphDriverOptions != nil { - rt.config.StorageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions)) - copy(rt.config.StorageConfig.GraphDriverOptions, config.GraphDriverOptions) + rt.storageConfig.GraphDriverOptions = make([]string, len(config.GraphDriverOptions)) + copy(rt.storageConfig.GraphDriverOptions, config.GraphDriverOptions) setField = true } if config.UIDMap != nil { - rt.config.StorageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap)) - copy(rt.config.StorageConfig.UIDMap, config.UIDMap) + rt.storageConfig.UIDMap = make([]idtools.IDMap, len(config.UIDMap)) + copy(rt.storageConfig.UIDMap, config.UIDMap) } if config.GIDMap != nil { - rt.config.StorageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap)) - copy(rt.config.StorageConfig.GIDMap, config.GIDMap) + rt.storageConfig.GIDMap = make([]idtools.IDMap, len(config.GIDMap)) + copy(rt.storageConfig.GIDMap, config.GIDMap) } // If any one of runroot, graphroot, graphdrivername, @@ -92,11 +95,11 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption { if err != nil { return err } - if rt.config.StorageConfig.GraphRoot == "" { - rt.config.StorageConfig.GraphRoot = storeOpts.GraphRoot + if rt.storageConfig.GraphRoot == "" { + rt.storageConfig.GraphRoot = storeOpts.GraphRoot } - if rt.config.StorageConfig.RunRoot == "" { - rt.config.StorageConfig.RunRoot = storeOpts.RunRoot + if rt.storageConfig.RunRoot == "" { + rt.storageConfig.RunRoot = storeOpts.RunRoot } } @@ -111,7 +114,7 @@ func WithDefaultTransport(defaultTransport string) RuntimeOption { return define.ErrRuntimeFinalized } - rt.config.ImageDefaultTransport = defaultTransport + rt.config.Engine.ImageDefaultTransport = defaultTransport return nil } @@ -127,7 +130,7 @@ func WithSignaturePolicy(path string) RuntimeOption { return define.ErrRuntimeFinalized } - rt.config.SignaturePolicyPath = path + rt.config.Engine.SignaturePolicyPath = path return nil } @@ -137,17 +140,17 @@ func WithSignaturePolicy(path string) RuntimeOption { 
// Please note that information is not portable between backing states. // As such, if this differs between two libpods running on the same system, // they will not share containers, and unspecified behavior may occur. -func WithStateType(storeType define.RuntimeStateStore) RuntimeOption { +func WithStateType(storeType config.RuntimeStateStore) RuntimeOption { return func(rt *Runtime) error { if rt.valid { return define.ErrRuntimeFinalized } - if storeType == define.InvalidStateStore { + if storeType == config.InvalidStateStore { return errors.Wrapf(define.ErrInvalidArg, "must provide a valid state store type") } - rt.config.StateType = storeType + rt.config.Engine.StateType = storeType return nil } @@ -164,8 +167,7 @@ func WithOCIRuntime(runtime string) RuntimeOption { return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path") } - rt.config.OCIRuntime = runtime - rt.config.RuntimePath = nil + rt.config.Engine.OCIRuntime = runtime return nil } @@ -183,7 +185,7 @@ func WithConmonPath(path string) RuntimeOption { return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path") } - rt.config.ConmonPath = []string{path} + rt.config.Engine.ConmonPath = []string{path} return nil } @@ -196,8 +198,8 @@ func WithConmonEnv(environment []string) RuntimeOption { return define.ErrRuntimeFinalized } - rt.config.ConmonEnvVars = make([]string, len(environment)) - copy(rt.config.ConmonEnvVars, environment) + rt.config.Engine.ConmonEnvVars = make([]string, len(environment)) + copy(rt.config.Engine.ConmonEnvVars, environment) return nil } @@ -211,7 +213,7 @@ func WithNetworkCmdPath(path string) RuntimeOption { return define.ErrRuntimeFinalized } - rt.config.NetworkCmdPath = path + rt.config.Engine.NetworkCmdPath = path return nil } @@ -226,12 +228,12 @@ func WithCgroupManager(manager string) RuntimeOption { return define.ErrRuntimeFinalized } - if manager != define.CgroupfsCgroupsManager && manager != define.SystemdCgroupsManager { + if manager != config.CgroupfsCgroupsManager && manager != config.SystemdCgroupsManager { return errors.Wrapf(define.ErrInvalidArg, "CGroup manager must be one of %s and %s", - define.CgroupfsCgroupsManager, define.SystemdCgroupsManager) + config.CgroupfsCgroupsManager, config.SystemdCgroupsManager) } - rt.config.CgroupManager = manager + rt.config.Engine.CgroupManager = manager return nil } @@ -245,8 +247,8 @@ func WithStaticDir(dir string) RuntimeOption { return define.ErrRuntimeFinalized } - rt.config.StaticDir = dir - rt.config.StaticDirSet = true + rt.config.Engine.StaticDir = dir + rt.config.Engine.StaticDirSet = true return nil } @@ -265,7 +267,7 @@ func WithHooksDir(hooksDirs ...string) RuntimeOption { } } - rt.config.HooksDir = hooksDirs + rt.config.Engine.HooksDir = hooksDirs return nil } } @@ -283,7 +285,7 @@ func WithDefaultMountsFile(mountsFile string) RuntimeOption { if mountsFile == "" { return define.ErrInvalidArg } - rt.config.DefaultMountsFile = mountsFile + rt.config.Containers.DefaultMountsFile = mountsFile return nil } } @@ -296,8 +298,8 @@ func WithTmpDir(dir string) RuntimeOption { if rt.valid { return define.ErrRuntimeFinalized } - rt.config.TmpDir = dir - rt.config.TmpDirSet = true + rt.config.Engine.TmpDir = dir + rt.config.Engine.TmpDirSet = true return nil } @@ -320,7 +322,7 @@ func WithMaxLogSize(limit int64) RuntimeOption { return define.ErrRuntimeFinalized } - rt.config.MaxLogSize = limit + rt.config.Containers.LogSizeMax = limit return nil } @@ -334,7 +336,7 @@ func WithNoPivotRoot() RuntimeOption { return 
define.ErrRuntimeFinalized
 }
- rt.config.NoPivotRoot = true
+ rt.config.Engine.NoPivotRoot = true
 return nil
 }
@@ -347,7 +349,7 @@ func WithCNIConfigDir(dir string) RuntimeOption {
 return define.ErrRuntimeFinalized
 }
- rt.config.CNIConfigDir = dir
+ rt.config.Network.NetworkConfigDir = dir
 return nil
 }
@@ -360,7 +362,7 @@ func WithCNIPluginDir(dir string) RuntimeOption {
 return define.ErrRuntimeFinalized
 }
- rt.config.CNIPluginDir = []string{dir}
+ rt.config.Network.CNIPluginDirs = []string{dir}
 return nil
 }
@@ -380,7 +382,7 @@ func WithNamespace(ns string) RuntimeOption {
 return define.ErrRuntimeFinalized
 }
- rt.config.Namespace = ns
+ rt.config.Engine.Namespace = ns
 return nil
 }
@@ -395,8 +397,8 @@ func WithVolumePath(volPath string) RuntimeOption {
 return define.ErrRuntimeFinalized
 }
- rt.config.VolumePath = volPath
- rt.config.VolumePathSet = true
+ rt.config.Engine.VolumePath = volPath
+ rt.config.Engine.VolumePathSet = true
 return nil
 }
@@ -413,7 +415,7 @@ func WithDefaultInfraImage(img string) RuntimeOption {
 return define.ErrRuntimeFinalized
 }
- rt.config.InfraImage = img
+ rt.config.Engine.InfraImage = img
 return nil
 }
@@ -427,7 +429,7 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
 return define.ErrRuntimeFinalized
 }
- rt.config.InfraCommand = cmd
+ rt.config.Engine.InfraCommand = cmd
 return nil
 }
@@ -450,7 +452,7 @@ func WithRenumber() RuntimeOption {
 }
 // WithMigrate instructs libpod to migrate container configurations to account
-// for changes between Libpod versions. All running containers will be stopped
+// for changes between Engine versions. All running containers will be stopped
 // during a migration, then restarted after the migration is complete.
 func WithMigrate() RuntimeOption {
 return func(rt *Runtime) error {
@@ -464,10 +466,10 @@ func WithMigrate() RuntimeOption {
 }
}
-// WithMigrateRuntime instructs Libpod to change the default OCI runtime on all
+// WithMigrateRuntime instructs Engine to change the default OCI runtime on all
// containers during a migration. This is not used if `WithMigrate()` is not
// also passed.
-// Libpod makes no promises that your containers continue to work with the new
+// Engine makes no promises that your containers continue to work with the new
// runtime - migrations between dissimilar runtimes may well break things.
// Use with caution.
func WithMigrateRuntime(requestedRuntime string) RuntimeOption {
@@ -499,7 +501,7 @@ func WithEventsLogger(logger string) RuntimeOption {
 return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid events backend", logger)
 }
- rt.config.EventsLogger = logger
+ rt.config.Engine.EventsLogger = logger
 return nil
 }
@@ -509,7 +511,7 @@ func WithEventsLogger(logger string) RuntimeOption {
// listening
func WithEnableSDNotify() RuntimeOption {
 return func(rt *Runtime) error {
- rt.config.SDNotify = true
+ rt.config.Engine.SDNotify = true
 return nil
 }
}
@@ -593,7 +595,7 @@ func WithUser(user string) CtrCreateOption {
// other configuration from the image will be added to the config.
// TODO: Replace image name and ID with a libpod.Image struct when that is
// finished.
-func WithRootFSFromImage(imageID string, imageName string) CtrCreateOption { +func WithRootFSFromImage(imageID, imageName, rawImageName string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { return define.ErrCtrFinalized @@ -601,7 +603,7 @@ func WithRootFSFromImage(imageID string, imageName string) CtrCreateOption { ctr.config.RootfsImageID = imageID ctr.config.RootfsImageName = imageName - + ctr.config.RawImageName = rawImageName return nil } } @@ -1092,7 +1094,8 @@ func WithDNS(dnsServers []string) CtrCreateOption { } dns = append(dns, result) } - ctr.config.DNSServer = dns + ctr.config.DNSServer = append(ctr.config.DNSServer, dns...) + return nil } } @@ -1103,7 +1106,10 @@ func WithDNSOption(dnsOptions []string) CtrCreateOption { if ctr.valid { return define.ErrCtrFinalized } - ctr.config.DNSOption = dnsOptions + if ctr.config.UseImageResolvConf { + return errors.Wrapf(define.ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf") + } + ctr.config.DNSOption = append(ctr.config.DNSOption, dnsOptions...) return nil } } diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go index b5895d133..851f52a4e 100644 --- a/libpod/pod_internal.go +++ b/libpod/pod_internal.go @@ -5,6 +5,7 @@ import ( "path/filepath" "time" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/storage/pkg/stringid" "github.com/pkg/errors" @@ -65,19 +66,19 @@ func (p *Pod) refresh() error { // We need to recreate the pod's cgroup if p.config.UsePodCgroup { - switch p.runtime.config.CgroupManager { - case define.SystemdCgroupsManager: + switch p.runtime.config.Engine.CgroupManager { + case config.SystemdCgroupsManager: cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID())) if err != nil { logrus.Errorf("Error creating CGroup for pod %s: %v", p.ID(), err) } p.state.CgroupPath = cgroupPath - case define.CgroupfsCgroupsManager: + case config.CgroupfsCgroupsManager: p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID()) logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath) default: - return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.CgroupManager) + return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.Engine.CgroupManager) } } diff --git a/libpod/reset.go b/libpod/reset.go index ae0a0cde9..c6754b7f6 100644 --- a/libpod/reset.go +++ b/libpod/reset.go @@ -88,14 +88,13 @@ func (r *Runtime) Reset(ctx context.Context) error { } prevError = err } - runtimeDir, err := util.GetRuntimeDir() if err != nil { return err } - tempDir := r.config.TmpDir - if r.config.TmpDir == runtimeDir { - tempDir = filepath.Join(r.config.TmpDir, "containers") + tempDir := r.config.Engine.TmpDir + if tempDir == runtimeDir { + tempDir = filepath.Join(tempDir, "containers") } if err := os.RemoveAll(tempDir); err != nil { if prevError != nil { diff --git a/libpod/runtime.go b/libpod/runtime.go index 8dcec82db..422b79359 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -11,11 +11,13 @@ import ( is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" - "github.com/containers/libpod/libpod/config" + + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/libpod/lock" + 
"github.com/containers/libpod/pkg/cgroups" sysreg "github.com/containers/libpod/pkg/registries" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" @@ -30,9 +32,20 @@ import ( // NewRuntime type RuntimeOption func(*Runtime) error +type storageSet struct { + RunRootSet bool + GraphRootSet bool + StaticDirSet bool + VolumePathSet bool + GraphDriverNameSet bool + TmpDirSet bool +} + // Runtime is the core libpod runtime type Runtime struct { - config *config.Config + config *config.Config + storageConfig storage.StoreOptions + storageSet storageSet state State store storage.Store @@ -116,7 +129,12 @@ func SetXdgDirs() error { // NewRuntime creates a new container runtime // Options can be passed to override the default configuration for the runtime func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) { - return newRuntimeFromConfig(ctx, "", options...) + conf, err := config.NewConfig("") + if err != nil { + return nil, err + } + conf.CheckCgroupsAndAdjustConfig() + return newRuntimeFromConfig(ctx, conf, options...) } // NewRuntimeFromConfig creates a new container runtime using the given @@ -124,21 +142,29 @@ func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime // functions can be used to mutate this configuration further. // An error will be returned if the configuration file at the given path does // not exist or cannot be loaded -func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) { - if userConfigPath == "" { - return nil, errors.New("invalid configuration file specified") - } - return newRuntimeFromConfig(ctx, userConfigPath, options...) +func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (runtime *Runtime, err error) { + + return newRuntimeFromConfig(ctx, userConfig, options...) } -func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) { +func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (runtime *Runtime, err error) { runtime = new(Runtime) - conf, err := config.NewConfig(userConfigPath) + if conf.Engine.OCIRuntime == "" { + conf.Engine.OCIRuntime = "runc" + // If we're running on cgroups v2, default to using crun. 
+ if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 { + conf.Engine.OCIRuntime = "crun" + } + } + + runtime.config = conf + + storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) if err != nil { return nil, err } - runtime.config = conf + runtime.storageConfig = storeOpts // Overwrite config with user-given configuration options for _, opt := range options { @@ -157,9 +183,9 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { var err error var manager lock.Manager - switch runtime.config.LockType { + switch runtime.config.Engine.LockType { case "file": - lockPath := filepath.Join(runtime.config.TmpDir, "locks") + lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks") manager, err = lock.OpenFileLockManager(lockPath) if err != nil { if os.IsNotExist(errors.Cause(err)) { @@ -178,11 +204,11 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID()) } // Set up the lock manager - manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks) + manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks) if err != nil { switch { case os.IsNotExist(errors.Cause(err)): - manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) + manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks) if err != nil { return nil, errors.Wrapf(err, "failed to get new shm lock manager") } @@ -196,7 +222,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath) } - manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks) + manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks) if err != nil { return nil, err } @@ -205,7 +231,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { } } default: - return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType) + return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType) } return manager, nil } @@ -221,11 +247,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { runtime.conmonPath = cPath // Make the static files directory if it does not exist - if err := os.MkdirAll(runtime.config.StaticDir, 0700); err != nil { + if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return errors.Wrapf(err, "error creating runtime static files directory %s", - runtime.config.StaticDir) + runtime.config.Engine.StaticDir) } } @@ -235,17 +261,17 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // libpod/state, the config could take care of the code below. It // would further allow to move the types and consts into a coherent // package. 
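On the caller side, the config-first entry points above can be exercised like this. The program is hypothetical; only config.NewConfig, NewRuntimeFromConfig, Shutdown, and GetOCIRuntimePath, as they appear in this diff, are assumed:

package main

import (
	"context"
	"fmt"

	"github.com/containers/common/pkg/config"
	"github.com/containers/libpod/libpod"
)

func main() {
	// An empty path means: search the default containers.conf locations.
	conf, err := config.NewConfig("")
	if err != nil {
		fmt.Println("loading containers.conf:", err)
		return
	}
	conf.Engine.Namespace = "ci" // adjust before handing the config to libpod

	rt, err := libpod.NewRuntimeFromConfig(context.Background(), conf)
	if err != nil {
		fmt.Println("runtime init:", err)
		return
	}
	defer func() { _ = rt.Shutdown(false) }()
	fmt.Println("default OCI runtime:", rt.GetOCIRuntimePath())
}

The IsCgroup2UnifiedMode probe at the top of this hunk, which flips the default runtime to crun, amounts to checking the filesystem magic of /sys/fs/cgroup. A standalone approximation using golang.org/x/sys/unix (Linux only, not the libpod helper itself):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/sys/fs/cgroup", &st); err != nil {
		fmt.Println("statfs /sys/fs/cgroup:", err)
		return
	}
	// On a unified hierarchy the mount itself is of type cgroup2.
	fmt.Println("cgroups v2:", st.Type == unix.CGROUP2_SUPER_MAGIC)
}

The switch that follows is the state-backend selection that the TODO above would like to see moved into the config package.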
- switch runtime.config.StateType { - case define.InMemoryStateStore: + switch runtime.config.Engine.StateType { + case config.InMemoryStateStore: state, err := NewInMemoryState() if err != nil { return err } runtime.state = state - case define.SQLiteStateStore: + case config.SQLiteStateStore: return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled") - case define.BoltDBStateStore: - dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db") + case config.BoltDBStateStore: + dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db") state, err := NewBoltState(dbPath, runtime) if err != nil { @@ -253,7 +279,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } runtime.state = state default: - return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.StateType) + return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType) } // Grab config from the database so we can reset some defaults @@ -262,16 +288,16 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { return errors.Wrapf(err, "error retrieving runtime configuration from database") } - if err := runtime.config.MergeDBConfig(dbConfig); err != nil { + if err := runtime.mergeDBConfig(dbConfig); err != nil { return errors.Wrapf(err, "error merging database config into runtime config") } - logrus.Debugf("Using graph driver %s", runtime.config.StorageConfig.GraphDriverName) - logrus.Debugf("Using graph root %s", runtime.config.StorageConfig.GraphRoot) - logrus.Debugf("Using run root %s", runtime.config.StorageConfig.RunRoot) - logrus.Debugf("Using static dir %s", runtime.config.StaticDir) - logrus.Debugf("Using tmp dir %s", runtime.config.TmpDir) - logrus.Debugf("Using volume path %s", runtime.config.VolumePath) + logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName) + logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot) + logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot) + logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir) + logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir) + logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath) // Validate our config against the database, now that we've set our // final storage configuration @@ -279,10 +305,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { return err } - if err := runtime.state.SetNamespace(runtime.config.Namespace); err != nil { + if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil { return errors.Wrapf(err, "error setting libpod namespace in state") } - logrus.Debugf("Set libpod namespace to %q", runtime.config.Namespace) + logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace) // Set up containers/storage var store storage.Store @@ -316,66 +342,40 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // Set up containers/image runtime.imageContext = &types.SystemContext{ - SignaturePolicyPath: runtime.config.SignaturePolicyPath, + SignaturePolicyPath: runtime.config.Engine.SignaturePolicyPath, } // Create the tmpDir - if err := os.MkdirAll(runtime.config.TmpDir, 0751); err != nil { + if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil { // The directory is allowed to exist if !os.IsExist(err) { - return errors.Wrapf(err, "error creating tmpdir %s", runtime.config.TmpDir) + return 
errors.Wrapf(err, "error creating tmpdir %s", runtime.config.Engine.TmpDir) } } // Create events log dir - if err := os.MkdirAll(filepath.Dir(runtime.config.EventsLogFilePath), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil { // The directory is allowed to exist if !os.IsExist(err) { - return errors.Wrapf(err, "error creating events dirs %s", filepath.Dir(runtime.config.EventsLogFilePath)) + return errors.Wrapf(err, "error creating events dirs %s", filepath.Dir(runtime.config.Engine.EventsLogFilePath)) } } // Make lookup tables for runtime support supportsJSON := make(map[string]bool) supportsNoCgroups := make(map[string]bool) - for _, r := range runtime.config.RuntimeSupportsJSON { + for _, r := range runtime.config.Engine.RuntimeSupportsJSON { supportsJSON[r] = true } - for _, r := range runtime.config.RuntimeSupportsNoCgroups { + for _, r := range runtime.config.Engine.RuntimeSupportsNoCgroups { supportsNoCgroups[r] = true } // Get us at least one working OCI runtime. runtime.ociRuntimes = make(map[string]OCIRuntime) - // Is the old runtime_path defined? - if runtime.config.RuntimePath != nil { - // Don't print twice in rootless mode. - if os.Geteuid() == 0 { - logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`") - logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used") - } - - if len(runtime.config.RuntimePath) == 0 { - return errors.Wrapf(define.ErrInvalidArg, "empty runtime path array passed") - } - - name := filepath.Base(runtime.config.RuntimePath[0]) - - json := supportsJSON[name] - nocgroups := supportsNoCgroups[name] - - ociRuntime, err := newConmonOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, json, nocgroups) - if err != nil { - return err - } - - runtime.ociRuntimes[name] = ociRuntime - runtime.defaultOCIRuntime = ociRuntime - } - // Initialize remaining OCI runtimes - for name, paths := range runtime.config.OCIRuntimes { + for name, paths := range runtime.config.Engine.OCIRuntimes { json := supportsJSON[name] nocgroups := supportsNoCgroups[name] @@ -393,16 +393,16 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } // Do we have a default OCI runtime? - if runtime.config.OCIRuntime != "" { + if runtime.config.Engine.OCIRuntime != "" { // If the string starts with / it's a path to a runtime // executable. 
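The branch that resumes below resolves the configured default runtime in one of two ways: an absolute path is registered under its basename, while a bare name must already exist in the configured runtimes table. Extracted into a hypothetical standalone helper (not a libpod API), the rule is:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveRuntime condenses the lookup rule from makeRuntime: absolute
// paths are keyed by their basename, bare names must already be known.
func resolveRuntime(requested string, known map[string][]string) (string, []string, error) {
	if strings.HasPrefix(requested, "/") {
		return filepath.Base(requested), []string{requested}, nil
	}
	paths, ok := known[requested]
	if !ok {
		return "", nil, fmt.Errorf("default OCI runtime %q not found", requested)
	}
	return requested, paths, nil
}

func main() {
	known := map[string][]string{"runc": {"/usr/bin/runc", "/usr/sbin/runc"}}
	fmt.Println(resolveRuntime("runc", known))
	fmt.Println(resolveRuntime("/usr/local/bin/crun", known))
	fmt.Println(resolveRuntime("kata", known))
}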
- if strings.HasPrefix(runtime.config.OCIRuntime, "/") { - name := filepath.Base(runtime.config.OCIRuntime) + if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") { + name := filepath.Base(runtime.config.Engine.OCIRuntime) json := supportsJSON[name] nocgroups := supportsNoCgroups[name] - ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, json, nocgroups) + ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.config, json, nocgroups) if err != nil { return err } @@ -410,9 +410,9 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { runtime.ociRuntimes[name] = ociRuntime runtime.defaultOCIRuntime = ociRuntime } else { - ociRuntime, ok := runtime.ociRuntimes[runtime.config.OCIRuntime] + ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime] if !ok { - return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime) + return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime) } runtime.defaultOCIRuntime = ociRuntime } @@ -429,17 +429,18 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } // Make the per-boot files directory if it does not exist - if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil { + if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return errors.Wrapf(err, "error creating runtime temporary files directory %s", - runtime.config.TmpDir) + runtime.config.Engine.TmpDir) } } // Set up the CNI net plugin if !rootless.IsRootless() { - netPlugin, err := ocicni.InitCNI(runtime.config.CNIDefaultNetwork, runtime.config.CNIConfigDir, runtime.config.CNIPluginDir...) + + netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...) 
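// Note on the call above: ocicni.InitCNI takes the default network name, the
// directory scanned for CNI config lists, and one or more plugin directories
// searched for plugin binaries. Rootless runtimes never reach this point (see
// the IsRootless guard) and typically fall back to slirp4netns networking.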
if err != nil { return errors.Wrapf(err, "error configuring CNI network plugin") } @@ -449,8 +450,8 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // We now need to see if the system has restarted // We check for the presence of a file in our tmp directory to verify this // This check must be locked to prevent races - runtimeAliveLock := filepath.Join(runtime.config.TmpDir, "alive.lck") - runtimeAliveFile := filepath.Join(runtime.config.TmpDir, "alive") + runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck") + runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive") aliveLock, err := storage.GetLockfile(runtimeAliveLock) if err != nil { return errors.Wrapf(err, "error acquiring runtime init lock") @@ -587,7 +588,7 @@ func (r *Runtime) Shutdown(force bool) error { logrus.Errorf("Error retrieving containers from database: %v", err) } else { for _, ctr := range ctrs { - if err := ctr.StopWithTimeout(define.CtrRemoveTimeout); err != nil { + if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil { logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err) } } @@ -733,7 +734,7 @@ func (r *Runtime) generateName() (string, error) { // Configure store and image runtime func (r *Runtime) configureStore() error { - store, err := storage.GetStore(r.config.StorageConfig) + store, err := storage.GetStore(r.storageConfig) if err != nil { return err } @@ -750,9 +751,9 @@ func (r *Runtime) configureStore() error { r.storageService = storageService ir := image.NewImageRuntimeFromStore(r.store) - ir.SignaturePolicyPath = r.config.SignaturePolicyPath - ir.EventsLogFilePath = r.config.EventsLogFilePath - ir.EventsLogger = r.config.EventsLogger + ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath + ir.EventsLogFilePath = r.config.Engine.EventsLogFilePath + ir.EventsLogger = r.config.Engine.EventsLogger r.imageRuntime = ir @@ -775,3 +776,74 @@ func (r *Runtime) SystemContext() *types.SystemContext { func (r *Runtime) GetOCIRuntimePath() string { return r.defaultOCIRuntime.Path() } + +// StorageConfig retrieves the storage options for the container runtime +func (r *Runtime) StorageConfig() storage.StoreOptions { + return r.storageConfig +} + +// DBConfig is a set of Libpod runtime configuration settings that are saved in +// a State when it is first created, and can subsequently be retrieved. +type DBConfig struct { + LibpodRoot string + LibpodTmp string + StorageRoot string + StorageTmp string + GraphDriver string + VolumePath string +} + +// mergeDBConfig merges the configuration from the database. 
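Every field it touches follows the same rule: the value recorded in the database wins unless the caller explicitly set one (tracked by the storageSet flags introduced earlier in this diff), and any override is logged. Pulled out as a hypothetical standalone helper for clarity:

package main

import "fmt"

// mergeField mirrors the per-field rule of mergeDBConfig below
// (illustrative helper only, not part of libpod).
func mergeField(name, current, fromDB string, explicitlySet bool) string {
	if explicitlySet || fromDB == "" {
		return current // caller's choice, or nothing stored in the DB
	}
	if current != "" && current != fromDB {
		fmt.Printf("overriding %s %q with %q from database\n", name, current, fromDB)
	}
	return fromDB
}

func main() {
	fmt.Println(mergeField("graph root", "/var/lib/containers/storage", "/srv/storage", false))
	fmt.Println(mergeField("tmp dir", "/run/custom", "/run/libpod", true))
}

The real function, which follows, applies this rule to the run root, graph root, graph driver, static dir, tmp dir, and volume path.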
+func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error { + + c := &r.config.Engine + if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" { + if r.storageConfig.RunRoot != dbConfig.StorageTmp && + r.storageConfig.RunRoot != "" { + logrus.Debugf("Overriding run root %q with %q from database", + r.storageConfig.RunRoot, dbConfig.StorageTmp) + } + r.storageConfig.RunRoot = dbConfig.StorageTmp + } + + if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" { + if r.storageConfig.GraphRoot != dbConfig.StorageRoot && + r.storageConfig.GraphRoot != "" { + logrus.Debugf("Overriding graph root %q with %q from database", + r.storageConfig.GraphRoot, dbConfig.StorageRoot) + } + r.storageConfig.GraphRoot = dbConfig.StorageRoot + } + + if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" { + if r.storageConfig.GraphDriverName != dbConfig.GraphDriver && + r.storageConfig.GraphDriverName != "" { + logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve", + r.storageConfig.GraphDriverName, dbConfig.GraphDriver) + } + r.storageConfig.GraphDriverName = dbConfig.GraphDriver + } + + if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" { + if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" { + logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot) + } + c.StaticDir = dbConfig.LibpodRoot + } + + if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" { + if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" { + logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp) + } + c.TmpDir = dbConfig.LibpodTmp + c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log") + } + + if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" { + if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" { + logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath) + } + c.VolumePath = dbConfig.VolumePath + } + return nil +} diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index ba2a6b93e..207ac6477 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -8,11 +8,13 @@ import ( "strings" "time" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/storage/pkg/stringid" + "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opentracing/opentracing-go" @@ -59,7 +61,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config // If the path to ConmonPidFile starts with the default value (RunRoot), then // the user has not specified '--conmon-pidfile' during run or create (probably). // In that case reset ConmonPidFile to be set to the default value later.
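The hunks below also swap the hard-coded SHM size for the shm_size string from containers.conf, parsed with docker/go-units. Note that FromHumanSize uses decimal units (1k = 1000 bytes), so the usual "65536k" default comes out slightly larger than 64 MiB:

package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"65536k", "64m", "1g"} {
		n, err := units.FromHumanSize(s)
		if err != nil {
			fmt.Printf("%s: %v\n", s, err)
			continue
		}
		fmt.Printf("%s -> %d bytes\n", s, n)
	}
}

The ConmonPidFile check described above continues directly below.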
- if strings.HasPrefix(ctr.config.ConmonPidFile, r.config.StorageConfig.RunRoot) { + if strings.HasPrefix(ctr.config.ConmonPidFile, r.storageConfig.RunRoot) { ctr.config.ConmonPidFile = "" } @@ -76,7 +78,11 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf if config == nil { ctr.config.ID = stringid.GenerateNonCryptoID() - ctr.config.ShmSize = define.DefaultShmSize + size, err := units.FromHumanSize(r.config.Containers.ShmSize) + if err != nil { + return nil, errors.Wrapf(err, "converting containers.conf ShmSize %s to an int", r.config.Containers.ShmSize) + } + ctr.config.ShmSize = size } else { // This is a restore from an imported checkpoint ctr.restoreFromCheckpoint = true @@ -101,14 +107,14 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf ctr.state.BindMounts = make(map[string]string) - ctr.config.StopTimeout = define.CtrRemoveTimeout + ctr.config.StopTimeout = r.config.Engine.StopTimeout ctr.config.OCIRuntime = r.defaultOCIRuntime.Name() // Set namespace based on current runtime namespace // Do so before options run so they can override it - if r.config.Namespace != "" { - ctr.config.Namespace = r.config.Namespace + if r.config.Engine.Namespace != "" { + ctr.config.Namespace = r.config.Engine.Namespace } ctr.runtime = r @@ -131,6 +137,7 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options .. return nil, errors.Wrapf(err, "error running container create option") } } + return r.setupContainer(ctx, ctr) } @@ -199,8 +206,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai // Check CGroup parent sanity, and set it if it was not set. // Only if we're actually configuring CGroups. if !ctr.config.NoCgroups { - switch r.config.CgroupManager { - case define.CgroupfsCgroupsManager: + switch r.config.Engine.CgroupManager { + case config.CgroupfsCgroupsManager: if ctr.config.CgroupParent == "" { if pod != nil && pod.config.UsePodCgroup { podCgroup, err := pod.CgroupPath() @@ -217,7 +224,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai } else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") { return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs") } - case define.SystemdCgroupsManager: + case config.SystemdCgroupsManager: if ctr.config.CgroupParent == "" { switch { case pod != nil && pod.config.UsePodCgroup: @@ -235,7 +242,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups") } default: - return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager) + return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager) } } @@ -462,11 +469,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - // Check that all of our exec sessions have finished - for _, session := range c.state.ExecSessions { - if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil { - return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID()) - } + // Remove all active exec sessions + if err := c.removeAllExecSessions(); err != nil { + return err } // Check that no other containers 
depend on the container. @@ -483,9 +488,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } } - // Set ContainerStateRemoving and remove exec sessions + // Set ContainerStateRemoving c.state.State = define.ContainerStateRemoving - c.state.ExecSessions = nil if err := c.save(); err != nil { return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) @@ -832,6 +836,24 @@ func (r *Runtime) GetLatestContainer() (*Container, error) { return ctrs[lastCreatedIndex], nil } +// GetExecSessionContainer gets the container that a given exec session ID is +// attached to. +func (r *Runtime) GetExecSessionContainer(id string) (*Container, error) { + r.lock.RLock() + defer r.lock.RUnlock() + + if !r.valid { + return nil, define.ErrRuntimeStopped + } + + ctrID, err := r.state.GetExecSession(id) + if err != nil { + return nil, err + } + + return r.state.Container(ctrID) +} + // PruneContainers removes stopped and exited containers from localstorage. A set of optional filters // can be provided to be more granular. func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) (map[string]int64, map[string]error, error) { diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go index e1dc31391..be566e211 100644 --- a/libpod/runtime_pod.go +++ b/libpod/runtime_pod.go @@ -90,18 +90,10 @@ func (r *Runtime) LookupPod(idOrName string) (*Pod, error) { // output. Multiple filters are handled by ANDing their output, so only pods // matching all filters are returned func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.valid { - return nil, define.ErrRuntimeStopped - } - - pods, err := r.state.AllPods() + pods, err := r.GetAllPods() if err != nil { return nil, err } - podsFiltered := make([]*Pod, 0, len(pods)) for _, pod := range pods { include := true diff --git a/libpod/runtime_pod_infra_linux.go b/libpod/runtime_pod_infra_linux.go index 279cafa39..06a7b3936 100644 --- a/libpod/runtime_pod_infra_linux.go +++ b/libpod/runtime_pod_infra_linux.go @@ -23,7 +23,7 @@ const ( IDTruncLength = 12 ) -func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID string, config *v1.ImageConfig) (*Container, error) { +func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawImageName, imgID string, config *v1.ImageConfig) (*Container, error) { // Set up generator for infra container defaults g, err := generate.New("linux") @@ -36,7 +36,7 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID isRootless := rootless.IsRootless() - entryCmd := []string{r.config.InfraCommand} + entryCmd := []string{r.config.Engine.InfraCommand} var options []CtrCreateOption // I've seen circumstances where config is being passed as nil. // Let's err on the side of safety and make sure it's safe to use. 
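Returning briefly to the exec-session registry: GetExecSessionContainer, added a few hunks above, is its read side. The state maps a session ID to a container ID, and the container is then loaded as usual. A hypothetical caller (the session ID is invented for illustration):

package main

import (
	"context"
	"fmt"

	"github.com/containers/libpod/libpod"
)

func main() {
	rt, err := libpod.NewRuntime(context.Background())
	if err != nil {
		fmt.Println("runtime init:", err)
		return
	}
	defer func() { _ = rt.Shutdown(false) }()

	// Fails with define.ErrNoSuchExecSession when the ID is unknown.
	ctr, err := rt.GetExecSessionContainer("0123456789abcdef")
	if err != nil {
		fmt.Println("lookup:", err)
		return
	}
	fmt.Println("session belongs to container", ctr.ID())
}

Back in runtime_pod_infra_linux.go, the hunks below thread rawImageName through makeInfraContainer.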
@@ -127,7 +127,7 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, imgID containerName := p.ID()[:IDTruncLength] + "-infra" options = append(options, r.WithPod(p)) - options = append(options, WithRootFSFromImage(imgID, imgName)) + options = append(options, WithRootFSFromImage(imgID, imgName, rawImageName)) options = append(options, WithName(containerName)) options = append(options, withIsInfra()) @@ -142,7 +142,7 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container, return nil, define.ErrRuntimeStopped } - newImage, err := r.ImageRuntime().New(ctx, r.config.InfraImage, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing) + newImage, err := r.ImageRuntime().New(ctx, r.config.Engine.InfraImage, "", "", nil, nil, image.SigningOptions{}, nil, util.PullImageMissing) if err != nil { return nil, err } @@ -154,5 +154,5 @@ func (r *Runtime) createInfraContainer(ctx context.Context, p *Pod) (*Container, imageName := newImage.Names()[0] imageID := data.ID - return r.makeInfraContainer(ctx, p, imageName, imageID, data.Config) + return r.makeInfraContainer(ctx, p, imageName, r.config.Engine.InfraImage, imageID, data.Config) } diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go index 4afd5760a..872e8ea8a 100644 --- a/libpod/runtime_pod_linux.go +++ b/libpod/runtime_pod_linux.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strings" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/pkg/cgroups" @@ -34,8 +35,8 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po // Set default namespace to runtime's namespace // Do so before options run so they can override it - if r.config.Namespace != "" { - pod.config.Namespace = r.config.Namespace + if r.config.Engine.Namespace != "" { + pod.config.Namespace = r.config.Engine.Namespace } for _, option := range options { @@ -75,8 +76,8 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po pod.valid = true // Check CGroup parent sanity, and set it if it was not set - switch r.config.CgroupManager { - case define.CgroupfsCgroupsManager: + switch r.config.Engine.CgroupManager { + case config.CgroupfsCgroupsManager: if pod.config.CgroupParent == "" { pod.config.CgroupParent = CgroupfsDefaultCgroupParent } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { @@ -89,7 +90,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po if pod.config.UsePodCgroup { pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID()) } - case define.SystemdCgroupsManager: + case config.SystemdCgroupsManager: if pod.config.CgroupParent == "" { if rootless.IsRootless() { pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent @@ -109,7 +110,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po pod.state.CgroupPath = cgroupPath } default: - return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.CgroupManager) + return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager) } if pod.config.UsePodCgroup { @@ -198,7 +199,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) // the pod and conmon CGroups with a PID limit to prevent them from // spawning any further 
processes (particularly cleanup processes) which // would prevent removing the CGroups. - if p.runtime.config.CgroupManager == define.CgroupfsCgroupsManager { + if p.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager { // Get the conmon CGroup conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") conmonCgroup, err := cgroups.Load(conmonCgroupPath) @@ -272,8 +273,8 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) if p.state.CgroupPath != "" { logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath) - switch p.runtime.config.CgroupManager { - case define.SystemdCgroupsManager: + switch p.runtime.config.Engine.CgroupManager { + case config.SystemdCgroupsManager: if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil { if removalErr == nil { removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID()) @@ -281,7 +282,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) } } - case define.CgroupfsCgroupsManager: + case config.CgroupfsCgroupsManager: // Delete the cgroupfs cgroup // Make sure the conmon cgroup is deleted first // Since the pod is almost gone, don't bother failing @@ -326,9 +327,9 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool) // keep going so we make sure to evict the pod before // ending up with an inconsistent state. if removalErr == nil { - removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.CgroupManager, p.ID()) + removalErr = errors.Wrapf(define.ErrInternal, "unrecognized cgroup manager %s when removing pod %s cgroups", p.runtime.config.Engine.CgroupManager, p.ID()) } else { - logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.CgroupManager, p.ID()) + logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID()) } } } diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go index efc3c5bd9..d5fede1d1 100644 --- a/libpod/runtime_volume.go +++ b/libpod/runtime_volume.go @@ -35,7 +35,6 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error return nil } } - return r.removeVolume(ctx, v, force) } @@ -130,26 +129,22 @@ func (r *Runtime) GetAllVolumes() ([]*Volume, error) { } // PruneVolumes removes unused volumes from the system -func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) { - var ( - prunedIDs []string - pruneErrors []error - ) +func (r *Runtime) PruneVolumes(ctx context.Context) (map[string]error, error) { + reports := make(map[string]error) vols, err := r.GetAllVolumes() if err != nil { - pruneErrors = append(pruneErrors, err) - return nil, pruneErrors + return nil, err } for _, vol := range vols { if err := r.RemoveVolume(ctx, vol, false); err != nil { if errors.Cause(err) != define.ErrVolumeBeingUsed && errors.Cause(err) != define.ErrVolumeRemoved { - pruneErrors = append(pruneErrors, err) + reports[vol.Name()] = err } continue } vol.newVolumeEvent(events.Prune) - prunedIDs = append(prunedIDs, vol.Name()) + reports[vol.Name()] = nil } - return prunedIDs, pruneErrors + return reports, nil } diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go index e9cfda9d4..d4b46cc94 100644 --- a/libpod/runtime_volume_linux.go +++ b/libpod/runtime_volume_linux.go @@ -71,7 +71,7 @@ 
func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) } // Create the mountpoint of this volume - volPathRoot := filepath.Join(r.config.VolumePath, volume.config.Name) + volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name) if err := os.MkdirAll(volPathRoot, 0700); err != nil { return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot) } diff --git a/libpod/state.go b/libpod/state.go index b246b5eac..6206a2994 100644 --- a/libpod/state.go +++ b/libpod/state.go @@ -1,7 +1,5 @@ package libpod -import "github.com/containers/libpod/libpod/config" - // State is a storage backend for libpod's current state. // A State is only initialized once per instance of libpod. // As such, initialization methods for State implementations may safely assume @@ -28,7 +26,7 @@ type State interface { // root and tmp dirs, and c/storage graph driver. // This is not implemented by the in-memory state, as it has no need to // validate runtime configuration. - GetDBConfig() (*config.DBConfig, error) + GetDBConfig() (*DBConfig, error) // ValidateDBConfig validates the config in the given Runtime struct // against paths stored in the configured database. @@ -72,6 +70,8 @@ type State interface { // Removes container from state. // Containers that are part of pods must use RemoveContainerFromPod. // The container must be part of the set namespace. + // All dependencies must be removed first. + // All exec sessions referencing the container must be removed first. RemoveContainer(ctr *Container) error // UpdateContainer updates a container's state from the backing store. // The container must be part of the set namespace. @@ -95,6 +95,30 @@ type State interface { // Return a container config from the database by full ID GetContainerConfig(id string) (*ContainerConfig, error) + // Add creates a reference to an exec session in the database. + // The container the exec session is attached to will be recorded. + // The container state will not be modified. + // The actual exec session itself is part of the container's state. + // We assume higher-level callers will add the session by saving the + // container's state before calling this. This only ensures that the ID + // of the exec session is associated with the ID of the container. + // Implementations may, but are not required to, verify that the state + // of the given container has an exec session with the ID given. + AddExecSession(ctr *Container, session *ExecSession) error + // Get retrieves the container a given exec session is attached to. + GetExecSession(id string) (string, error) + // Remove a reference to an exec session from the database. + // This will not modify container state to remove the exec session there + // and instead only removes the session ID -> container ID reference + // added by AddExecSession. + RemoveExecSession(session *ExecSession) error + // Get the IDs of all exec sessions attached to a given container. + GetContainerExecSessions(ctr *Container) ([]string, error) + // Remove all exec sessions for a single container. + // Usually used as part of removing the container. + // As with RemoveExecSession, container state will not be modified. + RemoveContainerExecSessions(ctr *Container) error + // PLEASE READ FULL DESCRIPTION BEFORE USING. // Rewrite a container's configuration. 
// This function breaks libpod's normal prohibition on a read-only diff --git a/libpod/state_test.go b/libpod/state_test.go index 39937d8e4..db1c8dd99 100644 --- a/libpod/state_test.go +++ b/libpod/state_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/containers/libpod/libpod/config" + "github.com/containers/common/pkg/config" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/lock" "github.com/containers/storage" @@ -54,7 +54,7 @@ func getEmptyBoltState() (s State, p string, m lock.Manager, err error) { runtime := new(Runtime) runtime.config = new(config.Config) - runtime.config.StorageConfig = storage.StoreOptions{} + runtime.storageConfig = storage.StoreOptions{} runtime.lockManager = lockManager state, err := NewBoltState(dbPath, runtime) diff --git a/libpod/util.go b/libpod/util.go index f79d6c09b..e9d234bbe 100644 --- a/libpod/util.go +++ b/libpod/util.go @@ -12,7 +12,8 @@ import ( "strings" "time" - "github.com/containers/libpod/libpod/config" + "github.com/containers/common/pkg/config" + "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/utils" "github.com/fsnotify/fsnotify" diff --git a/libpod/util_unsupported.go b/libpod/util_unsupported.go index 9a9a6eeb6..4c5616bd0 100644 --- a/libpod/util_unsupported.go +++ b/libpod/util_unsupported.go @@ -25,7 +25,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) { // LabelVolumePath takes a mount path for a volume and gives it an // selinux label of either shared or not -func LabelVolumePath(path string, shared bool) error { +func LabelVolumePath(path string) error { return define.ErrNotImplemented } diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go index e89b3484d..781ff77ca 100644 --- a/libpod/volume_internal.go +++ b/libpod/volume_internal.go @@ -23,7 +23,7 @@ func newVolume(runtime *Runtime) (*Volume, error) { // teardownStorage deletes the volume from volumePath func (v *Volume) teardownStorage() error { - return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name())) + return os.RemoveAll(filepath.Join(v.runtime.config.Engine.VolumePath, v.Name())) } // Volumes with options set, or a filesystem type, or a device to mount need to
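As a closing example, PruneVolumes (reworked in runtime_volume.go above) now returns a per-volume report map instead of parallel slices of IDs and errors. A hypothetical consumer iterates it like this:

package main

import (
	"context"
	"fmt"

	"github.com/containers/libpod/libpod"
)

func main() {
	ctx := context.Background()
	rt, err := libpod.NewRuntime(ctx)
	if err != nil {
		fmt.Println("runtime init:", err)
		return
	}
	defer func() { _ = rt.Shutdown(false) }()

	reports, err := rt.PruneVolumes(ctx)
	if err != nil {
		fmt.Println("prune:", err)
		return
	}
	for name, rerr := range reports {
		if rerr != nil {
			fmt.Printf("volume %s: %v\n", name, rerr)
			continue
		}
		fmt.Printf("volume %s pruned\n", name)
	}
}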