Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/boltdb_state.go | 70
-rw-r--r-- | libpod/boltdb_state_internal.go | 204
-rw-r--r-- | libpod/container.go | 9
-rw-r--r-- | libpod/container_api.go | 13
-rw-r--r-- | libpod/container_commit.go | 4
-rw-r--r-- | libpod/container_inspect.go | 140
-rw-r--r-- | libpod/container_internal.go | 26
-rw-r--r-- | libpod/container_internal_linux.go | 12
-rw-r--r-- | libpod/image/pull.go | 8
-rw-r--r-- | libpod/info.go | 10
-rw-r--r-- | libpod/lock/shm/shm_lock.c | 6
-rw-r--r-- | libpod/lock/shm/shm_lock.go | 1
-rw-r--r-- | libpod/lock/shm/shm_lock.h | 3
-rw-r--r-- | libpod/networking_linux.go | 2
-rw-r--r-- | libpod/oci.go | 52
-rw-r--r-- | libpod/oci_linux.go | 4
-rw-r--r-- | libpod/pod_api.go | 2
-rw-r--r-- | libpod/pod_internal.go | 2
-rw-r--r-- | libpod/runtime.go | 204
-rw-r--r-- | libpod/runtime_ctr.go | 18
20 files changed, 529 insertions, 261 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 63e40a98f..12c364993 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -73,42 +73,50 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
 	// As such, just a db.Close() is fine here.
 	defer db.Close()
-	// Perform initial database setup
-	err = db.Update(func(tx *bolt.Tx) error {
-		if _, err := tx.CreateBucketIfNotExists(idRegistryBkt); err != nil {
-			return errors.Wrapf(err, "error creating id-registry bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(nameRegistryBkt); err != nil {
-			return errors.Wrapf(err, "error creating name-registry bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(nsRegistryBkt); err != nil {
-			return errors.Wrapf(err, "error creating ns-registry bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(ctrBkt); err != nil {
-			return errors.Wrapf(err, "error creating containers bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(allCtrsBkt); err != nil {
-			return errors.Wrapf(err, "error creating all containers bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(podBkt); err != nil {
-			return errors.Wrapf(err, "error creating pods bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(allPodsBkt); err != nil {
-			return errors.Wrapf(err, "error creating all pods bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(volBkt); err != nil {
-			return errors.Wrapf(err, "error creating volume bucket")
-		}
-		if _, err := tx.CreateBucketIfNotExists(allVolsBkt); err != nil {
-			return errors.Wrapf(err, "error creating all volumes bucket")
+	createBuckets := [][]byte{
+		idRegistryBkt,
+		nameRegistryBkt,
+		nsRegistryBkt,
+		ctrBkt,
+		allCtrsBkt,
+		podBkt,
+		allPodsBkt,
+		volBkt,
+		allVolsBkt,
+		runtimeConfigBkt,
+	}
+
+	// Does the DB need an update?
+	needsUpdate := false
+	err = db.View(func(tx *bolt.Tx) error {
+		for _, bkt := range createBuckets {
+			if test := tx.Bucket(bkt); test == nil {
+				needsUpdate = true
+				break
+			}
 		}
-		if _, err := tx.CreateBucketIfNotExists(runtimeConfigBkt); err != nil {
-			return errors.Wrapf(err, "error creating runtime-config bucket")
+		return nil
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "error checking DB schema")
+	}
+
+	if !needsUpdate {
+		state.valid = true
+		return state, nil
+	}
+
+	// Ensure schema is properly created in DB
+	err = db.Update(func(tx *bolt.Tx) error {
+		for _, bkt := range createBuckets {
+			if _, err := tx.CreateBucketIfNotExists(bkt); err != nil {
+				return errors.Wrapf(err, "error creating bucket %s", string(bkt))
+			}
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating initial database layout")
+		return nil, errors.Wrapf(err, "error creating buckets for DB")
 	}
 	state.valid = true
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 313e5f4d7..b7930e158 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -2,6 +2,7 @@ package libpod
 
 import (
 	"bytes"
+	"path/filepath"
 	"runtime"
 	"strings"
 
@@ -72,98 +73,160 @@ var (
 	volPathKey = []byte(volPathName)
 )
+// This represents a field in the runtime configuration that will be validated
+// against the DB to ensure no configuration mismatches occur.
+type dbConfigValidation struct { + name string // Only used for error messages + runtimeValue string + key []byte + defaultValue string +} + // Check if the configuration of the database is compatible with the // configuration of the runtime opening it // If there is no runtime configuration loaded, load our own func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error { - err := db.Update(func(tx *bolt.Tx) error { + storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + if err != nil { + return err + } + + // We need to validate the following things + checks := []dbConfigValidation{ + { + "OS", + runtime.GOOS, + osKey, + runtime.GOOS, + }, + { + "libpod root directory (staticdir)", + rt.config.StaticDir, + staticDirKey, + "", + }, + { + "libpod temporary files directory (tmpdir)", + rt.config.TmpDir, + tmpDirKey, + "", + }, + { + "storage temporary directory (runroot)", + rt.config.StorageConfig.RunRoot, + runRootKey, + storeOpts.RunRoot, + }, + { + "storage graph root directory (graphroot)", + rt.config.StorageConfig.GraphRoot, + graphRootKey, + storeOpts.GraphRoot, + }, + { + "storage graph driver", + rt.config.StorageConfig.GraphDriverName, + graphDriverKey, + storeOpts.GraphDriverName, + }, + { + "volume path", + rt.config.VolumePath, + volPathKey, + "", + }, + } + + // These fields were missing and will have to be recreated. + missingFields := []dbConfigValidation{} + + // Let's try and validate read-only first + err = db.View(func(tx *bolt.Tx) error { configBkt, err := getRuntimeConfigBucket(tx) if err != nil { return err } - if err := validateDBAgainstConfig(configBkt, "OS", runtime.GOOS, osKey, runtime.GOOS); err != nil { - return err + for _, check := range checks { + exists, err := readOnlyValidateConfig(configBkt, check) + if err != nil { + return err + } + if !exists { + missingFields = append(missingFields, check) + } } - if err := validateDBAgainstConfig(configBkt, "libpod root directory (staticdir)", - rt.config.StaticDir, staticDirKey, ""); err != nil { - return err - } + return nil + }) + if err != nil { + return err + } - if err := validateDBAgainstConfig(configBkt, "libpod temporary files directory (tmpdir)", - rt.config.TmpDir, tmpDirKey, ""); err != nil { - return err - } + if len(missingFields) == 0 { + return nil + } - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + // Populate missing fields + return db.Update(func(tx *bolt.Tx) error { + configBkt, err := getRuntimeConfigBucket(tx) if err != nil { return err } - if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)", - rt.config.StorageConfig.RunRoot, runRootKey, - storeOpts.RunRoot); err != nil { - return err - } - if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)", - rt.config.StorageConfig.GraphRoot, graphRootKey, - storeOpts.GraphRoot); err != nil { - return err - } + for _, missing := range missingFields { + dbValue := []byte(missing.runtimeValue) + if missing.runtimeValue == "" && missing.defaultValue != "" { + dbValue = []byte(missing.defaultValue) + } - if err := validateDBAgainstConfig(configBkt, "storage graph driver", - rt.config.StorageConfig.GraphDriverName, - graphDriverKey, - storeOpts.GraphDriverName); err != nil { - return err + if err := configBkt.Put(missing.key, dbValue); err != nil { + return errors.Wrapf(err, "error updating %s in DB runtime config", missing.name) + } } - return validateDBAgainstConfig(configBkt, "volume path", - 
rt.config.VolumePath, volPathKey, "") + return nil }) - - return err } -// Validate a configuration entry in the DB against current runtime config -// If the given configuration key does not exist it will be created -// If the given runtimeValue or value retrieved from the database are the empty -// string and defaultValue is not, defaultValue will be checked instead. This -// ensures that we will not fail on configuration changes in configured c/storage. -func validateDBAgainstConfig(bucket *bolt.Bucket, fieldName, runtimeValue string, keyName []byte, defaultValue string) error { - keyBytes := bucket.Get(keyName) +// Attempt a read-only validation of a configuration entry in the DB against an +// element of the current runtime configuration. +// If the configuration key in question does not exist, (false, nil) will be +// returned. +// If the configuration key does exist, and matches the runtime configuration +// successfully, (true, nil) is returned. +// An error is only returned when validation fails. +// if the given runtimeValue or value retrieved from the database are empty, +// and defaultValue is not, defaultValue will be checked instead. This ensures +// that we will not fail on configuration changes in c/storage (where we may +// pass the empty string to use defaults). +func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bool, error) { + keyBytes := bucket.Get(toCheck.key) if keyBytes == nil { - dbValue := []byte(runtimeValue) - if runtimeValue == "" && defaultValue != "" { - dbValue = []byte(defaultValue) - } + // False return indicates missing key + return false, nil + } - if err := bucket.Put(keyName, dbValue); err != nil { - return errors.Wrapf(err, "error updating %s in DB runtime config", fieldName) - } - } else { - if runtimeValue != string(keyBytes) { - // If runtimeValue is the empty string, check against - // the default - if runtimeValue == "" && defaultValue != "" && - string(keyBytes) == defaultValue { - return nil - } + dbValue := string(keyBytes) - // If DB value is the empty string, check that the - // runtime value is the default - if string(keyBytes) == "" && defaultValue != "" && - runtimeValue == defaultValue { - return nil - } + if toCheck.runtimeValue != dbValue { + // If the runtime value is the empty string and default is not, + // check against default. + if toCheck.runtimeValue == "" && toCheck.defaultValue != "" && dbValue == toCheck.defaultValue { + return true, nil + } - return errors.Wrapf(ErrDBBadConfig, "database %s %s does not match our %s %s", - fieldName, string(keyBytes), fieldName, runtimeValue) + // If the DB value is the empty string, check that the runtime + // value is the default. + if dbValue == "" && toCheck.defaultValue != "" && toCheck.runtimeValue == toCheck.defaultValue { + return true, nil } + + return true, errors.Wrapf(ErrDBBadConfig, "database %s %q does not match our %s %q", + toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue) } - return nil + return true, nil } // Open a connection to the database. @@ -304,6 +367,23 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt. } ctr.lock = lock + if ctr.config.OCIRuntime == "" { + ctr.ociRuntime = s.runtime.defaultOCIRuntime + } else { + // Handle legacy containers which might use a literal path for + // their OCI runtime name. 
+ runtimeName := ctr.config.OCIRuntime + if strings.HasPrefix(runtimeName, "/") { + runtimeName = filepath.Base(runtimeName) + } + + ociRuntime, ok := s.runtime.ociRuntimes[runtimeName] + if !ok { + return errors.Wrapf(ErrInternal, "container %s was created with OCI runtime %s, but that runtime is not available in the current configuration", ctr.ID(), ctr.config.OCIRuntime) + } + ctr.ociRuntime = ociRuntime + } + ctr.runtime = s.runtime ctr.valid = valid diff --git a/libpod/container.go b/libpod/container.go index 68c4cd6b0..464b233d1 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -145,9 +145,10 @@ type Container struct { // Functions called on a batched container will not lock or sync batched bool - valid bool - lock lock.Locker - runtime *Runtime + valid bool + lock lock.Locker + runtime *Runtime + ociRuntime *OCIRuntime rootlessSlirpSyncR *os.File rootlessSlirpSyncW *os.File @@ -789,7 +790,7 @@ func (c *Container) LogDriver() string { // RuntimeName returns the name of the runtime func (c *Container) RuntimeName() string { - return c.runtime.ociRuntime.name + return c.config.OCIRuntime } // Runtime spec accessors diff --git a/libpod/container_api.go b/libpod/container_api.go index 52d3afc0a..370e3e5d9 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -180,7 +180,7 @@ func (c *Container) StopWithTimeout(timeout uint) error { if c.state.State == ContainerStateConfigured || c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused { - return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s in state %s", c.ID(), c.state.State.String()) + return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String()) } if c.state.State == ContainerStateStopped || @@ -203,11 +203,11 @@ func (c *Container) Kill(signal uint) error { } if c.state.State != ContainerStateRunning { - return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers") + return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers. 
%s is in state %s", c.ID(), c.state.State.String()) } defer c.newContainerEvent(events.Kill) - if err := c.runtime.ociRuntime.killContainer(c, signal); err != nil { + if err := c.ociRuntime.killContainer(c, signal); err != nil { return err } @@ -280,7 +280,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID) - execCmd, err := c.runtime.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, hostUser, sessionID, streams, preserveFDs) + execCmd, err := c.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, hostUser, sessionID, streams, preserveFDs) if err != nil { return errors.Wrapf(err, "error exec %s", c.ID()) } @@ -627,6 +627,7 @@ func (c *Container) Batch(batchFunc func(*Container) error) error { newCtr.config = c.config newCtr.state = c.state newCtr.runtime = c.runtime + newCtr.ociRuntime = c.ociRuntime newCtr.lock = c.lock newCtr.valid = true @@ -658,7 +659,7 @@ func (c *Container) Sync() error { (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) { oldState := c.state.State - if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil { + if err := c.ociRuntime.updateContainerStatus(c, true); err != nil { return err } // Only save back to DB if state changed @@ -715,7 +716,7 @@ func (c *Container) Refresh(ctx context.Context) error { if len(c.state.ExecSessions) > 0 { logrus.Infof("Killing %d exec sessions in container %s. They will not be restored after refresh.", len(c.state.ExecSessions), c.ID()) - if err := c.runtime.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil { + if err := c.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil { return err } } diff --git a/libpod/container_commit.go b/libpod/container_commit.go index 739fcd80e..5e6ddcbd1 100644 --- a/libpod/container_commit.go +++ b/libpod/container_commit.go @@ -52,11 +52,11 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai } if c.state.State == ContainerStateRunning && options.Pause { - if err := c.runtime.ociRuntime.pauseContainer(c); err != nil { + if err := c.ociRuntime.pauseContainer(c); err != nil { return nil, errors.Wrapf(err, "error pausing container %q", c.ID()) } defer func() { - if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil { + if err := c.ociRuntime.unpauseContainer(c); err != nil { logrus.Errorf("error unpausing container %q: %v", c.ID(), err) } }() diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 0a62ceb7c..3ac774060 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -1,8 +1,10 @@ package libpod import ( + "strings" "time" + "github.com/containers/image/manifest" "github.com/containers/libpod/libpod/driver" "github.com/cri-o/ocicni/pkg/ocicni" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -29,6 +31,7 @@ type InspectContainerData struct { HostsPath string `json:"HostsPath"` StaticDir string `json:"StaticDir"` OCIConfigPath string `json:"OCIConfigPath,omitempty"` + OCIRuntime string `json:"OCIRuntime,omitempty"` LogPath string `json:"LogPath"` ConmonPidFile string `json:"ConmonPidFile"` Name string `json:"Name"` @@ -43,12 +46,59 @@ type InspectContainerData struct { GraphDriver *driver.Data `json:"GraphDriver"` SizeRw int64 `json:"SizeRw,omitempty"` SizeRootFs int64 `json:"SizeRootFs,omitempty"` - Mounts []*InspectMount `json:"Mounts"` + Mounts []InspectMount `json:"Mounts"` 
Dependencies []string `json:"Dependencies"` NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO ExitCommand []string `json:"ExitCommand"` Namespace string `json:"Namespace"` IsInfra bool `json:"IsInfra"` + Config *InspectContainerConfig `json:"Config"` +} + +// InspectContainerConfig holds further data about how a container was initially +// configured. +type InspectContainerConfig struct { + // Container hostname + Hostname string `json:"Hostname"` + // Container domain name - unused at present + DomainName string `json:"Domainname"` + // User the container was launched with + User string `json:"User"` + // Unused, at present + AttachStdin bool `json:"AttachStdin"` + // Unused, at present + AttachStdout bool `json:"AttachStdout"` + // Unused, at present + AttachStderr bool `json:"AttachStderr"` + // Whether the container creates a TTY + Tty bool `json:"Tty"` + // Whether the container leaves STDIN open + OpenStdin bool `json:"OpenStdin"` + // Whether STDIN is only left open once. + // Presently not supported by Podman, unused. + StdinOnce bool `json:"StdinOnce"` + // Container environment variables + Env []string `json:"Env"` + // Container command + Cmd []string `json:"Cmd"` + // Container image + Image string `json:"Image"` + // Unused, at present. I've never seen this field populated. + Volumes map[string]struct{} `json:"Volumes"` + // Container working directory + WorkingDir string `json:"WorkingDir"` + // Container entrypoint + Entrypoint string `json:"Entrypoint"` + // On-build arguments - presently unused. More of Buildah's domain. + OnBuild *string `json:"OnBuild"` + // Container labels + Labels map[string]string `json:"Labels"` + // Container annotations + Annotations map[string]string `json:"Annotations"` + // Container stop signal + StopSignal uint `json:"StopSignal"` + // Configured healthcheck for the container + Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` } // InspectMount provides a record of a single mount in a container. It contains @@ -61,10 +111,10 @@ type InspectMount struct { // The name of the volume. Empty for bind mounts. Name string `json:"Name,omptempty"` // The source directory for the volume. - Src string `json:"Source"` + Source string `json:"Source"` // The destination directory for the volume. Specified as a path within // the container, as it would be passed into the OCI runtime. - Dst string `json:"Destination"` + Destination string `json:"Destination"` // The driver used for the named volume. Empty for bind mounts. Driver string `json:"Driver"` // Contains SELinux :z/:Z mount options. 
Unclear what, if anything, else @@ -192,7 +242,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) } } - mounts, err := c.getInspectMounts() + mounts, err := c.getInspectMounts(spec) if err != nil { return nil, err } @@ -225,6 +275,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) HostsPath: hostsPath, StaticDir: config.StaticDir, LogPath: config.LogPath, + OCIRuntime: config.OCIRuntime, ConmonPidFile: config.ConmonPidFile, Name: config.Name, RestartCount: int32(runtimeInfo.RestartCount), @@ -284,6 +335,12 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) // Get information on the container's network namespace (if present) data = c.getContainerNetworkInfo(data) + inspectConfig, err := c.generateInspectContainerConfig(spec) + if err != nil { + return nil, err + } + data.Config = inspectConfig + if size { rootFsSize, err := c.rootFsSize() if err != nil { @@ -302,8 +359,8 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) // Get inspect-formatted mounts list. // Only includes user-specified mounts. Only includes bind mounts and named // volumes, not tmpfs volumes. -func (c *Container) getInspectMounts() ([]*InspectMount, error) { - inspectMounts := []*InspectMount{} +func (c *Container) getInspectMounts(ctrSpec *spec.Spec) ([]InspectMount, error) { + inspectMounts := []InspectMount{} // No mounts, return early if len(c.config.UserVolumes) == 0 { @@ -319,7 +376,7 @@ func (c *Container) getInspectMounts() ([]*InspectMount, error) { for _, namedVol := range c.config.NamedVolumes { namedVolumes[namedVol.Dest] = namedVol } - for _, mount := range c.config.Spec.Mounts { + for _, mount := range ctrSpec.Mounts { mounts[mount.Destination] = mount } @@ -327,9 +384,9 @@ func (c *Container) getInspectMounts() ([]*InspectMount, error) { // We need to look up the volumes. // First: is it a named volume? if volume, ok := namedVolumes[vol]; ok { - mountStruct := new(InspectMount) + mountStruct := InspectMount{} mountStruct.Type = "volume" - mountStruct.Dst = volume.Dest + mountStruct.Destination = volume.Dest mountStruct.Name = volume.Name // For src and driver, we need to look up the named @@ -339,9 +396,9 @@ func (c *Container) getInspectMounts() ([]*InspectMount, error) { return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID()) } mountStruct.Driver = volFromDB.Driver() - mountStruct.Src = volFromDB.MountPoint() + mountStruct.Source = volFromDB.MountPoint() - parseMountOptionsForInspect(volume.Options, mountStruct) + parseMountOptionsForInspect(volume.Options, &mountStruct) inspectMounts = append(inspectMounts, mountStruct) } else if mount, ok := mounts[vol]; ok { @@ -351,12 +408,12 @@ func (c *Container) getInspectMounts() ([]*InspectMount, error) { continue } - mountStruct := new(InspectMount) + mountStruct := InspectMount{} mountStruct.Type = "bind" - mountStruct.Src = mount.Source - mountStruct.Dst = mount.Destination + mountStruct.Source = mount.Source + mountStruct.Destination = mount.Destination - parseMountOptionsForInspect(mount.Options, mountStruct) + parseMountOptionsForInspect(mount.Options, &mountStruct) inspectMounts = append(inspectMounts, mountStruct) } @@ -401,3 +458,56 @@ func parseMountOptionsForInspect(options []string, mount *InspectMount) { mount.Mode = zZ mount.Options = otherOpts } + +// Generate the InspectContainerConfig struct for the Config field of Inspect. 
+func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*InspectContainerConfig, error) { + ctrConfig := new(InspectContainerConfig) + + ctrConfig.Hostname = c.Hostname() + ctrConfig.User = c.config.User + if spec.Process != nil { + ctrConfig.Tty = spec.Process.Terminal + ctrConfig.Env = []string{} + for _, val := range spec.Process.Env { + ctrConfig.Env = append(ctrConfig.Env, val) + } + ctrConfig.WorkingDir = spec.Process.Cwd + } + + ctrConfig.OpenStdin = c.config.Stdin + ctrConfig.Image = c.config.RootfsImageName + + // Leave empty is not explicitly overwritten by user + if len(c.config.Command) != 0 { + ctrConfig.Cmd = []string{} + for _, val := range c.config.Command { + ctrConfig.Cmd = append(ctrConfig.Cmd, val) + } + } + + // Leave empty if not explicitly overwritten by user + if len(c.config.Entrypoint) != 0 { + ctrConfig.Entrypoint = strings.Join(c.config.Entrypoint, " ") + } + + if len(c.config.Labels) != 0 { + ctrConfig.Labels = make(map[string]string) + for k, v := range c.config.Labels { + ctrConfig.Labels[k] = v + } + } + + if len(spec.Annotations) != 0 { + ctrConfig.Annotations = make(map[string]string) + for k, v := range spec.Annotations { + ctrConfig.Annotations[k] = v + } + } + + ctrConfig.StopSignal = c.config.StopSignal + // TODO: should JSON deep copy this to ensure internal pointers don't + // leak. + ctrConfig.Healthcheck = c.config.HealthCheckConfig + + return ctrConfig, nil +} diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 9245a8840..56fd27afb 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -128,7 +128,7 @@ func (c *Container) CheckpointPath() string { // AttachSocketPath retrieves the path of the container's attach socket func (c *Container) AttachSocketPath() string { - return filepath.Join(c.runtime.ociRuntime.socketsDir, c.ID(), "attach") + return filepath.Join(c.ociRuntime.socketsDir, c.ID(), "attach") } // Get PID file path for a container's exec session @@ -138,7 +138,7 @@ func (c *Container) execPidPath(sessionID string) string { // exitFilePath gets the path to the container's exit file func (c *Container) exitFilePath() string { - return filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID()) + return filepath.Join(c.ociRuntime.exitsDir, c.ID()) } // Wait for the container's exit file to appear. @@ -164,7 +164,7 @@ func (c *Container) waitForExitFileAndSync() error { return err } - if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil { + if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { return err } @@ -299,7 +299,7 @@ func (c *Container) syncContainer() error { (c.state.State != ContainerStateExited) { oldState := c.state.State // TODO: optionally replace this with a stat for the exit file - if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil { + if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { return err } // Only save back to DB if state changed @@ -509,7 +509,7 @@ func (c *Container) refresh() error { // We need to pick up a new lock lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID) if err != nil { - return errors.Wrapf(err, "error acquiring lock for container %s", c.ID()) + return errors.Wrapf(err, "error acquiring lock %d for container %s", c.config.LockID, c.ID()) } c.lock = lock @@ -547,8 +547,8 @@ func (c *Container) removeConmonFiles() error { // Instead of outright deleting the exit file, rename it (if it exists). 
// We want to retain it so we can get the exit code of containers which // are removed (at least until we have a workable events system) - exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID()) - oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID())) + exitFile := filepath.Join(c.ociRuntime.exitsDir, c.ID()) + oldExitFile := filepath.Join(c.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID())) if _, err := os.Stat(exitFile); err != nil { if !os.IsNotExist(err) { return errors.Wrapf(err, "error running stat on container %s exit file", c.ID()) @@ -866,7 +866,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { } // With the spec complete, do an OCI create - if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil { + if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil { return err } @@ -1013,7 +1013,7 @@ func (c *Container) start() error { logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args) } - if err := c.runtime.ociRuntime.startContainer(c); err != nil { + if err := c.ociRuntime.startContainer(c); err != nil { return err } logrus.Debugf("Started container %s", c.ID()) @@ -1038,7 +1038,7 @@ func (c *Container) start() error { func (c *Container) stop(timeout uint) error { logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout) - if err := c.runtime.ociRuntime.stopContainer(c, timeout); err != nil { + if err := c.ociRuntime.stopContainer(c, timeout); err != nil { return err } @@ -1053,7 +1053,7 @@ func (c *Container) stop(timeout uint) error { // Internal, non-locking function to pause a container func (c *Container) pause() error { - if err := c.runtime.ociRuntime.pauseContainer(c); err != nil { + if err := c.ociRuntime.pauseContainer(c); err != nil { return err } @@ -1066,7 +1066,7 @@ func (c *Container) pause() error { // Internal, non-locking function to unpause a container func (c *Container) unpause() error { - if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil { + if err := c.ociRuntime.unpauseContainer(c); err != nil { return err } @@ -1245,7 +1245,7 @@ func (c *Container) delete(ctx context.Context) (err error) { span.SetTag("struct", "container") defer span.Finish() - if err := c.runtime.ociRuntime.deleteContainer(c); err != nil { + if err := c.ociRuntime.deleteContainer(c); err != nil { return errors.Wrapf(err, "error removing container %s from runtime", c.ID()) } diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 0be5427d9..50a2e2d44 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -424,7 +424,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // It also expects to be able to write to /sys/fs/cgroup/systemd and /var/log/journal func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) error { options := []string{"rw", "rprivate", "noexec", "nosuid", "nodev"} - for _, dest := range []string{"/run"} { + for _, dest := range []string{"/run", "/run/lock"} { if MountExists(mounts, dest) { continue } @@ -541,7 +541,7 @@ func (c *Container) checkpointRestoreSupported() (err error) { if !criu.CheckForCriu() { return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion) } - if !c.runtime.ociRuntime.featureCheckCheckpointing() { + if !c.ociRuntime.featureCheckCheckpointing() { return errors.Errorf("Configured runtime does not support 
checkpoint/restore") } return nil @@ -575,7 +575,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return err } - if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil { + if err := c.ociRuntime.checkpointContainer(c, options); err != nil { return err } @@ -769,7 +769,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err := c.saveSpec(g.Spec()); err != nil { return err } - if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil { + if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil { return err } @@ -1069,6 +1069,10 @@ func (c *Container) getHosts() string { hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0]) } } + if c.config.NetMode.IsSlirp4netns() { + // When using slirp4netns, the interface gets a static IP + hosts += fmt.Sprintf("# used by slirp4netns\n%s\t%s\n", "10.0.2.100", c.Hostname()) + } if len(c.state.NetworkStatus) > 0 && len(c.state.NetworkStatus[0].IPs) > 0 { ipAddress := strings.Split(c.state.NetworkStatus[0].IPs[0].Address.String(), "/")[0] hosts += fmt.Sprintf("%s\t%s\n", ipAddress, c.Hostname()) diff --git a/libpod/image/pull.go b/libpod/image/pull.go index cb7411ce5..644a9ae86 100644 --- a/libpod/image/pull.go +++ b/libpod/image/pull.go @@ -149,6 +149,13 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types. // Need to load in all the repo tags from the manifest res := []pullRefPair{} for _, dst := range manifest[0].RepoTags { + //check if image exists and gives a warning of untagging + localImage, err := ir.NewFromLocal(dst) + imageID := strings.TrimSuffix(manifest[0].Config, ".json") + if err == nil && imageID != localImage.ID() { + logrus.Errorf("the image %s already exists, renaming the old one with ID %s to empty string", dst, localImage.ID()) + } + pullInfo, err := ir.getPullRefPair(srcRef, dst) if err != nil { return nil, err @@ -168,7 +175,6 @@ func (ir *Runtime) pullGoalFromImageReference(ctx context.Context, srcRef types. 
if err != nil { return nil, errors.Wrapf(err, "error loading manifest for %q", srcRef) } - var dest string if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" { // If the input image has no image.ref.name, we need to feed it a dest anyways diff --git a/libpod/info.go b/libpod/info.go index b42f64a1f..c96293e3d 100644 --- a/libpod/info.go +++ b/libpod/info.go @@ -47,12 +47,12 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) { hostDistributionInfo := r.GetHostDistributionInfo() info["Conmon"] = map[string]interface{}{ "path": r.conmonPath, - "package": r.ociRuntime.conmonPackage(), + "package": r.defaultOCIRuntime.conmonPackage(), "version": conmonVersion, } info["OCIRuntime"] = map[string]interface{}{ - "path": r.ociRuntime.path, - "package": r.ociRuntime.pathPackage(), + "path": r.defaultOCIRuntime.path, + "package": r.defaultOCIRuntime.pathPackage(), "version": ociruntimeVersion, } info["Distribution"] = map[string]interface{}{ @@ -190,12 +190,12 @@ func (r *Runtime) GetConmonVersion() (string, error) { // GetOCIRuntimePath returns the path to the OCI Runtime Path the runtime is using func (r *Runtime) GetOCIRuntimePath() string { - return r.ociRuntimePath.Paths[0] + return r.defaultOCIRuntime.path } // GetOCIRuntimeVersion returns a string representation of the oci runtimes version func (r *Runtime) GetOCIRuntimeVersion() (string, error) { - output, err := utils.ExecCmd(r.ociRuntimePath.Paths[0], "--version") + output, err := utils.ExecCmd(r.GetOCIRuntimePath(), "--version") if err != nil { return "", err } diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c index 047d3c417..fbb3f57cc 100644 --- a/libpod/lock/shm/shm_lock.c +++ b/libpod/lock/shm/shm_lock.c @@ -413,7 +413,7 @@ int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) { // Returns 0 on success, negative ERRNO values on failure int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) { bitmap_t test_map; - int bitmap_index, index_in_bitmap, ret_code, i; + int bitmap_index, index_in_bitmap, ret_code; if (shm == NULL) { return -1 * EINVAL; @@ -500,7 +500,7 @@ int32_t deallocate_all_semaphores(shm_struct_t *shm) { // subsequently realize they have been removed). // Returns 0 on success, -1 on failure int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { - int bitmap_index, index_in_bitmap, ret_code; + int bitmap_index, index_in_bitmap; if (shm == NULL) { return -1 * EINVAL; @@ -522,7 +522,7 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { // subsequently realize they have been removed). 
// Returns 0 on success, -1 on failure int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) { - int bitmap_index, index_in_bitmap, ret_code; + int bitmap_index, index_in_bitmap; if (shm == NULL) { return -1 * EINVAL; diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index c21e9a221..76dd5729e 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -1,6 +1,7 @@ package shm // #cgo LDFLAGS: -lrt -lpthread +// #cgo CFLAGS: -Wall -Werror // #include <stdlib.h> // #include "shm_lock.h" // const uint32_t bitmap_size_c = BITMAP_SIZE; diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h index 759f8178a..8796b43f4 100644 --- a/libpod/lock/shm/shm_lock.h +++ b/libpod/lock/shm/shm_lock.h @@ -32,9 +32,6 @@ typedef struct shm_struct { lock_group_t locks[]; } shm_struct_t; -static size_t compute_shm_size(uint32_t num_bitmaps); -static int take_mutex(pthread_mutex_t *mutex); -static int release_mutex(pthread_mutex_t *mutex); shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code); shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code); int32_t close_lock_shm(shm_struct_t *shm); diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index ed9ad5f0d..93ec157c5 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -170,7 +170,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { defer syncW.Close() havePortMapping := len(ctr.Config().PortMappings) > 0 - apiSocket := filepath.Join(r.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID)) + apiSocket := filepath.Join(ctr.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID)) cmdArgs := []string{} if havePortMapping { diff --git a/libpod/oci.go b/libpod/oci.go index dcb72fc1b..36c1dea84 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -75,25 +75,53 @@ type ociError struct { Msg string `json:"msg,omitempty"` } -// Make a new OCI runtime with provided options -func newOCIRuntime(oruntime OCIRuntimePath, conmonPath string, conmonEnv []string, cgroupManager string, tmpDir string, logSizeMax int64, noPivotRoot bool, reservePorts bool, supportsJSON bool) (*OCIRuntime, error) { +// Make a new OCI runtime with provided options. +// The first path that points to a valid executable will be used. +func newOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON bool) (*OCIRuntime, error) { + if name == "" { + return nil, errors.Wrapf(ErrInvalidArg, "the OCI runtime must be provided a non-empty name") + } + runtime := new(OCIRuntime) - runtime.name = oruntime.Name - runtime.path = oruntime.Paths[0] + runtime.name = name runtime.conmonPath = conmonPath - runtime.conmonEnv = conmonEnv - runtime.cgroupManager = cgroupManager - runtime.tmpDir = tmpDir - runtime.logSizeMax = logSizeMax - runtime.noPivot = noPivotRoot - runtime.reservePorts = reservePorts + + runtime.conmonEnv = runtimeCfg.ConmonEnvVars + runtime.cgroupManager = runtimeCfg.CgroupManager + runtime.tmpDir = runtimeCfg.TmpDir + runtime.logSizeMax = runtimeCfg.MaxLogSize + runtime.noPivot = runtimeCfg.NoPivotRoot + runtime.reservePorts = runtimeCfg.EnablePortReservation + + // TODO: probe OCI runtime for feature and enable automatically if + // available. 
runtime.supportsJSON = supportsJSON + foundPath := false + for _, path := range paths { + stat, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, errors.Wrapf(err, "cannot stat %s", path) + } + if !stat.Mode().IsRegular() { + continue + } + foundPath = true + runtime.path = path + break + } + if !foundPath { + return nil, errors.Wrapf(ErrInvalidArg, "no valid executable found for OCI runtime %s", name) + } + runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") - if cgroupManager != CgroupfsCgroupsManager && cgroupManager != SystemdCgroupsManager { - return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", cgroupManager) + if runtime.cgroupManager != CgroupfsCgroupsManager && runtime.cgroupManager != SystemdCgroupsManager { + return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) } // Create the exit files and attach sockets directories diff --git a/libpod/oci_linux.go b/libpod/oci_linux.go index be7210bd2..b7efa742a 100644 --- a/libpod/oci_linux.go +++ b/libpod/oci_linux.go @@ -246,7 +246,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res } logDriver := KubernetesLogging - if ctr.LogDriver() != "" { + if ctr.LogDriver() == JSONLogging { + logrus.Errorf("json-file logging specified but not supported. Choosing k8s-file logging instead") + } else if ctr.LogDriver() != "" { logDriver = ctr.LogDriver() } args = append(args, "-l", fmt.Sprintf("%s:%s", logDriver, ctr.LogPath())) diff --git a/libpod/pod_api.go b/libpod/pod_api.go index 9ed5c88eb..b913857dd 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -357,7 +357,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { continue } - if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil { + if err := ctr.ociRuntime.killContainer(ctr, signal); err != nil { ctr.lock.Unlock() ctrErrors[ctr.ID()] = err continue diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go index 1fcb5b1a6..23359d841 100644 --- a/libpod/pod_internal.go +++ b/libpod/pod_internal.go @@ -58,7 +58,7 @@ func (p *Pod) refresh() error { // Retrieve the pod's lock lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID) if err != nil { - return errors.Wrapf(err, "error retrieving lock for pod %s", p.ID()) + return errors.Wrapf(err, "error retrieving lock %d for pod %s", p.config.LockID, p.ID()) } p.lock = lock diff --git a/libpod/runtime.go b/libpod/runtime.go index 82e7338aa..52ce8062b 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -7,6 +7,7 @@ import ( "os" "os/user" "path/filepath" + "strings" "sync" "syscall" @@ -92,18 +93,18 @@ type RuntimeOption func(*Runtime) error type Runtime struct { config *RuntimeConfig - state State - store storage.Store - storageService *storageService - imageContext *types.SystemContext - ociRuntime *OCIRuntime - netPlugin ocicni.CNIPlugin - ociRuntimePath OCIRuntimePath - conmonPath string - imageRuntime *image.Runtime - firewallBackend firewall.FirewallBackend - lockManager lock.Manager - configuredFrom *runtimeConfiguredFrom + state State + store storage.Store + storageService *storageService + imageContext *types.SystemContext + defaultOCIRuntime *OCIRuntime + ociRuntimes map[string]*OCIRuntime + netPlugin ocicni.CNIPlugin + conmonPath string + imageRuntime *image.Runtime + firewallBackend firewall.FirewallBackend + lockManager lock.Manager + configuredFrom 
*runtimeConfiguredFrom // doRenumber indicates that the runtime should perform a lock renumber // during initialization. @@ -124,14 +125,6 @@ type Runtime struct { eventer events.Eventer } -// OCIRuntimePath contains information about an OCI runtime. -type OCIRuntimePath struct { - // Name of the runtime to refer to by the --runtime flag - Name string `toml:"name"` - // Paths to check for this executable - Paths []string `toml:"paths"` -} - // RuntimeConfig contains configuration options used to set up the runtime type RuntimeConfig struct { // StorageConfig is the configuration used by containers/storage @@ -299,6 +292,8 @@ func defaultRuntimeConfig() (RuntimeConfig, error) { "/usr/local/libexec/crio/conmon", "/usr/bin/conmon", "/usr/sbin/conmon", + "/usr/local/bin/conmon", + "/usr/local/sbin/conmon", "/usr/lib/crio/bin/conmon", }, ConmonEnvVars: []string{ @@ -645,63 +640,6 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { - // Backward compatibility for `runtime_path` - if runtime.config.RuntimePath != nil { - // Don't print twice in rootless mode. - if os.Geteuid() == 0 { - logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`") - logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used") - } - - // Transform `runtime_path` into `runtimes` and `runtime`. - name := filepath.Base(runtime.config.RuntimePath[0]) - runtime.config.OCIRuntime = name - runtime.config.OCIRuntimes = map[string][]string{name: runtime.config.RuntimePath} - } - - // Find a working OCI runtime binary - foundRuntime := false - // If runtime is an absolute path, then use it as it is. - if runtime.config.OCIRuntime != "" && runtime.config.OCIRuntime[0] == '/' { - foundRuntime = true - runtime.ociRuntimePath = OCIRuntimePath{Name: filepath.Base(runtime.config.OCIRuntime), Paths: []string{runtime.config.OCIRuntime}} - stat, err := os.Stat(runtime.config.OCIRuntime) - if err != nil { - if os.IsNotExist(err) { - return errors.Wrapf(err, "the specified OCI runtime %s does not exist", runtime.config.OCIRuntime) - } - return errors.Wrapf(err, "cannot stat the OCI runtime path %s", runtime.config.OCIRuntime) - } - if !stat.Mode().IsRegular() { - return fmt.Errorf("the specified OCI runtime %s is not a valid file", runtime.config.OCIRuntime) - } - } else { - // If not, look it up in the configuration. 
- paths := runtime.config.OCIRuntimes[runtime.config.OCIRuntime] - if paths != nil { - for _, path := range paths { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - continue - } - return errors.Wrapf(err, "cannot stat %s", path) - } - if !stat.Mode().IsRegular() { - continue - } - foundRuntime = true - runtime.ociRuntimePath = OCIRuntimePath{Name: runtime.config.OCIRuntime, Paths: []string{path}} - break - } - } - } - if !foundRuntime { - return errors.Wrapf(ErrInvalidArg, - "could not find a working binary (configured options: %v)", - runtime.config.OCIRuntimes) - } - // Find a working conmon binary foundConmon := false for _, path := range runtime.config.ConmonPath { @@ -898,25 +836,107 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } } - supportsJSON := false - for _, r := range runtime.config.RuntimeSupportsJSON { - if r == runtime.config.OCIRuntime { - supportsJSON = true - break + // Get us at least one working OCI runtime. + runtime.ociRuntimes = make(map[string]*OCIRuntime) + + // Is the old runtime_path defined? + if runtime.config.RuntimePath != nil { + // Don't print twice in rootless mode. + if os.Geteuid() == 0 { + logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`") + logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used") } + + if len(runtime.config.RuntimePath) == 0 { + return errors.Wrapf(ErrInvalidArg, "empty runtime path array passed") + } + + name := filepath.Base(runtime.config.RuntimePath[0]) + + supportsJSON := false + for _, r := range runtime.config.RuntimeSupportsJSON { + if r == name { + supportsJSON = true + break + } + } + + ociRuntime, err := newOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, supportsJSON) + if err != nil { + return err + } + + runtime.ociRuntimes[name] = ociRuntime + runtime.defaultOCIRuntime = ociRuntime } - // Make an OCI runtime to perform container operations - ociRuntime, err := newOCIRuntime(runtime.ociRuntimePath, - runtime.conmonPath, runtime.config.ConmonEnvVars, - runtime.config.CgroupManager, runtime.config.TmpDir, - runtime.config.MaxLogSize, runtime.config.NoPivotRoot, - runtime.config.EnablePortReservation, - supportsJSON) - if err != nil { - return err + // Initialize remaining OCI runtimes + for name, paths := range runtime.config.OCIRuntimes { + if len(paths) == 0 { + return errors.Wrapf(ErrInvalidArg, "must provide at least 1 path to OCI runtime %s", name) + } + + supportsJSON := false + for _, r := range runtime.config.RuntimeSupportsJSON { + if r == name { + supportsJSON = true + break + } + } + + ociRuntime, err := newOCIRuntime(name, paths, runtime.conmonPath, runtime.config, supportsJSON) + if err != nil { + // Don't fatally error. + // This will allow us to ship configs including optional + // runtimes that might not be installed (crun, kata). + // Only a warnf so default configs don't spec errors. + logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err) + continue + } + + runtime.ociRuntimes[name] = ociRuntime + } + + // Do we have a default OCI runtime? + if runtime.config.OCIRuntime != "" { + // If the string starts with / it's a path to a runtime + // executable. 
+ if strings.HasPrefix(runtime.config.OCIRuntime, "/") { + name := filepath.Base(runtime.config.OCIRuntime) + + supportsJSON := false + for _, r := range runtime.config.RuntimeSupportsJSON { + if r == name { + supportsJSON = true + break + } + } + + ociRuntime, err := newOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, supportsJSON) + if err != nil { + return err + } + + runtime.ociRuntimes[name] = ociRuntime + runtime.defaultOCIRuntime = ociRuntime + } else { + ociRuntime, ok := runtime.ociRuntimes[runtime.config.OCIRuntime] + if !ok { + return errors.Wrapf(ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime) + } + runtime.defaultOCIRuntime = ociRuntime + } + } + + // Do we have at least one valid OCI runtime? + if len(runtime.ociRuntimes) == 0 { + return errors.Wrapf(ErrInvalidArg, "no OCI runtime has been configured") + } + + // Do we have a default runtime? + if runtime.defaultOCIRuntime == nil { + return errors.Wrapf(ErrInvalidArg, "no default OCI runtime was configured") } - runtime.ociRuntime = ociRuntime // Make the per-boot files directory if it does not exist if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil { diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 0871b83a7..271d4160d 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -94,7 +94,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf ctr.config.StopTimeout = CtrRemoveTimeout - ctr.config.OCIRuntime = r.config.OCIRuntime + ctr.config.OCIRuntime = r.defaultOCIRuntime.name // Set namespace based on current runtime namespace // Do so before options run so they can override it @@ -139,6 +139,16 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo ctr.state.State = ContainerStateConfigured ctr.runtime = r + if ctr.config.OCIRuntime == "" { + ctr.ociRuntime = r.defaultOCIRuntime + } else { + ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime] + if !ok { + return nil, errors.Wrapf(ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime) + } + ctr.ociRuntime = ociRuntime + } + var pod *Pod if ctr.config.Pod != "" { // Get the pod from state @@ -362,7 +372,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } if c.state.State == ContainerStatePaused { - if err := c.runtime.ociRuntime.killContainer(c, 9); err != nil { + if err := c.ociRuntime.killContainer(c, 9); err != nil { return err } if err := c.unpause(); err != nil { @@ -376,7 +386,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, // Check that the container's in a good state to be removed if c.state.State == ContainerStateRunning { - if err := r.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil { + if err := c.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil { return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID()) } @@ -388,7 +398,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, // Check that all of our exec sessions have finished if len(c.state.ExecSessions) != 0 { - if err := r.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil { + if err := c.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil { return err } } |