Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/boltdb_state.go | 70
-rw-r--r-- | libpod/boltdb_state_internal.go | 204
-rw-r--r-- | libpod/container.go | 9
-rw-r--r-- | libpod/container_api.go | 8
-rw-r--r-- | libpod/container_commit.go | 4
-rw-r--r-- | libpod/container_inspect.go | 2
-rw-r--r-- | libpod/container_internal.go | 24
-rw-r--r-- | libpod/container_internal_linux.go | 6
-rw-r--r-- | libpod/info.go | 10
-rw-r--r-- | libpod/lock/shm/shm_lock.c | 6
-rw-r--r-- | libpod/lock/shm/shm_lock.go | 1
-rw-r--r-- | libpod/lock/shm/shm_lock.h | 3
-rw-r--r-- | libpod/networking_linux.go | 2
-rw-r--r-- | libpod/oci.go | 52
-rw-r--r-- | libpod/pod_api.go | 2
-rw-r--r-- | libpod/runtime.go | 202
-rw-r--r-- | libpod/runtime_ctr.go | 18
17 files changed, 384 insertions, 239 deletions
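
The bulk of the diff below replaces the single, path-based `runtime_path`/`ociRuntimePath` handling with a set of named OCI runtimes (`Runtime.ociRuntimes`) plus a `defaultOCIRuntime`, driven by the `OCIRuntime`, `OCIRuntimes`, and `RuntimeSupportsJSON` fields of `RuntimeConfig`. As a rough, hypothetical illustration only — the runtime names and binary paths here are not taken from the diff — a configuration exercising the new code in `makeRuntime` might be populated like this:

package libpod

// Illustrative only: runtime names and binary paths are made up. This is the
// shape of RuntimeConfig that the reworked makeRuntime consumes when building
// Runtime.ociRuntimes and selecting defaultOCIRuntime.
var exampleConfig = &RuntimeConfig{
	// Name of the default runtime; may also be an absolute path to a binary.
	OCIRuntime: "runc",
	// Candidate binary paths per named runtime; newOCIRuntime uses the first
	// path that is an existing regular file.
	OCIRuntimes: map[string][]string{
		"runc": {"/usr/bin/runc", "/usr/sbin/runc"},
		"crun": {"/usr/bin/crun"},
	},
	// Runtimes flagged as supporting JSON output (sets supportsJSON in
	// newOCIRuntime).
	RuntimeSupportsJSON: []string{"crun"},
}
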
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go index 63e40a98f..12c364993 100644 --- a/libpod/boltdb_state.go +++ b/libpod/boltdb_state.go @@ -73,42 +73,50 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { // As such, just a db.Close() is fine here. defer db.Close() - // Perform initial database setup - err = db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(idRegistryBkt); err != nil { - return errors.Wrapf(err, "error creating id-registry bucket") - } - if _, err := tx.CreateBucketIfNotExists(nameRegistryBkt); err != nil { - return errors.Wrapf(err, "error creating name-registry bucket") - } - if _, err := tx.CreateBucketIfNotExists(nsRegistryBkt); err != nil { - return errors.Wrapf(err, "error creating ns-registry bucket") - } - if _, err := tx.CreateBucketIfNotExists(ctrBkt); err != nil { - return errors.Wrapf(err, "error creating containers bucket") - } - if _, err := tx.CreateBucketIfNotExists(allCtrsBkt); err != nil { - return errors.Wrapf(err, "error creating all containers bucket") - } - if _, err := tx.CreateBucketIfNotExists(podBkt); err != nil { - return errors.Wrapf(err, "error creating pods bucket") - } - if _, err := tx.CreateBucketIfNotExists(allPodsBkt); err != nil { - return errors.Wrapf(err, "error creating all pods bucket") - } - if _, err := tx.CreateBucketIfNotExists(volBkt); err != nil { - return errors.Wrapf(err, "error creating volume bucket") - } - if _, err := tx.CreateBucketIfNotExists(allVolsBkt); err != nil { - return errors.Wrapf(err, "error creating all volumes bucket") + createBuckets := [][]byte{ + idRegistryBkt, + nameRegistryBkt, + nsRegistryBkt, + ctrBkt, + allCtrsBkt, + podBkt, + allPodsBkt, + volBkt, + allVolsBkt, + runtimeConfigBkt, + } + + // Does the DB need an update? + needsUpdate := false + err = db.View(func(tx *bolt.Tx) error { + for _, bkt := range createBuckets { + if test := tx.Bucket(bkt); test == nil { + needsUpdate = true + break + } } - if _, err := tx.CreateBucketIfNotExists(runtimeConfigBkt); err != nil { - return errors.Wrapf(err, "error creating runtime-config bucket") + return nil + }) + if err != nil { + return nil, errors.Wrapf(err, "error checking DB schema") + } + + if !needsUpdate { + state.valid = true + return state, nil + } + + // Ensure schema is properly created in DB + err = db.Update(func(tx *bolt.Tx) error { + for _, bkt := range createBuckets { + if _, err := tx.CreateBucketIfNotExists(bkt); err != nil { + return errors.Wrapf(err, "error creating bucket %s", string(bkt)) + } } return nil }) if err != nil { - return nil, errors.Wrapf(err, "error creating initial database layout") + return nil, errors.Wrapf(err, "error creating buckets for DB") } state.valid = true diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go index 313e5f4d7..b7930e158 100644 --- a/libpod/boltdb_state_internal.go +++ b/libpod/boltdb_state_internal.go @@ -2,6 +2,7 @@ package libpod import ( "bytes" + "path/filepath" "runtime" "strings" @@ -72,98 +73,160 @@ var ( volPathKey = []byte(volPathName) ) +// This represents a field in the runtime configuration that will be validated +// against the DB to ensure no configuration mismatches occur. 
+type dbConfigValidation struct { + name string // Only used for error messages + runtimeValue string + key []byte + defaultValue string +} + // Check if the configuration of the database is compatible with the // configuration of the runtime opening it // If there is no runtime configuration loaded, load our own func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error { - err := db.Update(func(tx *bolt.Tx) error { + storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + if err != nil { + return err + } + + // We need to validate the following things + checks := []dbConfigValidation{ + { + "OS", + runtime.GOOS, + osKey, + runtime.GOOS, + }, + { + "libpod root directory (staticdir)", + rt.config.StaticDir, + staticDirKey, + "", + }, + { + "libpod temporary files directory (tmpdir)", + rt.config.TmpDir, + tmpDirKey, + "", + }, + { + "storage temporary directory (runroot)", + rt.config.StorageConfig.RunRoot, + runRootKey, + storeOpts.RunRoot, + }, + { + "storage graph root directory (graphroot)", + rt.config.StorageConfig.GraphRoot, + graphRootKey, + storeOpts.GraphRoot, + }, + { + "storage graph driver", + rt.config.StorageConfig.GraphDriverName, + graphDriverKey, + storeOpts.GraphDriverName, + }, + { + "volume path", + rt.config.VolumePath, + volPathKey, + "", + }, + } + + // These fields were missing and will have to be recreated. + missingFields := []dbConfigValidation{} + + // Let's try and validate read-only first + err = db.View(func(tx *bolt.Tx) error { configBkt, err := getRuntimeConfigBucket(tx) if err != nil { return err } - if err := validateDBAgainstConfig(configBkt, "OS", runtime.GOOS, osKey, runtime.GOOS); err != nil { - return err + for _, check := range checks { + exists, err := readOnlyValidateConfig(configBkt, check) + if err != nil { + return err + } + if !exists { + missingFields = append(missingFields, check) + } } - if err := validateDBAgainstConfig(configBkt, "libpod root directory (staticdir)", - rt.config.StaticDir, staticDirKey, ""); err != nil { - return err - } + return nil + }) + if err != nil { + return err + } - if err := validateDBAgainstConfig(configBkt, "libpod temporary files directory (tmpdir)", - rt.config.TmpDir, tmpDirKey, ""); err != nil { - return err - } + if len(missingFields) == 0 { + return nil + } - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + // Populate missing fields + return db.Update(func(tx *bolt.Tx) error { + configBkt, err := getRuntimeConfigBucket(tx) if err != nil { return err } - if err := validateDBAgainstConfig(configBkt, "storage temporary directory (runroot)", - rt.config.StorageConfig.RunRoot, runRootKey, - storeOpts.RunRoot); err != nil { - return err - } - if err := validateDBAgainstConfig(configBkt, "storage graph root directory (graphroot)", - rt.config.StorageConfig.GraphRoot, graphRootKey, - storeOpts.GraphRoot); err != nil { - return err - } + for _, missing := range missingFields { + dbValue := []byte(missing.runtimeValue) + if missing.runtimeValue == "" && missing.defaultValue != "" { + dbValue = []byte(missing.defaultValue) + } - if err := validateDBAgainstConfig(configBkt, "storage graph driver", - rt.config.StorageConfig.GraphDriverName, - graphDriverKey, - storeOpts.GraphDriverName); err != nil { - return err + if err := configBkt.Put(missing.key, dbValue); err != nil { + return errors.Wrapf(err, "error updating %s in DB runtime config", missing.name) + } } - return validateDBAgainstConfig(configBkt, "volume path", - 
rt.config.VolumePath, volPathKey, "") + return nil }) - - return err } -// Validate a configuration entry in the DB against current runtime config -// If the given configuration key does not exist it will be created -// If the given runtimeValue or value retrieved from the database are the empty -// string and defaultValue is not, defaultValue will be checked instead. This -// ensures that we will not fail on configuration changes in configured c/storage. -func validateDBAgainstConfig(bucket *bolt.Bucket, fieldName, runtimeValue string, keyName []byte, defaultValue string) error { - keyBytes := bucket.Get(keyName) +// Attempt a read-only validation of a configuration entry in the DB against an +// element of the current runtime configuration. +// If the configuration key in question does not exist, (false, nil) will be +// returned. +// If the configuration key does exist, and matches the runtime configuration +// successfully, (true, nil) is returned. +// An error is only returned when validation fails. +// if the given runtimeValue or value retrieved from the database are empty, +// and defaultValue is not, defaultValue will be checked instead. This ensures +// that we will not fail on configuration changes in c/storage (where we may +// pass the empty string to use defaults). +func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bool, error) { + keyBytes := bucket.Get(toCheck.key) if keyBytes == nil { - dbValue := []byte(runtimeValue) - if runtimeValue == "" && defaultValue != "" { - dbValue = []byte(defaultValue) - } + // False return indicates missing key + return false, nil + } - if err := bucket.Put(keyName, dbValue); err != nil { - return errors.Wrapf(err, "error updating %s in DB runtime config", fieldName) - } - } else { - if runtimeValue != string(keyBytes) { - // If runtimeValue is the empty string, check against - // the default - if runtimeValue == "" && defaultValue != "" && - string(keyBytes) == defaultValue { - return nil - } + dbValue := string(keyBytes) - // If DB value is the empty string, check that the - // runtime value is the default - if string(keyBytes) == "" && defaultValue != "" && - runtimeValue == defaultValue { - return nil - } + if toCheck.runtimeValue != dbValue { + // If the runtime value is the empty string and default is not, + // check against default. + if toCheck.runtimeValue == "" && toCheck.defaultValue != "" && dbValue == toCheck.defaultValue { + return true, nil + } - return errors.Wrapf(ErrDBBadConfig, "database %s %s does not match our %s %s", - fieldName, string(keyBytes), fieldName, runtimeValue) + // If the DB value is the empty string, check that the runtime + // value is the default. + if dbValue == "" && toCheck.defaultValue != "" && toCheck.runtimeValue == toCheck.defaultValue { + return true, nil } + + return true, errors.Wrapf(ErrDBBadConfig, "database %s %q does not match our %s %q", + toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue) } - return nil + return true, nil } // Open a connection to the database. @@ -304,6 +367,23 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt. } ctr.lock = lock + if ctr.config.OCIRuntime == "" { + ctr.ociRuntime = s.runtime.defaultOCIRuntime + } else { + // Handle legacy containers which might use a literal path for + // their OCI runtime name. 
+ runtimeName := ctr.config.OCIRuntime + if strings.HasPrefix(runtimeName, "/") { + runtimeName = filepath.Base(runtimeName) + } + + ociRuntime, ok := s.runtime.ociRuntimes[runtimeName] + if !ok { + return errors.Wrapf(ErrInternal, "container %s was created with OCI runtime %s, but that runtime is not available in the current configuration", ctr.ID(), ctr.config.OCIRuntime) + } + ctr.ociRuntime = ociRuntime + } + ctr.runtime = s.runtime ctr.valid = valid diff --git a/libpod/container.go b/libpod/container.go index 68c4cd6b0..464b233d1 100644 --- a/libpod/container.go +++ b/libpod/container.go @@ -145,9 +145,10 @@ type Container struct { // Functions called on a batched container will not lock or sync batched bool - valid bool - lock lock.Locker - runtime *Runtime + valid bool + lock lock.Locker + runtime *Runtime + ociRuntime *OCIRuntime rootlessSlirpSyncR *os.File rootlessSlirpSyncW *os.File @@ -789,7 +790,7 @@ func (c *Container) LogDriver() string { // RuntimeName returns the name of the runtime func (c *Container) RuntimeName() string { - return c.runtime.ociRuntime.name + return c.config.OCIRuntime } // Runtime spec accessors diff --git a/libpod/container_api.go b/libpod/container_api.go index 0e877d04e..ed3e08dc7 100644 --- a/libpod/container_api.go +++ b/libpod/container_api.go @@ -207,7 +207,7 @@ func (c *Container) Kill(signal uint) error { } defer c.newContainerEvent(events.Kill) - if err := c.runtime.ociRuntime.killContainer(c, signal); err != nil { + if err := c.ociRuntime.killContainer(c, signal); err != nil { return err } @@ -280,7 +280,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID) - execCmd, err := c.runtime.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, hostUser, sessionID, streams, preserveFDs) + execCmd, err := c.ociRuntime.execContainer(c, cmd, capList, env, tty, workDir, hostUser, sessionID, streams, preserveFDs) if err != nil { return errors.Wrapf(err, "error exec %s", c.ID()) } @@ -658,7 +658,7 @@ func (c *Container) Sync() error { (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) { oldState := c.state.State - if err := c.runtime.ociRuntime.updateContainerStatus(c, true); err != nil { + if err := c.ociRuntime.updateContainerStatus(c, true); err != nil { return err } // Only save back to DB if state changed @@ -715,7 +715,7 @@ func (c *Container) Refresh(ctx context.Context) error { if len(c.state.ExecSessions) > 0 { logrus.Infof("Killing %d exec sessions in container %s. 
They will not be restored after refresh.", len(c.state.ExecSessions), c.ID()) - if err := c.runtime.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil { + if err := c.ociRuntime.execStopContainer(c, c.config.StopTimeout); err != nil { return err } } diff --git a/libpod/container_commit.go b/libpod/container_commit.go index 739fcd80e..5e6ddcbd1 100644 --- a/libpod/container_commit.go +++ b/libpod/container_commit.go @@ -52,11 +52,11 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai } if c.state.State == ContainerStateRunning && options.Pause { - if err := c.runtime.ociRuntime.pauseContainer(c); err != nil { + if err := c.ociRuntime.pauseContainer(c); err != nil { return nil, errors.Wrapf(err, "error pausing container %q", c.ID()) } defer func() { - if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil { + if err := c.ociRuntime.unpauseContainer(c); err != nil { logrus.Errorf("error unpausing container %q: %v", c.ID(), err) } }() diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go index 752823634..1d12b1b35 100644 --- a/libpod/container_inspect.go +++ b/libpod/container_inspect.go @@ -31,6 +31,7 @@ type InspectContainerData struct { HostsPath string `json:"HostsPath"` StaticDir string `json:"StaticDir"` OCIConfigPath string `json:"OCIConfigPath,omitempty"` + OCIRuntime string `json:"OCIRuntime,omitempty"` LogPath string `json:"LogPath"` ConmonPidFile string `json:"ConmonPidFile"` Name string `json:"Name"` @@ -274,6 +275,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) HostsPath: hostsPath, StaticDir: config.StaticDir, LogPath: config.LogPath, + OCIRuntime: config.OCIRuntime, ConmonPidFile: config.ConmonPidFile, Name: config.Name, RestartCount: int32(runtimeInfo.RestartCount), diff --git a/libpod/container_internal.go b/libpod/container_internal.go index 9245a8840..3e2fd0c44 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -128,7 +128,7 @@ func (c *Container) CheckpointPath() string { // AttachSocketPath retrieves the path of the container's attach socket func (c *Container) AttachSocketPath() string { - return filepath.Join(c.runtime.ociRuntime.socketsDir, c.ID(), "attach") + return filepath.Join(c.ociRuntime.socketsDir, c.ID(), "attach") } // Get PID file path for a container's exec session @@ -138,7 +138,7 @@ func (c *Container) execPidPath(sessionID string) string { // exitFilePath gets the path to the container's exit file func (c *Container) exitFilePath() string { - return filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID()) + return filepath.Join(c.ociRuntime.exitsDir, c.ID()) } // Wait for the container's exit file to appear. @@ -164,7 +164,7 @@ func (c *Container) waitForExitFileAndSync() error { return err } - if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil { + if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { return err } @@ -299,7 +299,7 @@ func (c *Container) syncContainer() error { (c.state.State != ContainerStateExited) { oldState := c.state.State // TODO: optionally replace this with a stat for the exit file - if err := c.runtime.ociRuntime.updateContainerStatus(c, false); err != nil { + if err := c.ociRuntime.updateContainerStatus(c, false); err != nil { return err } // Only save back to DB if state changed @@ -547,8 +547,8 @@ func (c *Container) removeConmonFiles() error { // Instead of outright deleting the exit file, rename it (if it exists). 
// We want to retain it so we can get the exit code of containers which // are removed (at least until we have a workable events system) - exitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, c.ID()) - oldExitFile := filepath.Join(c.runtime.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID())) + exitFile := filepath.Join(c.ociRuntime.exitsDir, c.ID()) + oldExitFile := filepath.Join(c.ociRuntime.exitsDir, fmt.Sprintf("%s-old", c.ID())) if _, err := os.Stat(exitFile); err != nil { if !os.IsNotExist(err) { return errors.Wrapf(err, "error running stat on container %s exit file", c.ID()) @@ -866,7 +866,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { } // With the spec complete, do an OCI create - if err := c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil { + if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil { return err } @@ -1013,7 +1013,7 @@ func (c *Container) start() error { logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args) } - if err := c.runtime.ociRuntime.startContainer(c); err != nil { + if err := c.ociRuntime.startContainer(c); err != nil { return err } logrus.Debugf("Started container %s", c.ID()) @@ -1038,7 +1038,7 @@ func (c *Container) start() error { func (c *Container) stop(timeout uint) error { logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout) - if err := c.runtime.ociRuntime.stopContainer(c, timeout); err != nil { + if err := c.ociRuntime.stopContainer(c, timeout); err != nil { return err } @@ -1053,7 +1053,7 @@ func (c *Container) stop(timeout uint) error { // Internal, non-locking function to pause a container func (c *Container) pause() error { - if err := c.runtime.ociRuntime.pauseContainer(c); err != nil { + if err := c.ociRuntime.pauseContainer(c); err != nil { return err } @@ -1066,7 +1066,7 @@ func (c *Container) pause() error { // Internal, non-locking function to unpause a container func (c *Container) unpause() error { - if err := c.runtime.ociRuntime.unpauseContainer(c); err != nil { + if err := c.ociRuntime.unpauseContainer(c); err != nil { return err } @@ -1245,7 +1245,7 @@ func (c *Container) delete(ctx context.Context) (err error) { span.SetTag("struct", "container") defer span.Finish() - if err := c.runtime.ociRuntime.deleteContainer(c); err != nil { + if err := c.ociRuntime.deleteContainer(c); err != nil { return errors.Wrapf(err, "error removing container %s from runtime", c.ID()) } diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go index 55cc5089b..60633e58c 100644 --- a/libpod/container_internal_linux.go +++ b/libpod/container_internal_linux.go @@ -541,7 +541,7 @@ func (c *Container) checkpointRestoreSupported() (err error) { if !criu.CheckForCriu() { return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion) } - if !c.runtime.ociRuntime.featureCheckCheckpointing() { + if !c.ociRuntime.featureCheckCheckpointing() { return errors.Errorf("Configured runtime does not support checkpoint/restore") } return nil @@ -575,7 +575,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO return err } - if err := c.runtime.ociRuntime.checkpointContainer(c, options); err != nil { + if err := c.ociRuntime.checkpointContainer(c, options); err != nil { return err } @@ -769,7 +769,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err := c.saveSpec(g.Spec()); err != nil { return err } - if err := 
c.runtime.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil { + if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, &options); err != nil { return err } diff --git a/libpod/info.go b/libpod/info.go index b42f64a1f..c96293e3d 100644 --- a/libpod/info.go +++ b/libpod/info.go @@ -47,12 +47,12 @@ func (r *Runtime) hostInfo() (map[string]interface{}, error) { hostDistributionInfo := r.GetHostDistributionInfo() info["Conmon"] = map[string]interface{}{ "path": r.conmonPath, - "package": r.ociRuntime.conmonPackage(), + "package": r.defaultOCIRuntime.conmonPackage(), "version": conmonVersion, } info["OCIRuntime"] = map[string]interface{}{ - "path": r.ociRuntime.path, - "package": r.ociRuntime.pathPackage(), + "path": r.defaultOCIRuntime.path, + "package": r.defaultOCIRuntime.pathPackage(), "version": ociruntimeVersion, } info["Distribution"] = map[string]interface{}{ @@ -190,12 +190,12 @@ func (r *Runtime) GetConmonVersion() (string, error) { // GetOCIRuntimePath returns the path to the OCI Runtime Path the runtime is using func (r *Runtime) GetOCIRuntimePath() string { - return r.ociRuntimePath.Paths[0] + return r.defaultOCIRuntime.path } // GetOCIRuntimeVersion returns a string representation of the oci runtimes version func (r *Runtime) GetOCIRuntimeVersion() (string, error) { - output, err := utils.ExecCmd(r.ociRuntimePath.Paths[0], "--version") + output, err := utils.ExecCmd(r.GetOCIRuntimePath(), "--version") if err != nil { return "", err } diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c index 047d3c417..fbb3f57cc 100644 --- a/libpod/lock/shm/shm_lock.c +++ b/libpod/lock/shm/shm_lock.c @@ -413,7 +413,7 @@ int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) { // Returns 0 on success, negative ERRNO values on failure int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) { bitmap_t test_map; - int bitmap_index, index_in_bitmap, ret_code, i; + int bitmap_index, index_in_bitmap, ret_code; if (shm == NULL) { return -1 * EINVAL; @@ -500,7 +500,7 @@ int32_t deallocate_all_semaphores(shm_struct_t *shm) { // subsequently realize they have been removed). // Returns 0 on success, -1 on failure int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { - int bitmap_index, index_in_bitmap, ret_code; + int bitmap_index, index_in_bitmap; if (shm == NULL) { return -1 * EINVAL; @@ -522,7 +522,7 @@ int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) { // subsequently realize they have been removed). 
// Returns 0 on success, -1 on failure int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) { - int bitmap_index, index_in_bitmap, ret_code; + int bitmap_index, index_in_bitmap; if (shm == NULL) { return -1 * EINVAL; diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go index c21e9a221..76dd5729e 100644 --- a/libpod/lock/shm/shm_lock.go +++ b/libpod/lock/shm/shm_lock.go @@ -1,6 +1,7 @@ package shm // #cgo LDFLAGS: -lrt -lpthread +// #cgo CFLAGS: -Wall -Werror // #include <stdlib.h> // #include "shm_lock.h" // const uint32_t bitmap_size_c = BITMAP_SIZE; diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h index 759f8178a..8796b43f4 100644 --- a/libpod/lock/shm/shm_lock.h +++ b/libpod/lock/shm/shm_lock.h @@ -32,9 +32,6 @@ typedef struct shm_struct { lock_group_t locks[]; } shm_struct_t; -static size_t compute_shm_size(uint32_t num_bitmaps); -static int take_mutex(pthread_mutex_t *mutex); -static int release_mutex(pthread_mutex_t *mutex); shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code); shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code); int32_t close_lock_shm(shm_struct_t *shm); diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go index ed9ad5f0d..93ec157c5 100644 --- a/libpod/networking_linux.go +++ b/libpod/networking_linux.go @@ -170,7 +170,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) { defer syncW.Close() havePortMapping := len(ctr.Config().PortMappings) > 0 - apiSocket := filepath.Join(r.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID)) + apiSocket := filepath.Join(ctr.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID)) cmdArgs := []string{} if havePortMapping { diff --git a/libpod/oci.go b/libpod/oci.go index dcb72fc1b..36c1dea84 100644 --- a/libpod/oci.go +++ b/libpod/oci.go @@ -75,25 +75,53 @@ type ociError struct { Msg string `json:"msg,omitempty"` } -// Make a new OCI runtime with provided options -func newOCIRuntime(oruntime OCIRuntimePath, conmonPath string, conmonEnv []string, cgroupManager string, tmpDir string, logSizeMax int64, noPivotRoot bool, reservePorts bool, supportsJSON bool) (*OCIRuntime, error) { +// Make a new OCI runtime with provided options. +// The first path that points to a valid executable will be used. +func newOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON bool) (*OCIRuntime, error) { + if name == "" { + return nil, errors.Wrapf(ErrInvalidArg, "the OCI runtime must be provided a non-empty name") + } + runtime := new(OCIRuntime) - runtime.name = oruntime.Name - runtime.path = oruntime.Paths[0] + runtime.name = name runtime.conmonPath = conmonPath - runtime.conmonEnv = conmonEnv - runtime.cgroupManager = cgroupManager - runtime.tmpDir = tmpDir - runtime.logSizeMax = logSizeMax - runtime.noPivot = noPivotRoot - runtime.reservePorts = reservePorts + + runtime.conmonEnv = runtimeCfg.ConmonEnvVars + runtime.cgroupManager = runtimeCfg.CgroupManager + runtime.tmpDir = runtimeCfg.TmpDir + runtime.logSizeMax = runtimeCfg.MaxLogSize + runtime.noPivot = runtimeCfg.NoPivotRoot + runtime.reservePorts = runtimeCfg.EnablePortReservation + + // TODO: probe OCI runtime for feature and enable automatically if + // available. 
runtime.supportsJSON = supportsJSON + foundPath := false + for _, path := range paths { + stat, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, errors.Wrapf(err, "cannot stat %s", path) + } + if !stat.Mode().IsRegular() { + continue + } + foundPath = true + runtime.path = path + break + } + if !foundPath { + return nil, errors.Wrapf(ErrInvalidArg, "no valid executable found for OCI runtime %s", name) + } + runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket") - if cgroupManager != CgroupfsCgroupsManager && cgroupManager != SystemdCgroupsManager { - return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", cgroupManager) + if runtime.cgroupManager != CgroupfsCgroupsManager && runtime.cgroupManager != SystemdCgroupsManager { + return nil, errors.Wrapf(ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager) } // Create the exit files and attach sockets directories diff --git a/libpod/pod_api.go b/libpod/pod_api.go index 9ed5c88eb..b913857dd 100644 --- a/libpod/pod_api.go +++ b/libpod/pod_api.go @@ -357,7 +357,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) { continue } - if err := ctr.runtime.ociRuntime.killContainer(ctr, signal); err != nil { + if err := ctr.ociRuntime.killContainer(ctr, signal); err != nil { ctr.lock.Unlock() ctrErrors[ctr.ID()] = err continue diff --git a/libpod/runtime.go b/libpod/runtime.go index 2c50fce85..d4d34242c 100644 --- a/libpod/runtime.go +++ b/libpod/runtime.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "sync" "syscall" @@ -91,18 +92,18 @@ type RuntimeOption func(*Runtime) error type Runtime struct { config *RuntimeConfig - state State - store storage.Store - storageService *storageService - imageContext *types.SystemContext - ociRuntime *OCIRuntime - netPlugin ocicni.CNIPlugin - ociRuntimePath OCIRuntimePath - conmonPath string - imageRuntime *image.Runtime - firewallBackend firewall.FirewallBackend - lockManager lock.Manager - configuredFrom *runtimeConfiguredFrom + state State + store storage.Store + storageService *storageService + imageContext *types.SystemContext + defaultOCIRuntime *OCIRuntime + ociRuntimes map[string]*OCIRuntime + netPlugin ocicni.CNIPlugin + conmonPath string + imageRuntime *image.Runtime + firewallBackend firewall.FirewallBackend + lockManager lock.Manager + configuredFrom *runtimeConfiguredFrom // doRenumber indicates that the runtime should perform a lock renumber // during initialization. @@ -123,14 +124,6 @@ type Runtime struct { eventer events.Eventer } -// OCIRuntimePath contains information about an OCI runtime. -type OCIRuntimePath struct { - // Name of the runtime to refer to by the --runtime flag - Name string `toml:"name"` - // Paths to check for this executable - Paths []string `toml:"paths"` -} - // RuntimeConfig contains configuration options used to set up the runtime type RuntimeConfig struct { // StorageConfig is the configuration used by containers/storage @@ -588,63 +581,6 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options .. // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { - // Backward compatibility for `runtime_path` - if runtime.config.RuntimePath != nil { - // Don't print twice in rootless mode. 
- if os.Geteuid() == 0 { - logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`") - logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used") - } - - // Transform `runtime_path` into `runtimes` and `runtime`. - name := filepath.Base(runtime.config.RuntimePath[0]) - runtime.config.OCIRuntime = name - runtime.config.OCIRuntimes = map[string][]string{name: runtime.config.RuntimePath} - } - - // Find a working OCI runtime binary - foundRuntime := false - // If runtime is an absolute path, then use it as it is. - if runtime.config.OCIRuntime != "" && runtime.config.OCIRuntime[0] == '/' { - foundRuntime = true - runtime.ociRuntimePath = OCIRuntimePath{Name: filepath.Base(runtime.config.OCIRuntime), Paths: []string{runtime.config.OCIRuntime}} - stat, err := os.Stat(runtime.config.OCIRuntime) - if err != nil { - if os.IsNotExist(err) { - return errors.Wrapf(err, "the specified OCI runtime %s does not exist", runtime.config.OCIRuntime) - } - return errors.Wrapf(err, "cannot stat the OCI runtime path %s", runtime.config.OCIRuntime) - } - if !stat.Mode().IsRegular() { - return fmt.Errorf("the specified OCI runtime %s is not a valid file", runtime.config.OCIRuntime) - } - } else { - // If not, look it up in the configuration. - paths := runtime.config.OCIRuntimes[runtime.config.OCIRuntime] - if paths != nil { - for _, path := range paths { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - continue - } - return errors.Wrapf(err, "cannot stat %s", path) - } - if !stat.Mode().IsRegular() { - continue - } - foundRuntime = true - runtime.ociRuntimePath = OCIRuntimePath{Name: runtime.config.OCIRuntime, Paths: []string{path}} - break - } - } - } - if !foundRuntime { - return errors.Wrapf(ErrInvalidArg, - "could not find a working binary (configured options: %v)", - runtime.config.OCIRuntimes) - } - // Find a working conmon binary foundConmon := false for _, path := range runtime.config.ConmonPath { @@ -841,25 +777,107 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { } } - supportsJSON := false - for _, r := range runtime.config.RuntimeSupportsJSON { - if r == runtime.config.OCIRuntime { - supportsJSON = true - break + // Get us at least one working OCI runtime. + runtime.ociRuntimes = make(map[string]*OCIRuntime) + + // Is the old runtime_path defined? + if runtime.config.RuntimePath != nil { + // Don't print twice in rootless mode. + if os.Geteuid() == 0 { + logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. 
Please use `runtimes` and `runtime`") + logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used") } + + if len(runtime.config.RuntimePath) == 0 { + return errors.Wrapf(ErrInvalidArg, "empty runtime path array passed") + } + + name := filepath.Base(runtime.config.RuntimePath[0]) + + supportsJSON := false + for _, r := range runtime.config.RuntimeSupportsJSON { + if r == name { + supportsJSON = true + break + } + } + + ociRuntime, err := newOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, supportsJSON) + if err != nil { + return err + } + + runtime.ociRuntimes[name] = ociRuntime + runtime.defaultOCIRuntime = ociRuntime } - // Make an OCI runtime to perform container operations - ociRuntime, err := newOCIRuntime(runtime.ociRuntimePath, - runtime.conmonPath, runtime.config.ConmonEnvVars, - runtime.config.CgroupManager, runtime.config.TmpDir, - runtime.config.MaxLogSize, runtime.config.NoPivotRoot, - runtime.config.EnablePortReservation, - supportsJSON) - if err != nil { - return err + // Initialize remaining OCI runtimes + for name, paths := range runtime.config.OCIRuntimes { + if len(paths) == 0 { + return errors.Wrapf(ErrInvalidArg, "must provide at least 1 path to OCI runtime %s", name) + } + + supportsJSON := false + for _, r := range runtime.config.RuntimeSupportsJSON { + if r == name { + supportsJSON = true + break + } + } + + ociRuntime, err := newOCIRuntime(name, paths, runtime.conmonPath, runtime.config, supportsJSON) + if err != nil { + // Don't fatally error. + // This will allow us to ship configs including optional + // runtimes that might not be installed (crun, kata). + // Only a warnf so default configs don't spec errors. + logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err) + continue + } + + runtime.ociRuntimes[name] = ociRuntime + } + + // Do we have a default OCI runtime? + if runtime.config.OCIRuntime != "" { + // If the string starts with / it's a path to a runtime + // executable. + if strings.HasPrefix(runtime.config.OCIRuntime, "/") { + name := filepath.Base(runtime.config.OCIRuntime) + + supportsJSON := false + for _, r := range runtime.config.RuntimeSupportsJSON { + if r == name { + supportsJSON = true + break + } + } + + ociRuntime, err := newOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, supportsJSON) + if err != nil { + return err + } + + runtime.ociRuntimes[name] = ociRuntime + runtime.defaultOCIRuntime = ociRuntime + } else { + ociRuntime, ok := runtime.ociRuntimes[runtime.config.OCIRuntime] + if !ok { + return errors.Wrapf(ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime) + } + runtime.defaultOCIRuntime = ociRuntime + } + } + + // Do we have at least one valid OCI runtime? + if len(runtime.ociRuntimes) == 0 { + return errors.Wrapf(ErrInvalidArg, "no OCI runtime has been configured") + } + + // Do we have a default runtime? 
+ if runtime.defaultOCIRuntime == nil { + return errors.Wrapf(ErrInvalidArg, "no default OCI runtime was configured") } - runtime.ociRuntime = ociRuntime // Make the per-boot files directory if it does not exist if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil { diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go index 0871b83a7..271d4160d 100644 --- a/libpod/runtime_ctr.go +++ b/libpod/runtime_ctr.go @@ -94,7 +94,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf ctr.config.StopTimeout = CtrRemoveTimeout - ctr.config.OCIRuntime = r.config.OCIRuntime + ctr.config.OCIRuntime = r.defaultOCIRuntime.name // Set namespace based on current runtime namespace // Do so before options run so they can override it @@ -139,6 +139,16 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo ctr.state.State = ContainerStateConfigured ctr.runtime = r + if ctr.config.OCIRuntime == "" { + ctr.ociRuntime = r.defaultOCIRuntime + } else { + ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime] + if !ok { + return nil, errors.Wrapf(ErrInvalidArg, "requested OCI runtime %s is not available", ctr.config.OCIRuntime) + } + ctr.ociRuntime = ociRuntime + } + var pod *Pod if ctr.config.Pod != "" { // Get the pod from state @@ -362,7 +372,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, } if c.state.State == ContainerStatePaused { - if err := c.runtime.ociRuntime.killContainer(c, 9); err != nil { + if err := c.ociRuntime.killContainer(c, 9); err != nil { return err } if err := c.unpause(); err != nil { @@ -376,7 +386,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, // Check that the container's in a good state to be removed if c.state.State == ContainerStateRunning { - if err := r.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil { + if err := c.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil { return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID()) } @@ -388,7 +398,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool, // Check that all of our exec sessions have finished if len(c.state.ExecSessions) != 0 { - if err := r.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil { + if err := c.ociRuntime.execStopContainer(c, c.StopTimeout()); err != nil { return err } } |
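
Taken together, the changes mean every container now resolves its OCI runtime by name when it is created or loaded, instead of sharing the runtime's single handle. A minimal sketch of that resolution, assuming the fields introduced above (`defaultOCIRuntime`, `ociRuntimes`) and using a hypothetical helper name `lookupOCIRuntime` that is not part of the diff:

package libpod

import (
	"path/filepath"
	"strings"

	"github.com/pkg/errors"
)

// lookupOCIRuntime condenses the lookup pattern the change repeats in
// setupContainer (runtime_ctr.go) and getContainerFromDB
// (boltdb_state_internal.go): an empty name selects the default runtime, a
// legacy absolute path is reduced to its base name, and anything else must be
// present in the ociRuntimes map built by makeRuntime.
func (r *Runtime) lookupOCIRuntime(configured string) (*OCIRuntime, error) {
	if configured == "" {
		return r.defaultOCIRuntime, nil
	}
	name := configured
	if strings.HasPrefix(name, "/") {
		// Containers created before this change may store a literal path.
		name = filepath.Base(name)
	}
	ociRuntime, ok := r.ociRuntimes[name]
	if !ok {
		return nil, errors.Wrapf(ErrInvalidArg, "requested OCI runtime %s is not available", configured)
	}
	return ociRuntime, nil
}

The same fallback order is what keeps pre-existing containers working: their stored `OCIRuntime` value, whether a bare name or an absolute path, is mapped onto the new per-name table rather than being re-resolved from disk.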