Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go                       |  92
-rw-r--r--  libpod/boltdb_state_internal.go              |   7
-rw-r--r--  libpod/in_memory_state.go                    |  64
-rw-r--r--  libpod/lock/in_memory_locks.go               |  11
-rw-r--r--  libpod/lock/lock.go                          |  14
-rw-r--r--  libpod/lock/shm/shm_lock.c                   |  36
-rw-r--r--  libpod/lock/shm/shm_lock.go                  |  16
-rw-r--r--  libpod/lock/shm/shm_lock.h                   |   1
-rw-r--r--  libpod/lock/shm/shm_lock_test.go             |  28
-rw-r--r--  libpod/lock/shm_lock_manager_linux.go        |   7
-rw-r--r--  libpod/lock/shm_lock_manager_unsupported.go  |   5
-rw-r--r--  libpod/options.go                            |  16
-rw-r--r--  libpod/runtime.go                            |  43
-rw-r--r--  libpod/runtime_ctr.go                        |   2
-rw-r--r--  libpod/runtime_renumber.go                   |  57
-rw-r--r--  libpod/runtime_volume.go                     |  13
-rw-r--r--  libpod/runtime_volume_linux.go               |  31
-rw-r--r--  libpod/state.go                              |  24
-rw-r--r--  libpod/state_test.go                         |  72
-rw-r--r--  libpod/volume.go                             |   5
-rw-r--r--  libpod/volume_internal.go                    |   3
21 files changed, 477 insertions, 70 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 5bc15dd7f..25ef5cd0e 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -783,6 +783,94 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
     return ctrs, nil
 }
 
+// RewriteContainerConfig rewrites a container's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+    if !s.valid {
+        return ErrDBClosed
+    }
+
+    if !ctr.valid {
+        return ErrCtrRemoved
+    }
+
+    newCfgJSON, err := json.Marshal(newCfg)
+    if err != nil {
+        return errors.Wrapf(err, "error marshalling new configuration JSON for container %s", ctr.ID())
+    }
+
+    db, err := s.getDBCon()
+    if err != nil {
+        return err
+    }
+    defer s.closeDBCon(db)
+
+    err = db.Update(func(tx *bolt.Tx) error {
+        ctrBkt, err := getCtrBucket(tx)
+        if err != nil {
+            return err
+        }
+
+        ctrDB := ctrBkt.Bucket([]byte(ctr.ID()))
+        if ctrDB == nil {
+            ctr.valid = false
+            return errors.Wrapf(ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
+        }
+
+        if err := ctrDB.Put(configKey, newCfgJSON); err != nil {
+            return errors.Wrapf(err, "error updating container %s config JSON", ctr.ID())
+        }
+
+        return nil
+    })
+    return err
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// WARNING: This function is DANGEROUS. Do not use without reading the full
+// comment on this function in state.go.
+func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+    if !s.valid {
+        return ErrDBClosed
+    }
+
+    if !pod.valid {
+        return ErrPodRemoved
+    }
+
+    newCfgJSON, err := json.Marshal(newCfg)
+    if err != nil {
+        return errors.Wrapf(err, "error marshalling new configuration JSON for pod %s", pod.ID())
+    }
+
+    db, err := s.getDBCon()
+    if err != nil {
+        return err
+    }
+    defer s.closeDBCon(db)
+
+    err = db.Update(func(tx *bolt.Tx) error {
+        podBkt, err := getPodBucket(tx)
+        if err != nil {
+            return err
+        }
+
+        podDB := podBkt.Bucket([]byte(pod.ID()))
+        if podDB == nil {
+            pod.valid = false
+            return errors.Wrapf(ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
+        }
+
+        if err := podDB.Put(configKey, newCfgJSON); err != nil {
+            return errors.Wrapf(err, "error updating pod %s config JSON", pod.ID())
+        }
+
+        return nil
+    })
+    return err
+}
+
 // Pod retrieves a pod given its full ID
 func (s *BoltState) Pod(id string) (*Pod, error) {
     if id == "" {
@@ -1281,10 +1369,6 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
         return ErrDBClosed
     }
 
-    if !volume.valid {
-        return ErrVolumeRemoved
-    }
-
     volName := []byte(volume.Name())
 
     db, err := s.getDBCon()
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index ea150cfac..3d749849d 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -348,13 +348,6 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
         return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
     }
 
-    // Get the lock
-    lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
-    if err != nil {
-        return errors.Wrapf(err, "error retrieving lockfile for volume %s", string(name))
-    }
-    volume.lock = lock
-
     volume.runtime = s.runtime
     volume.valid = true
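The Bolt implementation above simply overwrites the config JSON in the object's bucket; the safety rules live entirely in the caller. For context, a minimal sketch of the intended call pattern (it mirrors renumberLocks later in this diff; migrateLockID is a hypothetical in-package helper, and the caller is assumed to hold the alive lock so no other libpod instance can initialize concurrently):

// migrateLockID is a hypothetical helper showing the intended call pattern.
func migrateLockID(r *Runtime, ctr *Container) error {
    lock, err := r.lockManager.AllocateLock()
    if err != nil {
        return err
    }
    // Same config, new lock ID - a "functionally identical" change, which is
    // the only kind RewriteContainerConfig permits.
    ctr.config.LockID = lock.ID()
    return r.state.RewriteContainerConfig(ctr, ctr.config)
}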
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 314799309..ab4fc8ba7 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -378,6 +378,58 @@ func (s *InMemoryState) ContainerInUse(ctr *Container) ([]string, error) {
     return arr, nil
 }
 
+// AllContainers retrieves all containers from the state
+func (s *InMemoryState) AllContainers() ([]*Container, error) {
+    ctrs := make([]*Container, 0, len(s.containers))
+    for _, ctr := range s.containers {
+        if s.namespace == "" || ctr.config.Namespace == s.namespace {
+            ctrs = append(ctrs, ctr)
+        }
+    }
+
+    return ctrs, nil
+}
+
+// RewriteContainerConfig rewrites a container's configuration.
+// This function is DANGEROUS, even with an in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error {
+    if !ctr.valid {
+        return ErrCtrRemoved
+    }
+
+    // If the container does not exist, return error
+    stateCtr, ok := s.containers[ctr.ID()]
+    if !ok {
+        ctr.valid = false
+        return errors.Wrapf(ErrNoSuchCtr, "container with ID %s not found in state", ctr.ID())
+    }
+
+    stateCtr.config = newCfg
+
+    return nil
+}
+
+// RewritePodConfig rewrites a pod's configuration.
+// This function is DANGEROUS, even with in-memory state.
+// Please read the full comment on it in state.go before using it.
+func (s *InMemoryState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
+    if !pod.valid {
+        return ErrPodRemoved
+    }
+
+    // If the pod does not exist, return error
+    statePod, ok := s.pods[pod.ID()]
+    if !ok {
+        pod.valid = false
+        return errors.Wrapf(ErrNoSuchPod, "pod with ID %s not found in state", pod.ID())
+    }
+
+    statePod.config = newCfg
+
+    return nil
+}
+
 // Volume retrieves a volume from its full name
 func (s *InMemoryState) Volume(name string) (*Volume, error) {
     if name == "" {
@@ -486,18 +538,6 @@ func (s *InMemoryState) AllVolumes() ([]*Volume, error) {
     return allVols, nil
 }
 
-// AllContainers retrieves all containers from the state
-func (s *InMemoryState) AllContainers() ([]*Container, error) {
-    ctrs := make([]*Container, 0, len(s.containers))
-    for _, ctr := range s.containers {
-        if s.namespace == "" || ctr.config.Namespace == s.namespace {
-            ctrs = append(ctrs, ctr)
-        }
-    }
-
-    return ctrs, nil
-}
-
 // Pod retrieves a pod from the state from its full ID
 func (s *InMemoryState) Pod(id string) (*Pod, error) {
     if id == "" {
diff --git a/libpod/lock/in_memory_locks.go b/libpod/lock/in_memory_locks.go
index db8f20e95..7c9605917 100644
--- a/libpod/lock/in_memory_locks.go
+++ b/libpod/lock/in_memory_locks.go
@@ -89,3 +89,14 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
 
     return m.locks[id], nil
 }
+
+// FreeAllLocks frees all locks.
+// This function is DANGEROUS. Please read the full comment in lock.go before
+// trying to use it.
+func (m *InMemoryManager) FreeAllLocks() error {
+    for _, lock := range m.locks {
+        lock.allocated = false
+    }
+
+    return nil
+}
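For the in-memory manager, FreeAllLocks just clears every lock's allocated flag, so subsequent allocations restart from the lowest free ID. A small sketch of those semantics, assuming NewInMemoryManager keeps its existing signature:

package main

import (
    "fmt"

    "github.com/containers/libpod/libpod/lock"
)

func main() {
    m := lock.NewInMemoryManager(32)
    l1, _ := m.AllocateLock()
    _ = m.FreeAllLocks()      // clears every lock's allocated flag
    l2, _ := m.AllocateLock() // allocation restarts at the lowest free ID
    fmt.Println(l1.ID() == l2.ID()) // prints true
}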
diff --git a/libpod/lock/lock.go b/libpod/lock/lock.go
index 1f94171fe..d6841646b 100644
--- a/libpod/lock/lock.go
+++ b/libpod/lock/lock.go
@@ -24,6 +24,20 @@ type Manager interface {
     // The underlying lock MUST be the same as any other lock with the
     // same UUID.
     RetrieveLock(id uint32) (Locker, error)
+    // PLEASE READ FULL DESCRIPTION BEFORE USING.
+    // FreeAllLocks frees all allocated locks, in preparation for lock
+    // reallocation.
+    // As this deallocates all presently-held locks, this can be very
+    // dangerous - if there are other processes running that might be
+    // attempting to allocate new locks and free existing locks, we may
+    // encounter races leading to an inconsistent state.
+    // (This is in addition to the fact that FreeAllLocks instantly makes
+    // the state inconsistent simply by using it, and requires a full
+    // lock renumbering to restore consistency!)
+    // In short, this should only be used as part of unit tests, or lock
+    // renumbering, where reasonable guarantees about other processes can be
+    // made.
+    FreeAllLocks() error
 }
 
 // Locker is similar to sync.Locker, but provides a method for freeing the lock
diff --git a/libpod/lock/shm/shm_lock.c b/libpod/lock/shm/shm_lock.c
index 4af58d857..d11fce71a 100644
--- a/libpod/lock/shm/shm_lock.c
+++ b/libpod/lock/shm/shm_lock.c
@@ -203,6 +203,8 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
 // terminating NULL byte.
 // Returns a valid pointer on success or NULL on error.
 // If an error occurs, negative ERRNO values will be written to error_code.
+// ERANGE is returned for a mismatch between num_locks and the number of locks
+// available in the SHM lock struct.
 shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
   int shm_fd;
   shm_struct_t *shm;
@@ -255,11 +257,11 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
 
   // Need to check the SHM to see if it's actually our locks
   if (shm->magic != MAGIC) {
-    *error_code = -1 * errno;
+    *error_code = -1 * EBADF;
     goto CLEANUP;
   }
   if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
-    *error_code = -1 * errno;
+    *error_code = -1 * ERANGE;
     goto CLEANUP;
   }
 
@@ -407,6 +409,36 @@ int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
   return 0;
 }
 
+// Deallocate all semaphores unconditionally.
+// Returns negative ERRNO values.
+int32_t deallocate_all_semaphores(shm_struct_t *shm) {
+  int ret_code;
+  uint i;
+
+  if (shm == NULL) {
+    return -1 * EINVAL;
+  }
+
+  // Lock the mutex controlling access to our shared memory
+  ret_code = take_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
+
+  // Iterate through all bitmaps and reset to unused
+  for (i = 0; i < shm->num_bitmaps; i++) {
+    shm->locks[i].bitmap = 0;
+  }
+
+  // Unlock the allocation control mutex
+  ret_code = release_mutex(&(shm->segment_lock));
+  if (ret_code != 0) {
+    return -1 * ret_code;
+  }
+
+  return 0;
+}
+
 // Lock a given semaphore
 // Does not check if the semaphore is allocated - this ensures that, even for
 // removed containers, we can still successfully lock to check status (and
diff --git a/libpod/lock/shm/shm_lock.go b/libpod/lock/shm/shm_lock.go
index 87d28e5c1..e70ea8743 100644
--- a/libpod/lock/shm/shm_lock.go
+++ b/libpod/lock/shm/shm_lock.go
@@ -155,6 +155,22 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
     return nil
 }
 
+// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
+// other containers and pods.
+func (locks *SHMLocks) DeallocateAllSemaphores() error {
+    if !locks.valid {
+        return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+    }
+
+    retCode := C.deallocate_all_semaphores(locks.lockStruct)
+    if retCode < 0 {
+        // Negative errno return from C
+        return syscall.Errno(-1 * retCode)
+    }
+
+    return nil
+}
+
 // LockSemaphore locks the given semaphore.
 // If the semaphore is already locked, LockSemaphore will block until the lock
 // can be acquired.
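The C helper is cheap because the allocation state is a bitmap: each 32-bit word tracks 32 semaphores, so freeing everything is just zeroing num_bitmaps words under the segment mutex. A toy Go model of that idea (not code from the tree; shmModel and its fields are invented for illustration):

package main

import (
    "fmt"
    "sync"
)

// shmModel is a toy stand-in for the C shm_struct_t: one uint32 word per
// 32 semaphores, guarded by a mutex playing the role of segment_lock.
type shmModel struct {
    mu      sync.Mutex
    bitmaps []uint32 // bit set == semaphore allocated
}

// freeAll mirrors deallocate_all_semaphores: take the segment lock, zero
// every bitmap word, release the lock. O(num_bitmaps), not O(num_locks).
func (s *shmModel) freeAll() {
    s.mu.Lock()
    defer s.mu.Unlock()
    for i := range s.bitmaps {
        s.bitmaps[i] = 0
    }
}

func main() {
    s := &shmModel{bitmaps: make([]uint32, 2)} // models 64 semaphores
    s.bitmaps[0] = 0xFFFFFFFF                  // all of word 0 allocated
    s.freeAll()
    fmt.Println(s.bitmaps[0] == 0 && s.bitmaps[1] == 0) // prints true
}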
diff --git a/libpod/lock/shm/shm_lock.h b/libpod/lock/shm/shm_lock.h
index 8e7e23fb7..58e4297e2 100644
--- a/libpod/lock/shm/shm_lock.h
+++ b/libpod/lock/shm/shm_lock.h
@@ -40,6 +40,7 @@ shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code);
 int32_t close_lock_shm(shm_struct_t *shm);
 int64_t allocate_semaphore(shm_struct_t *shm);
 int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
+int32_t deallocate_all_semaphores(shm_struct_t *shm);
 int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
 int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);
 
diff --git a/libpod/lock/shm/shm_lock_test.go b/libpod/lock/shm/shm_lock_test.go
index 594eb5d8e..830035881 100644
--- a/libpod/lock/shm/shm_lock_test.go
+++ b/libpod/lock/shm/shm_lock_test.go
@@ -4,7 +4,6 @@ import (
     "fmt"
     "os"
     "runtime"
-    "syscall"
     "testing"
     "time"
 
@@ -53,12 +52,8 @@ func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
     }
     defer func() {
         // Deallocate all locks
-        // Ignore ENOENT (lock is not allocated)
-        var i uint32
-        for i = 0; i < numLocks; i++ {
-            if err := locks.DeallocateSemaphore(i); err != nil && err != syscall.ENOENT {
-                t.Fatalf("Error deallocating semaphore %d: %v", i, err)
-            }
+        if err := locks.DeallocateAllSemaphores(); err != nil {
+            t.Fatalf("Error deallocating semaphores: %v", err)
         }
 
         if err := locks.Close(); err != nil {
@@ -212,6 +207,25 @@ func TestAllocateDeallocateCycle(t *testing.T) {
     })
 }
 
+// Test that DeallocateAllSemaphores deallocates all semaphores
+func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
+    runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+        // Allocate a lock
+        locks1, err := locks.AllocateSemaphore()
+        assert.NoError(t, err)
+
+        // Free all locks
+        err = locks.DeallocateAllSemaphores()
+        assert.NoError(t, err)
+
+        // Allocate another lock
+        locks2, err := locks.AllocateSemaphore()
+        assert.NoError(t, err)
+
+        assert.Equal(t, locks1, locks2)
+    })
+}
+
 // Test that locks actually lock
 func TestLockSemaphoreActuallyLocks(t *testing.T) {
     runLockTest(t, func(t *testing.T, locks *SHMLocks) {
diff --git a/libpod/lock/shm_lock_manager_linux.go b/libpod/lock/shm_lock_manager_linux.go
index 94dfd7dd7..8678958ee 100644
--- a/libpod/lock/shm_lock_manager_linux.go
+++ b/libpod/lock/shm_lock_manager_linux.go
@@ -71,6 +71,13 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
     return lock, nil
 }
 
+// FreeAllLocks frees all locks in the manager.
+// This function is DANGEROUS. Please read the full comment in lock.go before
+// trying to use it.
+func (m *SHMLockManager) FreeAllLocks() error {
+    return m.locks.DeallocateAllSemaphores()
+}
+
 // SHMLock is an individual shared memory lock.
 type SHMLock struct {
     lockID uint32
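With FreeAllLocks added to the Manager interface, every manager implementation must grow the method or the package stops compiling. A hypothetical pair of compile-time assertions (not in the PR) makes that failure mode explicit; note SHMLockManager is Linux-only, so its assertion would live in a linux-only file:

// In package lock; purely illustrative.
var _ Manager = (*SHMLockManager)(nil)  // linux-only file
var _ Manager = (*InMemoryManager)(nil)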
diff --git a/libpod/lock/shm_lock_manager_unsupported.go b/libpod/lock/shm_lock_manager_unsupported.go
index cbdb2f7bc..1d6e3fcbd 100644
--- a/libpod/lock/shm_lock_manager_unsupported.go
+++ b/libpod/lock/shm_lock_manager_unsupported.go
@@ -27,3 +27,8 @@ func (m *SHMLockManager) AllocateLock() (Locker, error) {
 func (m *SHMLockManager) RetrieveLock(id string) (Locker, error) {
     return nil, fmt.Errorf("not supported")
 }
+
+// FreeAllLocks is not supported on this platform
+func (m *SHMLockManager) FreeAllLocks() error {
+    return fmt.Errorf("not supported")
+}
diff --git a/libpod/options.go b/libpod/options.go
index 06737776b..7c37fd65b 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -394,6 +394,22 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
     }
 }
 
+// WithRenumber instructs libpod to perform a lock renumbering while
+// initializing. This will handle migrations from early versions of libpod with
+// file locks to newer versions with SHM locking, as well as changes in the
+// number of configured locks.
+func WithRenumber() RuntimeOption {
+    return func(rt *Runtime) error {
+        if rt.valid {
+            return ErrRuntimeFinalized
+        }
+
+        rt.doRenumber = true
+
+        return nil
+    }
+}
+
 // Container Creation Options
 
 // WithShmDir sets the directory that should be mounted on /dev/shm.
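WithRenumber is just a flag-setter; the work happens inside runtime init (shown in the runtime.go hunks below). A hedged sketch of how a caller would drive it - NewRuntime and Shutdown are existing libpod API, but this wiring is illustrative only:

package main

import "github.com/containers/libpod/libpod"

// renumber is roughly what a CLI "renumber" command would do.
func renumber() error {
    rt, err := libpod.NewRuntime(libpod.WithRenumber())
    if err != nil {
        return err
    }
    // Renumbering runs during init; the returned runtime is not meant for
    // further use, so shut it down immediately.
    return rt.Shutdown(false)
}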
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4f5d1e292..94dbf37dd 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -7,6 +7,7 @@ import (
     "os/exec"
     "path/filepath"
     "sync"
+    "syscall"
 
     "github.com/BurntSushi/toml"
     is "github.com/containers/image/storage"
@@ -79,7 +80,8 @@ type RuntimeOption func(*Runtime) error
 
 // Runtime is the core libpod runtime
 type Runtime struct {
-    config *RuntimeConfig
+    config          *RuntimeConfig
+    state           State
     store           storage.Store
     storageService  *storageService
@@ -88,12 +90,23 @@ type Runtime struct {
     netPlugin       ocicni.CNIPlugin
     ociRuntimePath  OCIRuntimePath
     conmonPath      string
-    valid           bool
-    lock            sync.RWMutex
     imageRuntime    *image.Runtime
     firewallBackend firewall.FirewallBackend
     lockManager     lock.Manager
     configuredFrom  *runtimeConfiguredFrom
+
+    // doRenumber indicates that the runtime should perform a lock renumber
+    // during initialization.
+    // Once the runtime has been initialized and returned, this variable is
+    // unused.
+    doRenumber bool
+
+    // valid indicates whether the runtime is ready to use.
+    // valid is set to true when a runtime is returned from GetRuntime(),
+    // and remains true until the runtime is shut down (rendering its
+    // storage unusable). When valid is false, the runtime cannot be used.
+    valid bool
+    lock  sync.RWMutex
 }
 
 // OCIRuntimePath contains information about an OCI runtime.
@@ -753,6 +766,7 @@ func makeRuntime(runtime *Runtime) (err error) {
             aliveLock.Unlock()
         }
     }()
+
     _, err = os.Stat(runtimeAliveFile)
     if err != nil {
         // If the file doesn't exist, we need to refresh the state
@@ -778,12 +792,35 @@ func makeRuntime(runtime *Runtime) (err error) {
             if err != nil {
                 return err
             }
+        } else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
+            logrus.Debugf("Number of locks does not match - removing old locks")
+
+            // ERANGE indicates a lock numbering mismatch.
+            // Since we're renumbering, this is not fatal.
+            // Remove the earlier set of locks and recreate.
+            if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
+                return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
+            }
+
+            manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
+            if err != nil {
+                return err
+            }
         } else {
             return err
         }
     }
     runtime.lockManager = manager
 
+    // If we're renumbering locks, do it now.
+    // It breaks out of normal runtime init, and will not return a valid
+    // runtime.
+    if runtime.doRenumber {
+        if err := runtime.renumberLocks(); err != nil {
+            return err
+        }
+    }
+
     // If we need to refresh the state, do it now - things are guaranteed to
     // be set up by now.
     if doRefresh {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 1055da75b..2ec8d0795 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -421,7 +421,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
 
     for _, v := range volumes {
         if volume, err := runtime.state.Volume(v); err == nil {
-            if err := runtime.removeVolume(ctx, volume, false, true); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
+            if err := runtime.removeVolume(ctx, volume, false); err != nil && err != ErrNoSuchVolume && err != ErrVolumeBeingUsed {
                 logrus.Errorf("cleanup volume (%s): %v", v, err)
             }
         }
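The os.Remove against "/dev/shm" works because, on Linux, POSIX shared memory objects created with shm_open(3) appear as regular files in the /dev/shm tmpfs, so a segment sized for the wrong lock count can be dropped with a plain unlink. A hedged variant that also tolerates the segment already being gone (the diff's version treats any removal error as fatal):

// Sketch only - same removal, tolerant of a missing file.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil && !os.IsNotExist(err) {
    return errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}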
diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
new file mode 100644
index 000000000..125cf0825
--- /dev/null
+++ b/libpod/runtime_renumber.go
@@ -0,0 +1,57 @@
+package libpod
+
+import (
+    "github.com/pkg/errors"
+)
+
+// renumberLocks reassigns lock numbers for all containers and pods in the
+// state.
+// TODO: It would be desirable to make it impossible to call this until all
+// other libpod sessions are dead.
+// Possibly use a read-write file lock, with all non-renumber podmans owning the
+// lock as read, renumber attempting to take a write lock?
+// The alternative is some sort of session tracking, and I don't know how
+// reliable that can be.
+func (r *Runtime) renumberLocks() error {
+    // Start off by deallocating all locks
+    if err := r.lockManager.FreeAllLocks(); err != nil {
+        return err
+    }
+
+    allCtrs, err := r.state.AllContainers()
+    if err != nil {
+        return err
+    }
+    for _, ctr := range allCtrs {
+        lock, err := r.lockManager.AllocateLock()
+        if err != nil {
+            return errors.Wrapf(err, "error allocating lock for container %s", ctr.ID())
+        }
+
+        ctr.config.LockID = lock.ID()
+
+        // Write the new lock ID
+        if err := r.state.RewriteContainerConfig(ctr, ctr.config); err != nil {
+            return err
+        }
+    }
+    allPods, err := r.state.AllPods()
+    if err != nil {
+        return err
+    }
+    for _, pod := range allPods {
+        lock, err := r.lockManager.AllocateLock()
+        if err != nil {
+            return errors.Wrapf(err, "error allocating lock for pod %s", pod.ID())
+        }
+
+        pod.config.LockID = lock.ID()
+
+        // Write the new lock ID
+        if err := r.state.RewritePodConfig(pod, pod.config); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
diff --git a/libpod/runtime_volume.go b/libpod/runtime_volume.go
index beae50ac9..11f37ad4b 100644
--- a/libpod/runtime_volume.go
+++ b/libpod/runtime_volume.go
@@ -19,7 +19,7 @@ type VolumeCreateOption func(*Volume) error
 type VolumeFilter func(*Volume) bool
 
 // RemoveVolume removes a volume
-func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool) error {
     r.lock.Lock()
     defer r.lock.Unlock()
 
@@ -35,10 +35,7 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force, prune bool) error {
         }
     }
 
-    v.lock.Lock()
-    defer v.lock.Unlock()
-
-    return r.removeVolume(ctx, v, force, prune)
+    return r.removeVolume(ctx, v, force)
 }
 
 // RemoveVolumes removes a slice of volumes or all with a force bool
@@ -64,7 +61,7 @@
     }
 
     for _, vol := range vols {
-        if err := r.RemoveVolume(ctx, vol, force, false); err != nil {
+        if err := r.RemoveVolume(ctx, vol, force); err != nil {
             return deletedVols, err
         }
         logrus.Debugf("removed volume %s", vol.Name())
@@ -168,8 +165,8 @@ func (r *Runtime) PruneVolumes(ctx context.Context) ([]string, []error) {
     }
 
     for _, vol := range vols {
-        if err := r.RemoveVolume(ctx, vol, false, true); err != nil {
-            if err != ErrVolumeBeingUsed {
+        if err := r.RemoveVolume(ctx, vol, false); err != nil {
+            if errors.Cause(err) != ErrVolumeBeingUsed && errors.Cause(err) != ErrVolumeRemoved {
                 pruneErrors = append(pruneErrors, err)
             }
             continue
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index 0727cfedf..838c0167a 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -67,13 +67,6 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
     }
     volume.config.MountPoint = fullVolPath
 
-    lock, err := r.lockManager.AllocateLock()
-    if err != nil {
-        return nil, errors.Wrapf(err, "error allocating lock for new volume")
-    }
-    volume.lock = lock
-    volume.config.LockID = volume.lock.ID()
-
     volume.valid = true
 
     // Add the volume to state
@@ -85,9 +78,12 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) {
 }
 
 // removeVolume removes the specified volume from state, and tears down its mountpoint and storage
-func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
+func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error {
     if !v.valid {
-        return ErrNoSuchVolume
+        if ok, _ := r.state.HasVolume(v.Name()); !ok {
+            return nil
+        }
+        return ErrVolumeRemoved
     }
 
     deps, err := r.state.VolumeInUse(v)
@@ -95,9 +91,6 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
        return err
     }
     if len(deps) != 0 {
-        if prune {
-            return ErrVolumeBeingUsed
-        }
         depsStr := strings.Join(deps, ", ")
         if !force {
             return errors.Wrapf(ErrVolumeBeingUsed, "volume %s is being used by the following container(s): %s", v.Name(), depsStr)
@@ -112,18 +105,20 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force, prune bool) error {
         }
     }
 
-    // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
-    if err := v.teardownStorage(); err != nil {
-        return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
-    }
+    // Set volume as invalid so it can no longer be used
+    v.valid = false
 
     // Remove the volume from the state
     if err := r.state.RemoveVolume(v); err != nil {
         return errors.Wrapf(err, "error removing volume %s", v.Name())
     }
 
-    // Set volume as invalid so it can no longer be used
-    v.valid = false
+    // Delete the mountpoint path of the volume, that is delete the volume from /var/lib/containers/storage/volumes
+    if err := v.teardownStorage(); err != nil {
+        return errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
+    }
+
+    logrus.Debugf("Removed volume %s", v.Name())
 
     return nil
 }
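The reordering in removeVolume matters: invalidating the handle and removing the database entry before tearing down storage means a storage failure can no longer leave a volume that is gone from disk yet still listed in the DB. A condensed view of the new sequence, with illustrative step comments:

v.valid = false                            // 1. bar further use of this handle
if err := r.state.RemoveVolume(v); err != nil {
    return err                             // 2. DB updated first, stays authoritative
}
if err := v.teardownStorage(); err != nil {
    return err                             // 3. only on-disk cleanup remains to retry
}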
diff --git a/libpod/state.go b/libpod/state.go
index 88d89f673..98282fc83 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -97,6 +97,30 @@ type State interface {
     // returned.
     AllContainers() ([]*Container, error)
 
+    // PLEASE READ FULL DESCRIPTION BEFORE USING.
+    // Rewrite a container's configuration.
+    // This function breaks libpod's normal prohibition on a read-only
+    // configuration, and as such should be used EXTREMELY SPARINGLY and
+    // only in very specific circumstances.
+    // Specifically, it is ONLY safe to use this function to make changes
+    // that result in a functionally identical configuration (migrating to
+    // newer, but identical, configuration fields), or during libpod init
+    // WHILE HOLDING THE ALIVE LOCK (to prevent other libpod instances from
+    // being initialized).
+    // Most things in config can be changed by this, but container ID and
+    // name ABSOLUTELY CANNOT BE ALTERED. If you do so, there is a high
+    // potential for database corruption.
+    // There are a lot of capital letters and conditions here, but the short
+    // answer is this: use this only very sparingly, and only if you really
+    // know what you're doing.
+    RewriteContainerConfig(ctr *Container, newCfg *ContainerConfig) error
+    // PLEASE READ THE ABOVE DESCRIPTION BEFORE USING.
+    // This function is identical to RewriteContainerConfig, save for the
+    // fact that it is used with pods instead.
+    // It is subject to the same conditions as RewriteContainerConfig.
+    // Please do not use this unless you know what you're doing.
+    RewritePodConfig(pod *Pod, newCfg *PodConfig) error
+
     // Accepts full ID of pod.
     // If the pod given is not in the set namespace, an error will be
     // returned.
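Put concretely, the contract distinguishes fields that merely describe the object from fields that index it. A hypothetical in-package sketch (stageRewrite and newLock are invented names; Container.Config() returning a copy is existing libpod behavior):

// stageRewrite illustrates the contract: stage a change on a copy, then
// persist it explicitly.
func stageRewrite(state State, ctr *Container, newLock lock.Locker) error {
    newCfg := ctr.Config()       // deep copy of the container's configuration
    newCfg.LockID = newLock.ID() // fine: container stays functionally identical
    // NEVER touch identity fields; they index the database buckets:
    //   newCfg.ID = "..."   // forbidden - high potential for DB corruption
    //   newCfg.Name = "..." // likewise
    return state.RewriteContainerConfig(ctr, newCfg)
}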
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 4bd00ab55..be68a2d69 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -1298,6 +1298,78 @@ func TestCannotUseBadIDAsGenericDependency(t *testing.T) {
     })
 }
 
+func TestRewriteContainerConfigDoesNotExist(t *testing.T) {
+    runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+        err := state.RewriteContainerConfig(&Container{}, &ContainerConfig{})
+        assert.Error(t, err)
+    })
+}
+
+func TestRewriteContainerConfigNotInState(t *testing.T) {
+    runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+        testCtr, err := getTestCtr1(manager)
+        assert.NoError(t, err)
+        err = state.RewriteContainerConfig(testCtr, &ContainerConfig{})
+        assert.Error(t, err)
+    })
+}
+
+func TestRewriteContainerConfigRewritesConfig(t *testing.T) {
+    runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+        testCtr, err := getTestCtr1(manager)
+        assert.NoError(t, err)
+
+        err = state.AddContainer(testCtr)
+        assert.NoError(t, err)
+
+        testCtr.config.LogPath = "/another/path/"
+
+        err = state.RewriteContainerConfig(testCtr, testCtr.config)
+        assert.NoError(t, err)
+
+        testCtrFromState, err := state.Container(testCtr.ID())
+        assert.NoError(t, err)
+
+        testContainersEqual(t, testCtrFromState, testCtr, true)
+    })
+}
+
+func TestRewritePodConfigDoesNotExist(t *testing.T) {
+    runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+        err := state.RewritePodConfig(&Pod{}, &PodConfig{})
+        assert.Error(t, err)
+    })
+}
+
+func TestRewritePodConfigNotInState(t *testing.T) {
+    runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+        testPod, err := getTestPod1(manager)
+        assert.NoError(t, err)
+        err = state.RewritePodConfig(testPod, &PodConfig{})
+        assert.Error(t, err)
+    })
+}
+
+func TestRewritePodConfigRewritesConfig(t *testing.T) {
+    runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
+        testPod, err := getTestPod1(manager)
+        assert.NoError(t, err)
+
+        err = state.AddPod(testPod)
+        assert.NoError(t, err)
+
+        testPod.config.CgroupParent = "/another_cgroup_parent"
+
+        err = state.RewritePodConfig(testPod, testPod.config)
+        assert.NoError(t, err)
+
+        testPodFromState, err := state.Pod(testPod.ID())
+        assert.NoError(t, err)
+
+        testPodsEqual(t, testPodFromState, testPod, true)
+    })
+}
+
 func TestGetPodDoesNotExist(t *testing.T) {
     runForAllStates(t, func(t *testing.T, state State, manager lock.Manager) {
         _, err := state.Pod("doesnotexist")
diff --git a/libpod/volume.go b/libpod/volume.go
index 026a3bf49..74878b6a4 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -1,7 +1,5 @@
 package libpod
 
-import "github.com/containers/libpod/libpod/lock"
-
 // Volume is the type used to create named volumes
 // TODO: all volumes should be created using this and the Volume API
 type Volume struct {
@@ -9,7 +7,6 @@ type Volume struct {
 
     valid   bool
     runtime *Runtime
-    lock    lock.Locker
 }
 
 // VolumeConfig holds the volume's config information
@@ -17,8 +14,6 @@ type Volume struct {
 type VolumeConfig struct {
     // Name of the volume
     Name string `json:"name"`
-    // ID of this volume's lock
-    LockID uint32 `json:"lockID"`
 
     Labels     map[string]string `json:"labels"`
     MountPoint string            `json:"mountPoint"`
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 0de8a2350..35f0ca19d 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -18,8 +18,5 @@ func newVolume(runtime *Runtime) (*Volume, error) {
 
 // teardownStorage deletes the volume from volumePath
 func (v *Volume) teardownStorage() error {
-    if !v.valid {
-        return ErrNoSuchVolume
-    }
     return os.RemoveAll(filepath.Join(v.runtime.config.VolumePath, v.Name()))
 }