author     Daniel J Walsh <dwalsh@redhat.com>    2021-06-02 05:28:26 -0400
committer  Daniel J Walsh <dwalsh@redhat.com>    2021-06-14 11:56:48 -0400
commit     81eb71fe36db14f82452b0ded176095b63cb4a4e (patch)
tree       430d50d61ce7ed25db52c5270ae8b3fd0944faf5 /libpod/container_internal.go
parent     e3dd12a2bed03ae7ad221eefd7b92ddef2a59888 (diff)
download   podman-81eb71fe36db14f82452b0ded176095b63cb4a4e.tar.gz
           podman-81eb71fe36db14f82452b0ded176095b63cb4a4e.tar.bz2
           podman-81eb71fe36db14f82452b0ded176095b63cb4a4e.zip
Fix permissions on initially created named volumes

The permissions of the volume should match those of the directory it is being mounted on.

Fixes: https://github.com/containers/podman/issues/10188

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
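The helper that replaces chownVolume, fixVolumePermissions, is defined outside libpod/container_internal.go, so its body does not appear in this diff. Below is a minimal sketch of the idea the commit message states - making a freshly created named volume's owner and mode match the directory it is mounted over. The matchVolumeToDest helper and both path parameters are hypothetical names, not Podman code, and the sketch is Linux-only.

// Sketch only: make a new named volume's root look like the directory it
// will be mounted over. matchVolumeToDest is a hypothetical helper, not the
// fixVolumePermissions introduced by this commit.
package volumeperms

import (
	"fmt"
	"os"
	"syscall"
)

func matchVolumeToDest(volumeMountPoint, destDirInImage string) error {
	st, err := os.Stat(destDirInImage)
	if err != nil {
		return err
	}
	sys, ok := st.Sys().(*syscall.Stat_t)
	if !ok {
		return fmt.Errorf("unexpected stat type for %s", destDirInImage)
	}
	// Copy ownership first (Lchown does not follow a symlinked mount point),
	// then copy the destination directory's permission bits onto the volume.
	if err := os.Lchown(volumeMountPoint, int(sys.Uid), int(sys.Gid)); err != nil {
		return err
	}
	return os.Chmod(volumeMountPoint, st.Mode().Perm())
}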
Diffstat (limited to 'libpod/container_internal.go')
-rw-r--r--   libpod/container_internal.go   60
1 file changed, 1 insertion(+), 59 deletions(-)
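For context on the code being deleted: the chownVolume function removed in the diff below first translates the container process's UID:GID into host IDs with the idtools package from containers/storage, and only then chowns the volume's mount point. The standalone sketch below shows just that translation step; the 0 -> 100000 mapping is made up for illustration and is not taken from this commit.

// Sketch of the container-to-host ID translation used by the removed
// chownVolume. The example mapping is illustrative only.
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// A rootless-style mapping: container ID 0 maps to host ID 100000,
	// with 65536 IDs available.
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	mappings := idtools.NewIDMappingsFromMaps(uidMap, gidMap)

	// Container root (0:0) becomes 100000:100000 on the host; that pair is
	// what os.Lchown must be given when chowning the volume's mount point.
	hostPair, err := mappings.ToHost(idtools.IDPair{UID: 0, GID: 0})
	if err != nil {
		panic(err)
	}
	fmt.Printf("container 0:0 -> host %d:%d\n", hostPair.UID, hostPair.GID)
}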
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 3e4eea003..545b78976 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1062,7 +1062,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 	}
 	for _, v := range c.config.NamedVolumes {
-		if err := c.chownVolume(v.Name); err != nil {
+		if err := c.fixVolumePermissions(v); err != nil {
 			return err
 		}
 	}
@@ -1681,64 +1681,6 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
 	return vol, nil
 }
-// Chown the specified volume if necessary.
-func (c *Container) chownVolume(volumeName string) error {
-	vol, err := c.runtime.state.Volume(volumeName)
-	if err != nil {
-		return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
-	}
-
-	vol.lock.Lock()
-	defer vol.lock.Unlock()
-
-	// The volume may need a copy-up. Check the state.
-	if err := vol.update(); err != nil {
-		return err
-	}
-
-	// TODO: For now, I've disabled chowning volumes owned by non-Podman
-	// drivers. This may be safe, but it's really going to be a case-by-case
-	// thing, I think - safest to leave disabled now and re-enable later if
-	// there is a demand.
-	if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
-		vol.state.NeedsChown = false
-
-		uid := int(c.config.Spec.Process.User.UID)
-		gid := int(c.config.Spec.Process.User.GID)
-
-		if c.config.IDMappings.UIDMap != nil {
-			p := idtools.IDPair{
-				UID: uid,
-				GID: gid,
-			}
-			mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
-			newPair, err := mappings.ToHost(p)
-			if err != nil {
-				return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
-			}
-			uid = newPair.UID
-			gid = newPair.GID
-		}
-
-		vol.state.UIDChowned = uid
-		vol.state.GIDChowned = gid
-
-		if err := vol.save(); err != nil {
-			return err
-		}
-
-		mountPoint, err := vol.MountPoint()
-		if err != nil {
-			return err
-		}
-
-		if err := os.Lchown(mountPoint, uid, gid); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
 	if !c.state.Mounted {