-rw-r--r--   libpod/container_internal.go          48
-rw-r--r--   libpod/options.go                     13
-rw-r--r--   libpod/runtime_ctr.go                  2
-rw-r--r--   libpod/volume.go                      36
-rw-r--r--   libpod/volume_inspect.go              11
-rw-r--r--   pkg/api/handlers/libpod/volumes.go    30
-rw-r--r--   pkg/domain/infra/abi/volumes.go       26
7 files changed, 151 insertions, 15 deletions
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 2ae894139..27b795871 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1015,6 +1015,12 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 		return err
 	}
 
+	for _, v := range c.config.NamedVolumes {
+		if err := c.chownVolume(v.Name); err != nil {
+			return err
+		}
+	}
+
 	// With the spec complete, do an OCI create
 	if err := c.ociRuntime.CreateContainer(c, nil); err != nil {
 		// Fedora 31 is carrying a patch to display improved error
@@ -1508,6 +1514,48 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
 	return vol, nil
 }
 
+// Chown the specified volume if necessary.
+func (c *Container) chownVolume(volumeName string) error {
+	vol, err := c.runtime.state.Volume(volumeName)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
+	}
+
+	uid := int(c.config.Spec.Process.User.UID)
+	gid := int(c.config.Spec.Process.User.GID)
+
+	vol.lock.Lock()
+	defer vol.lock.Unlock()
+
+	// The volume may need a copy-up. Check the state.
+	if err := vol.update(); err != nil {
+		return err
+	}
+
+	if vol.state.NeedsChown {
+		vol.state.NeedsChown = false
+		vol.state.UIDChowned = uid
+		vol.state.GIDChowned = gid
+
+		if err := vol.save(); err != nil {
+			return err
+		}
+		err := filepath.Walk(vol.MountPoint(), func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if err := os.Chown(path, uid, gid); err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
 	if !c.state.Mounted {
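
As a rough, standalone illustration of what the new chownVolume helper does at the filesystem level, the sketch below recursively chowns a directory tree with filepath.Walk. The path and IDs are hypothetical; libpod itself takes them from vol.MountPoint() and the container's process user, under the volume lock and only when NeedsChown is set.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// chownTree chowns root and everything below it to uid:gid, the same
// shape of walk used inside (*Container).chownVolume above.
func chownTree(root string, uid, gid int) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		return os.Chown(path, uid, gid)
	})
}

func main() {
	// Hypothetical volume data directory and IDs, for illustration only.
	if err := chownTree("/tmp/example-volume", 1000, 1000); err != nil {
		fmt.Fprintln(os.Stderr, "chown failed:", err)
		os.Exit(1)
	}
}
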
diff --git a/libpod/options.go b/libpod/options.go
index 7a60870a0..cfc0397e3 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1497,6 +1497,19 @@ func WithVolumeGID(gid int) VolumeCreateOption {
 	}
 }
 
+// WithVolumeNeedsChown sets the NeedsChown flag for the volume.
+func WithVolumeNeedsChown() VolumeCreateOption {
+	return func(volume *Volume) error {
+		if volume.valid {
+			return define.ErrVolumeFinalized
+		}
+
+		volume.state.NeedsChown = true
+
+		return nil
+	}
+}
+
 // withSetAnon sets a bool notifying libpod that this volume is anonymous and
 // should be removed when containers using it are removed and volumes are
 // specified for removal.
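
WithVolumeNeedsChown follows the functional-option pattern that VolumeCreateOption is built on: each option is a closure that mutates the Volume under construction and refuses to run once the volume has been finalized. A minimal self-contained sketch of that pattern, with illustrative names rather than libpod's real types, might look like this:

package main

import (
	"errors"
	"fmt"
)

// volume and volumeOption are stand-ins for libpod's Volume and
// VolumeCreateOption; the real types carry far more state.
type volume struct {
	name       string
	needsChown bool
	finalized  bool
}

type volumeOption func(*volume) error

// withNeedsChown mirrors the shape of WithVolumeNeedsChown above:
// refuse to modify a finalized volume, otherwise set the flag.
func withNeedsChown() volumeOption {
	return func(v *volume) error {
		if v.finalized {
			return errors.New("volume already finalized")
		}
		v.needsChown = true
		return nil
	}
}

// newVolume applies every option in order, then finalizes the volume.
func newVolume(name string, opts ...volumeOption) (*volume, error) {
	v := &volume{name: name}
	for _, opt := range opts {
		if err := opt(v); err != nil {
			return nil, err
		}
	}
	v.finalized = true
	return v, nil
}

func main() {
	v, err := newVolume("myvol", withNeedsChown())
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", v)
}
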
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index f1752cbeb..8bb6a4bcf 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -309,7 +309,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 		logrus.Debugf("Creating new volume %s for container", vol.Name)
 
 		// The volume does not exist, so we need to create it.
-		volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())}
+		volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()), WithVolumeNeedsChown()}
 		if isAnonymous {
 			volOptions = append(volOptions, withSetAnon())
 		}
diff --git a/libpod/volume.go b/libpod/volume.go
index 82f389833..ac5f61255 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -64,6 +64,14 @@ type VolumeState struct {
 	// create time, then cleared after the copy up is done and never set
 	// again.
 	NeedsCopyUp bool `json:"notYetMounted,omitempty"`
+	// NeedsChown indicates that the next time the volume is mounted into
+	// a container, the container will chown the volume to the container process
+	// UID/GID.
+	NeedsChown bool `json:"notYetChowned,omitempty"`
+	// UIDChowned is the UID the volume was chowned to.
+	UIDChowned int `json:"uidChowned,omitempty"`
+	// GIDChowned is the GID the volume was chowned to.
+	GIDChowned int `json:"gidChowned,omitempty"`
 }
 
 // Name retrieves the volume's name
@@ -113,13 +121,33 @@ func (v *Volume) Anonymous() bool {
 }
 
 // UID returns the UID the volume will be created as.
-func (v *Volume) UID() int {
-	return v.config.UID
+func (v *Volume) UID() (int, error) {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if !v.valid {
+		return -1, define.ErrVolumeRemoved
+	}
+
+	if v.state.UIDChowned > 0 {
+		return v.state.UIDChowned, nil
+	}
+	return v.config.UID, nil
 }
 
 // GID returns the GID the volume will be created as.
-func (v *Volume) GID() int {
-	return v.config.GID
+func (v *Volume) GID() (int, error) {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if !v.valid {
+		return -1, define.ErrVolumeRemoved
+	}
+
+	if v.state.GIDChowned > 0 {
+		return v.state.GIDChowned, nil
+	}
+	return v.config.GID, nil
 }
 
 // CreatedTime returns the time the volume was created at. It was not tracked
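
A note on the new state fields and accessors: UIDChowned and GIDChowned are serialized with omitempty, so a chown to UID/GID 0 is stored the same way as "never chowned", which is presumably why UID() and GID() only prefer the chowned values when they are greater than zero. A small sketch with an illustrative struct (not libpod's VolumeState) shows the serialization behavior:

package main

import (
	"encoding/json"
	"fmt"
)

// volState mimics the JSON tags on the new VolumeState fields.
type volState struct {
	NeedsChown bool `json:"notYetChowned,omitempty"`
	UIDChowned int  `json:"uidChowned,omitempty"`
	GIDChowned int  `json:"gidChowned,omitempty"`
}

func main() {
	chownedToRoot, _ := json.Marshal(volState{UIDChowned: 0, GIDChowned: 0})
	chownedToUser, _ := json.Marshal(volState{UIDChowned: 1000, GIDChowned: 1000})
	fmt.Println(string(chownedToRoot)) // {} -- a chown to 0:0 leaves no trace in the stored state
	fmt.Println(string(chownedToUser)) // {"uidChowned":1000,"gidChowned":1000}
}
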
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 136f9da5e..2be0aeaec 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -65,8 +65,15 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
 	for k, v := range v.config.Options {
 		data.Options[k] = v
 	}
-	data.UID = v.config.UID
-	data.GID = v.config.GID
+	var err error
+	data.UID, err = v.UID()
+	if err != nil {
+		return nil, err
+	}
+	data.GID, err = v.GID()
+	if err != nil {
+		return nil, err
+	}
 	data.Anonymous = v.config.IsAnon
 
 	return data, nil
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index 4b3b5430b..6523244f3 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -86,6 +86,17 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
 		utils.VolumeNotFound(w, name, err)
 		return
 	}
+	var uid, gid int
+	uid, err = vol.UID()
+	if err != nil {
+		utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
+		return
+	}
+	gid, err = vol.GID()
+	if err != nil {
+		utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
+		return
+	}
 	volResponse := entities.VolumeConfigResponse{
 		Name: vol.Name(),
 		Driver: vol.Driver(),
@@ -94,8 +105,8 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
 		Labels: vol.Labels(),
 		Scope: vol.Scope(),
 		Options: vol.Options(),
-		UID: vol.UID(),
-		GID: vol.GID(),
+		UID: uid,
+		GID: gid,
 	}
 	utils.WriteResponse(w, http.StatusOK, volResponse)
 }
@@ -130,6 +141,17 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
 	}
 	volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
+			return
+		}
+		gid, err = v.GID()
+		if err != nil {
+			utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
+			return
+		}
 		config := entities.VolumeConfigResponse{
 			Name: v.Name(),
 			Driver: v.Driver(),
@@ -138,8 +160,8 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
 			Labels: v.Labels(),
 			Scope: v.Scope(),
 			Options: v.Options(),
-			UID: v.UID(),
-			GID: v.GID(),
+			UID: uid,
+			GID: gid,
 		}
 		volumeConfigs = append(volumeConfigs, &entities.VolumeListReport{VolumeConfigResponse: config})
 	}
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index 702e11003..36847dd79 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -95,6 +95,15 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
 	}
 	reports := make([]*entities.VolumeInspectReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			return nil, err
+		}
+		gid, err = v.GID()
+		if err != nil {
+			return nil, err
+		}
 		config := entities.VolumeConfigResponse{
 			Name: v.Name(),
 			Driver: v.Driver(),
@@ -103,8 +112,8 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
 			Labels: v.Labels(),
 			Scope: v.Scope(),
 			Options: v.Options(),
-			UID: v.UID(),
-			GID: v.GID(),
+			UID: uid,
+			GID: gid,
 		}
 		reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config})
 	}
@@ -141,6 +150,15 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
 	}
 	reports := make([]*entities.VolumeListReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			return nil, err
+		}
+		gid, err = v.GID()
+		if err != nil {
+			return nil, err
+		}
 		config := entities.VolumeConfigResponse{
 			Name: v.Name(),
 			Driver: v.Driver(),
@@ -149,8 +167,8 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
 			Labels: v.Labels(),
 			Scope: v.Scope(),
 			Options: v.Options(),
-			UID: v.UID(),
-			GID: v.GID(),
+			UID: uid,
+			GID: gid,
 		}
 		reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config})
 	}
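
With the handler and engine changes above, inspect and list responses report the chowned owner rather than the creation-time UID/GID once a container has used the volume. The sketch below reads those fields from an inspect-style JSON payload; the struct and payload are illustrative stand-ins, not libpod's entities.VolumeConfigResponse:

package main

import (
	"encoding/json"
	"fmt"
)

// volumeInfo picks out the UID/GID keys surfaced by the handlers above.
type volumeInfo struct {
	Name string `json:"Name"`
	UID  int    `json:"UID"`
	GID  int    `json:"GID"`
}

func main() {
	// Example payload of the shape a volume-inspect endpoint might return
	// after the volume has been chowned to the container process user.
	payload := []byte(`{"Name":"myvol","UID":1000,"GID":1000}`)

	var v volumeInfo
	if err := json.Unmarshal(payload, &v); err != nil {
		panic(err)
	}
	fmt.Printf("volume %s is owned by %d:%d\n", v.Name, v.UID, v.GID)
}
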