-rw-r--r--   cmd/podman/inspect.go                      | 18
-rw-r--r--   docs/source/markdown/podman-inspect.1.md   | 14
-rw-r--r--   libpod/container_internal.go               | 52
-rw-r--r--   libpod/options.go                          | 13
-rw-r--r--   libpod/runtime_ctr.go                      |  2
-rw-r--r--   libpod/volume.go                           | 36
-rw-r--r--   libpod/volume_inspect.go                   | 11
-rw-r--r--   pkg/api/handlers/libpod/volumes.go         | 30
-rw-r--r--   pkg/domain/infra/abi/volumes.go            | 26
-rw-r--r--   test/e2e/run_userns_test.go                | 27
-rw-r--r--   utils/utils_supported.go                   |  4
11 files changed, 206 insertions, 27 deletions
diff --git a/cmd/podman/inspect.go b/cmd/podman/inspect.go
index 12e11d0f5..befdeb445 100644
--- a/cmd/podman/inspect.go
+++ b/cmd/podman/inspect.go
@@ -8,12 +8,22 @@ import (
 )
 
 var (
+	inspectDescription = `Displays the low-level information on an object identified by name or ID.
+  For more inspection options, see:
+
+      podman container inspect
+      podman image inspect
+      podman network inspect
+      podman pod inspect
+      podman volume inspect`
+
 	// Command: podman _inspect_ Object_ID
 	inspectCmd = &cobra.Command{
-		Use:   "inspect [flags] {CONTAINER_ID | IMAGE_ID} [...]",
-		Short: "Display the configuration of object denoted by ID",
-		Long:  "Displays the low-level information on an object identified by name or ID",
-		RunE:  inspectExec,
+		Use:              "inspect [flags] {CONTAINER_ID | IMAGE_ID} [...]",
+		Short:            "Display the configuration of object denoted by ID",
+		RunE:             inspectExec,
+		Long:             inspectDescription,
+		TraverseChildren: true,
 		Example: `podman inspect fedora
   podman inspect --type image fedora
   podman inspect CtrID ImgID
diff --git a/docs/source/markdown/podman-inspect.1.md b/docs/source/markdown/podman-inspect.1.md
index 4998772c3..a1dcd1a0e 100644
--- a/docs/source/markdown/podman-inspect.1.md
+++ b/docs/source/markdown/podman-inspect.1.md
@@ -6,15 +6,21 @@ podman\-inspect - Display a container or image's configuration
 ## SYNOPSIS
 **podman inspect** [*options*] *name* [...]
 
-**podman image inspect** [*options*] *image*
-
-**podman container inspect** [*options*] *container*
-
 ## DESCRIPTION
+
 This displays the low-level information on containers and images identified by name or ID.
 By default, this will render all results in a JSON array. If the container and image have the same name, this will return container JSON for unspecified type.
 If a format is specified, the given template will be executed for each result.
 
+For more inspection options, see:
+
+    podman container inspect
+    podman image inspect
+    podman network inspect
+    podman pod inspect
+    podman volume inspect
+
+
 ## OPTIONS
 
 **--type**, **-t**=*type*
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index db64f5eeb..27b795871 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1015,6 +1015,12 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 		return err
 	}
 
+	for _, v := range c.config.NamedVolumes {
+		if err := c.chownVolume(v.Name); err != nil {
+			return err
+		}
+	}
+
 	// With the spec complete, do an OCI create
 	if err := c.ociRuntime.CreateContainer(c, nil); err != nil {
 		// Fedora 31 is carrying a patch to display improved error
@@ -1508,6 +1514,48 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
 	return vol, nil
 }
 
+// Chown the specified volume if necessary.
+func (c *Container) chownVolume(volumeName string) error {
+	vol, err := c.runtime.state.Volume(volumeName)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
+	}
+
+	uid := int(c.config.Spec.Process.User.UID)
+	gid := int(c.config.Spec.Process.User.GID)
+
+	vol.lock.Lock()
+	defer vol.lock.Unlock()
+
+	// The volume may need a copy-up. Check the state.
+	if err := vol.update(); err != nil {
+		return err
+	}
+
+	if vol.state.NeedsChown {
+		vol.state.NeedsChown = false
+		vol.state.UIDChowned = uid
+		vol.state.GIDChowned = gid
+
+		if err := vol.save(); err != nil {
+			return err
+		}
+		err := filepath.Walk(vol.MountPoint(), func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if err := os.Chown(path, uid, gid); err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
 	if !c.state.Mounted {
@@ -1854,8 +1902,8 @@ func (c *Container) unmount(force bool) error {
 // this should be from chrootarchive.
 // Container MUST be mounted before calling.
 func (c *Container) copyWithTarFromImage(source, dest string) error {
-	a := archive.NewDefaultArchiver()
-
+	mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
+	a := archive.NewArchiver(mappings)
 	if err := c.copyOwnerAndPerms(source, dest); err != nil {
 		return err
 	}
diff --git a/libpod/options.go b/libpod/options.go
index 28be1bc03..4041fb1cf 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1497,6 +1497,19 @@ func WithVolumeGID(gid int) VolumeCreateOption {
 	}
 }
 
+// WithVolumeNeedsChown sets the NeedsChown flag for the volume.
+func WithVolumeNeedsChown() VolumeCreateOption {
+	return func(volume *Volume) error {
+		if volume.valid {
+			return define.ErrVolumeFinalized
+		}
+
+		volume.state.NeedsChown = true
+
+		return nil
+	}
+}
+
 // withSetAnon sets a bool notifying libpod that this volume is anonymous and
 // should be removed when containers using it are removed and volumes are
 // specified for removal.
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index dd6602acb..74647dab8 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -309,7 +309,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 			logrus.Debugf("Creating new volume %s for container", vol.Name)
 
 			// The volume does not exist, so we need to create it.
-			volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())}
+			volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()), WithVolumeNeedsChown()}
 			if isAnonymous {
 				volOptions = append(volOptions, withSetAnon())
 			}
diff --git a/libpod/volume.go b/libpod/volume.go
index b29ac7ddf..58d1f81a6 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -64,6 +64,14 @@ type VolumeState struct {
 	// create time, then cleared after the copy up is done and never set
 	// again.
 	NeedsCopyUp bool `json:"notYetMounted,omitempty"`
+	// NeedsChown indicates that the next time the volume is mounted into
+	// a container, the container will chown the volume to the container process
+	// UID/GID.
+	NeedsChown bool `json:"notYetChowned,omitempty"`
+	// UIDChowned is the UID the volume was chowned to.
+	UIDChowned int `json:"uidChowned,omitempty"`
+	// GIDChowned is the GID the volume was chowned to.
+	GIDChowned int `json:"gidChowned,omitempty"`
 }
 
 // Name retrieves the volume's name
@@ -113,13 +121,33 @@ func (v *Volume) Anonymous() bool {
 }
 
 // UID returns the UID the volume will be created as.
-func (v *Volume) UID() int {
-	return v.config.UID
+func (v *Volume) UID() (int, error) {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if !v.valid {
+		return -1, define.ErrVolumeRemoved
+	}
+
+	if v.state.UIDChowned > 0 {
+		return v.state.UIDChowned, nil
+	}
+	return v.config.UID, nil
 }
 
 // GID returns the GID the volume will be created as.
-func (v *Volume) GID() int {
-	return v.config.GID
+func (v *Volume) GID() (int, error) {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if !v.valid {
+		return -1, define.ErrVolumeRemoved
+	}
+
+	if v.state.GIDChowned > 0 {
+		return v.state.GIDChowned, nil
+	}
+	return v.config.GID, nil
 }
 
 // CreatedTime returns the time the volume was created at. It was not tracked
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 136f9da5e..2be0aeaec 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -65,8 +65,15 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
 	for k, v := range v.config.Options {
 		data.Options[k] = v
 	}
-	data.UID = v.config.UID
-	data.GID = v.config.GID
+	var err error
+	data.UID, err = v.UID()
+	if err != nil {
+		return nil, err
+	}
+	data.GID, err = v.GID()
+	if err != nil {
+		return nil, err
+	}
 	data.Anonymous = v.config.IsAnon
 
 	return data, nil
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index 4b3b5430b..6523244f3 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -86,6 +86,17 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
 		utils.VolumeNotFound(w, name, err)
 		return
 	}
+	var uid, gid int
+	uid, err = vol.UID()
+	if err != nil {
+		utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
+		return
+	}
+	gid, err = vol.GID()
+	if err != nil {
+		utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
+		return
+	}
 	volResponse := entities.VolumeConfigResponse{
 		Name:   vol.Name(),
 		Driver: vol.Driver(),
@@ -94,8 +105,8 @@
 		Labels:  vol.Labels(),
 		Scope:   vol.Scope(),
 		Options: vol.Options(),
-		UID:     vol.UID(),
-		GID:     vol.GID(),
+		UID:     uid,
+		GID:     gid,
 	}
 	utils.WriteResponse(w, http.StatusOK, volResponse)
 }
@@ -130,6 +141,17 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
 	}
 	volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
+			return
+		}
+		gid, err = v.GID()
+		if err != nil {
+			utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
+			return
+		}
 		config := entities.VolumeConfigResponse{
 			Name:   v.Name(),
 			Driver: v.Driver(),
@@ -138,8 +160,8 @@
 			Labels:  v.Labels(),
 			Scope:   v.Scope(),
 			Options: v.Options(),
-			UID:     v.UID(),
-			GID:     v.GID(),
+			UID:     uid,
+			GID:     gid,
 		}
 		volumeConfigs = append(volumeConfigs, &entities.VolumeListReport{VolumeConfigResponse: config})
 	}
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index 702e11003..36847dd79 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -95,6 +95,15 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
 	}
 	reports := make([]*entities.VolumeInspectReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			return nil, err
+		}
+		gid, err = v.GID()
+		if err != nil {
+			return nil, err
+		}
 		config := entities.VolumeConfigResponse{
 			Name:   v.Name(),
 			Driver: v.Driver(),
@@ -103,8 +112,8 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
 			Labels:  v.Labels(),
 			Scope:   v.Scope(),
 			Options: v.Options(),
-			UID:     v.UID(),
-			GID:     v.GID(),
+			UID:     uid,
+			GID:     gid,
 		}
 		reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config})
 	}
@@ -141,6 +150,15 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
 	}
 	reports := make([]*entities.VolumeListReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			return nil, err
+		}
+		gid, err = v.GID()
+		if err != nil {
+			return nil, err
+		}
 		config := entities.VolumeConfigResponse{
 			Name:   v.Name(),
 			Driver: v.Driver(),
@@ -149,8 +167,8 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
 			Labels:  v.Labels(),
 			Scope:   v.Scope(),
 			Options: v.Options(),
-			UID:     v.UID(),
-			GID:     v.GID(),
+			UID:     uid,
+			GID:     gid,
 		}
 		reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config})
 	}
diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go
index be0981408..3e55f56c0 100644
--- a/test/e2e/run_userns_test.go
+++ b/test/e2e/run_userns_test.go
@@ -245,4 +245,31 @@ var _ = Describe("Podman UserNS support", func() {
 		ok, _ := session.GrepString("4998")
 		Expect(ok).To(BeTrue())
 	})
+
+	It("podman --user with volume", func() {
+		tests := []struct {
+			uid, gid, arg, vol string
+		}{
+			{"0", "0", "0:0", "vol-0"},
+			{"1000", "0", "1000", "vol-1"},
+			{"1000", "1000", "1000:1000", "vol-2"},
+		}
+
+		for _, tt := range tests {
+			session := podmanTest.Podman([]string{"run", "-d", "--user", tt.arg, "--mount", "type=volume,src=" + tt.vol + ",dst=/home/user", "alpine", "top"})
+			session.WaitWithDefaultTimeout()
+			Expect(session.ExitCode()).To(Equal(0))
+
+			inspectUID := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .UID }}", tt.vol})
+			inspectUID.WaitWithDefaultTimeout()
+			Expect(inspectUID.ExitCode()).To(Equal(0))
+			Expect(inspectUID.OutputToString()).To(Equal(tt.uid))
+
+			// Make sure we're defaulting to 0.
+			inspectGID := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .GID }}", tt.vol})
+			inspectGID.WaitWithDefaultTimeout()
+			Expect(inspectGID.ExitCode()).To(Equal(0))
+			Expect(inspectGID.OutputToString()).To(Equal(tt.gid))
+		}
+	})
 })
diff --git a/utils/utils_supported.go b/utils/utils_supported.go
index 201ddb57b..4258e6d7a 100644
--- a/utils/utils_supported.go
+++ b/utils/utils_supported.go
@@ -64,7 +64,7 @@ func getCgroupProcess(procFile string) (string, error) {
 	cgroup := "/"
 	for scanner.Scan() {
 		line := scanner.Text()
-		parts := strings.Split(line, ":")
+		parts := strings.SplitN(line, ":", 3)
 		if len(parts) != 3 {
 			return "", errors.Errorf("cannot parse cgroup line %q", line)
 		}
@@ -116,7 +116,7 @@ func MoveUnderCgroupSubtree(subtree string) error {
 	scanner := bufio.NewScanner(f)
 	for scanner.Scan() {
 		line := scanner.Text()
-		parts := strings.Split(line, ":")
+		parts := strings.SplitN(line, ":", 3)
 		if len(parts) != 3 {
 			return errors.Errorf("cannot parse cgroup line %q", line)
 		}
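
As a reading aid for the chownVolume hunk in libpod/container_internal.go above, the following minimal, self-contained sketch isolates the pattern it relies on: walk a directory tree with filepath.Walk and chown every entry to the container process's UID/GID. The mount path and IDs below are placeholder values chosen for illustration, not anything taken from libpod.

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // chownTree hands ownership of root and everything beneath it to uid:gid,
    // mirroring the filepath.Walk + os.Chown loop added by chownVolume above.
    func chownTree(root string, uid, gid int) error {
    	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
    		if err != nil {
    			return err
    		}
    		return os.Chown(path, uid, gid)
    	})
    }

    func main() {
    	// Placeholder mount point; libpod would pass vol.MountPoint() here.
    	if err := chownTree("/tmp/example-volume", 1000, 1000); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }

A related defensive touch is the strings.SplitN(line, ":", 3) change in utils/utils_supported.go: a /proc/self/cgroup entry has the form hierarchy-ID:controller-list:path, and the path component may itself contain colons, so splitting with a limit of 3 keeps such lines parseable instead of tripping the len(parts) != 3 check.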