-rw-r--r-- | cmd/podman/main.go | 5
-rw-r--r-- | docs/source/markdown/podman-system-service.1.md | 4
-rw-r--r-- | libpod/container_internal.go | 52
-rw-r--r-- | libpod/options.go | 13
-rw-r--r-- | libpod/runtime_ctr.go | 2
-rw-r--r-- | libpod/volume.go | 36
-rw-r--r-- | libpod/volume_inspect.go | 11
-rw-r--r-- | pkg/api/handlers/libpod/volumes.go | 30
-rw-r--r-- | pkg/domain/infra/abi/volumes.go | 26
-rw-r--r-- | pkg/spec/createconfig.go | 9
-rw-r--r-- | test/e2e/run_userns_test.go | 27
11 files changed, 194 insertions, 21 deletions
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 7a015b300..636538131 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -29,6 +29,11 @@ func main() {
 		return
 	}
 
+	// Hard code TMPDIR functions to use /var/tmp, if user did not override
+	if _, ok := os.LookupEnv("TMPDIR"); !ok {
+		os.Setenv("TMPDIR", "/var/tmp")
+	}
+
 	cfg := registry.PodmanConfig()
 	for _, c := range registry.Commands {
 		for _, m := range c.Mode {
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index 3ae414f7a..7d18a6832 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -13,6 +13,10 @@ If no endpoint is provided, defaults will be used. The default endpoint for a r
 service is *unix:/run/podman/podman.sock* and rootless is *unix:/$XDG_RUNTIME_DIR/podman/podman.sock* (for
 example *unix:/run/user/1000/podman/podman.sock*)
 
+The REST API provided by **podman system service** is split into two parts: a compatibility layer offering support for the Docker v1.40 API, and a Podman-native Libpod layer.
+Documentation for the latter is available at *https://docs.podman.io/en/latest/_static/api.html*.
+Both APIs are versioned, but the server will not reject requests with an unsupported version set.
+
 ## OPTIONS
 
 **--time**, **-t**
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index db64f5eeb..27b795871 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1015,6 +1015,12 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 		return err
 	}
 
+	for _, v := range c.config.NamedVolumes {
+		if err := c.chownVolume(v.Name); err != nil {
+			return err
+		}
+	}
+
 	// With the spec complete, do an OCI create
 	if err := c.ociRuntime.CreateContainer(c, nil); err != nil {
 		// Fedora 31 is carrying a patch to display improved error
@@ -1508,6 +1514,48 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
 	return vol, nil
 }
 
+// Chown the specified volume if necessary.
+func (c *Container) chownVolume(volumeName string) error {
+	vol, err := c.runtime.state.Volume(volumeName)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
+	}
+
+	uid := int(c.config.Spec.Process.User.UID)
+	gid := int(c.config.Spec.Process.User.GID)
+
+	vol.lock.Lock()
+	defer vol.lock.Unlock()
+
+	// The volume may need a copy-up. Check the state.
+	if err := vol.update(); err != nil {
+		return err
+	}
+
+	if vol.state.NeedsChown {
+		vol.state.NeedsChown = false
+		vol.state.UIDChowned = uid
+		vol.state.GIDChowned = gid
+
+		if err := vol.save(); err != nil {
+			return err
+		}
+		err := filepath.Walk(vol.MountPoint(), func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if err := os.Chown(path, uid, gid); err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
 	if !c.state.Mounted {
@@ -1854,8 +1902,8 @@ func (c *Container) unmount(force bool) error {
 // this should be from chrootarchive.
 // Container MUST be mounted before calling.
 func (c *Container) copyWithTarFromImage(source, dest string) error {
-	a := archive.NewDefaultArchiver()
-
+	mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
+	a := archive.NewArchiver(mappings)
 	if err := c.copyOwnerAndPerms(source, dest); err != nil {
 		return err
 	}
diff --git a/libpod/options.go b/libpod/options.go
index 28be1bc03..4041fb1cf 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1497,6 +1497,19 @@ func WithVolumeGID(gid int) VolumeCreateOption {
 	}
 }
 
+// WithVolumeNeedsChown sets the NeedsChown flag for the volume.
+func WithVolumeNeedsChown() VolumeCreateOption {
+	return func(volume *Volume) error {
+		if volume.valid {
+			return define.ErrVolumeFinalized
+		}
+
+		volume.state.NeedsChown = true
+
+		return nil
+	}
+}
+
 // withSetAnon sets a bool notifying libpod that this volume is anonymous and
 // should be removed when containers using it are removed and volumes are
 // specified for removal.
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index dd6602acb..74647dab8 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -309,7 +309,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
 		logrus.Debugf("Creating new volume %s for container", vol.Name)
 
 		// The volume does not exist, so we need to create it.
-		volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID())}
+		volOptions := []VolumeCreateOption{WithVolumeName(vol.Name), WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()), WithVolumeNeedsChown()}
 		if isAnonymous {
 			volOptions = append(volOptions, withSetAnon())
 		}
diff --git a/libpod/volume.go b/libpod/volume.go
index b29ac7ddf..58d1f81a6 100644
--- a/libpod/volume.go
+++ b/libpod/volume.go
@@ -64,6 +64,14 @@ type VolumeState struct {
 	// create time, then cleared after the copy up is done and never set
 	// again.
 	NeedsCopyUp bool `json:"notYetMounted,omitempty"`
+	// NeedsChown indicates that the next time the volume is mounted into
+	// a container, the container will chown the volume to the container process
+	// UID/GID.
+	NeedsChown bool `json:"notYetChowned,omitempty"`
+	// UIDChowned is the UID the volume was chowned to.
+	UIDChowned int `json:"uidChowned,omitempty"`
+	// GIDChowned is the GID the volume was chowned to.
+	GIDChowned int `json:"gidChowned,omitempty"`
 }
 
 // Name retrieves the volume's name
@@ -113,13 +121,33 @@ func (v *Volume) Anonymous() bool {
 }
 
 // UID returns the UID the volume will be created as.
-func (v *Volume) UID() int {
-	return v.config.UID
+func (v *Volume) UID() (int, error) {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if !v.valid {
+		return -1, define.ErrVolumeRemoved
+	}
+
+	if v.state.UIDChowned > 0 {
+		return v.state.UIDChowned, nil
+	}
+	return v.config.UID, nil
 }
 
 // GID returns the GID the volume will be created as.
-func (v *Volume) GID() int {
-	return v.config.GID
+func (v *Volume) GID() (int, error) {
+	v.lock.Lock()
+	defer v.lock.Unlock()
+
+	if !v.valid {
+		return -1, define.ErrVolumeRemoved
+	}
+
+	if v.state.GIDChowned > 0 {
+		return v.state.GIDChowned, nil
+	}
+	return v.config.GID, nil
 }
 
 // CreatedTime returns the time the volume was created at. It was not tracked
diff --git a/libpod/volume_inspect.go b/libpod/volume_inspect.go
index 136f9da5e..2be0aeaec 100644
--- a/libpod/volume_inspect.go
+++ b/libpod/volume_inspect.go
@@ -65,8 +65,15 @@ func (v *Volume) Inspect() (*InspectVolumeData, error) {
 	for k, v := range v.config.Options {
 		data.Options[k] = v
 	}
-	data.UID = v.config.UID
-	data.GID = v.config.GID
+	var err error
+	data.UID, err = v.UID()
+	if err != nil {
+		return nil, err
+	}
+	data.GID, err = v.GID()
+	if err != nil {
+		return nil, err
+	}
 	data.Anonymous = v.config.IsAnon
 
 	return data, nil
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index 4b3b5430b..6523244f3 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -86,6 +86,17 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
 		utils.VolumeNotFound(w, name, err)
 		return
 	}
+	var uid, gid int
+	uid, err = vol.UID()
+	if err != nil {
+		utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
+		return
+	}
+	gid, err = vol.GID()
+	if err != nil {
+		utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
+		return
+	}
 	volResponse := entities.VolumeConfigResponse{
 		Name:       vol.Name(),
 		Driver:     vol.Driver(),
@@ -94,8 +105,8 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
 		Labels:     vol.Labels(),
 		Scope:      vol.Scope(),
 		Options:    vol.Options(),
-		UID:        vol.UID(),
-		GID:        vol.GID(),
+		UID:        uid,
+		GID:        gid,
 	}
 	utils.WriteResponse(w, http.StatusOK, volResponse)
 }
@@ -130,6 +141,17 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
 	}
 	volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			utils.Error(w, "Error fetching volume UID", http.StatusInternalServerError, err)
+			return
+		}
+		gid, err = v.GID()
+		if err != nil {
+			utils.Error(w, "Error fetching volume GID", http.StatusInternalServerError, err)
+			return
+		}
 		config := entities.VolumeConfigResponse{
 			Name:       v.Name(),
 			Driver:     v.Driver(),
@@ -138,8 +160,8 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
 			Labels:     v.Labels(),
 			Scope:      v.Scope(),
 			Options:    v.Options(),
-			UID:        v.UID(),
-			GID:        v.GID(),
+			UID:        uid,
+			GID:        gid,
 		}
 		volumeConfigs = append(volumeConfigs, &entities.VolumeListReport{VolumeConfigResponse: config})
 	}
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index 702e11003..36847dd79 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -95,6 +95,15 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
 	}
 	reports := make([]*entities.VolumeInspectReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			return nil, err
+		}
+		gid, err = v.GID()
+		if err != nil {
+			return nil, err
+		}
 		config := entities.VolumeConfigResponse{
 			Name:       v.Name(),
 			Driver:     v.Driver(),
@@ -103,8 +112,8 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
 			Labels:     v.Labels(),
 			Scope:      v.Scope(),
 			Options:    v.Options(),
-			UID:        v.UID(),
-			GID:        v.GID(),
+			UID:        uid,
+			GID:        gid,
 		}
 		reports = append(reports, &entities.VolumeInspectReport{VolumeConfigResponse: &config})
 	}
@@ -141,6 +150,15 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
 	}
 	reports := make([]*entities.VolumeListReport, 0, len(vols))
 	for _, v := range vols {
+		var uid, gid int
+		uid, err = v.UID()
+		if err != nil {
+			return nil, err
+		}
+		gid, err = v.GID()
+		if err != nil {
+			return nil, err
+		}
 		config := entities.VolumeConfigResponse{
 			Name:       v.Name(),
 			Driver:     v.Driver(),
@@ -149,8 +167,8 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
 			Labels:     v.Labels(),
 			Scope:      v.Scope(),
 			Options:    v.Options(),
-			UID:        v.UID(),
-			GID:        v.GID(),
+			UID:        uid,
+			GID:        gid,
 		}
 		reports = append(reports, &entities.VolumeListReport{VolumeConfigResponse: config})
 	}
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index e19c582b5..a04afa00f 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -287,10 +287,11 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
 		options = append(options, libpod.WithCommand(c.UserCommand))
 	}
 
-	// Add entrypoint unconditionally
-	// If it's empty it's because it was explicitly set to "" or the image
-	// does not have one
-	options = append(options, libpod.WithEntrypoint(c.Entrypoint))
+	// Add entrypoint if it was set
+	// If it's empty it's because it was explicitly set to ""
+	if c.Entrypoint != nil {
+		options = append(options, libpod.WithEntrypoint(c.Entrypoint))
+	}
 
 	// TODO: MNT, USER, CGROUP
 	options = append(options, libpod.WithStopSignal(c.StopSignal))
diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go
index be0981408..3e55f56c0 100644
--- a/test/e2e/run_userns_test.go
+++ b/test/e2e/run_userns_test.go
@@ -245,4 +245,31 @@ var _ = Describe("Podman UserNS support", func() {
 		ok, _ := session.GrepString("4998")
 		Expect(ok).To(BeTrue())
 	})
+
+	It("podman --user with volume", func() {
+		tests := []struct {
+			uid, gid, arg, vol string
+		}{
+			{"0", "0", "0:0", "vol-0"},
+			{"1000", "0", "1000", "vol-1"},
+			{"1000", "1000", "1000:1000", "vol-2"},
+		}
+
+		for _, tt := range tests {
+			session := podmanTest.Podman([]string{"run", "-d", "--user", tt.arg, "--mount", "type=volume,src=" + tt.vol + ",dst=/home/user", "alpine", "top"})
+			session.WaitWithDefaultTimeout()
+			Expect(session.ExitCode()).To(Equal(0))
+
+			inspectUID := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .UID }}", tt.vol})
+			inspectUID.WaitWithDefaultTimeout()
+			Expect(inspectUID.ExitCode()).To(Equal(0))
+			Expect(inspectUID.OutputToString()).To(Equal(tt.uid))
+
+			// Make sure we're defaulting to 0.
+			inspectGID := podmanTest.Podman([]string{"volume", "inspect", "--format", "{{ .GID }}", tt.vol})
+			inspectGID.WaitWithDefaultTimeout()
+			Expect(inspectGID.ExitCode()).To(Equal(0))
+			Expect(inspectGID.OutputToString()).To(Equal(tt.gid))
+		}
+	})
 })
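
Note: the recursive chown performed by the new chownVolume method above amounts to walking the volume's mount point and chowning every entry to the container process's UID and GID. The following standalone Go sketch shows the same pattern outside libpod; the path and IDs are hypothetical examples, not values taken from this change.

// chowntree.go: minimal sketch of the recursive chown applied to a volume
// mount point the first time it is mounted into a container.
package main

import (
	"log"
	"os"
	"path/filepath"
)

// chownTree chowns root and everything beneath it to uid:gid.
func chownTree(root string, uid, gid int) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		return os.Chown(path, uid, gid)
	})
}

func main() {
	// Hypothetical mount point; libpod uses vol.MountPoint() and the
	// UID/GID taken from the container's OCI spec.
	if err := chownTree("/var/lib/containers/storage/volumes/vol-0/_data", 1000, 1000); err != nil {
		log.Fatal(err)
	}
}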
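
Note: WithVolumeNeedsChown follows the functional-option pattern libpod uses for volume creation: each option is a closure that mutates the volume while it is being built and refuses to run once the volume is finalized. The sketch below illustrates that pattern with simplified stand-in types; it is not the libpod implementation.

// options_sketch.go: illustrates the functional-option pattern behind
// WithVolumeNeedsChown using stand-in types.
package main

import (
	"errors"
	"fmt"
)

type volume struct {
	valid      bool // set once the volume is finalized and registered
	needsChown bool
}

// volumeCreateOption mirrors the shape of libpod's VolumeCreateOption.
type volumeCreateOption func(*volume) error

// withVolumeNeedsChown marks the volume to be chowned on first use and,
// like the real option, fails if the volume has already been finalized.
func withVolumeNeedsChown() volumeCreateOption {
	return func(v *volume) error {
		if v.valid {
			return errors.New("volume has already been finalized")
		}
		v.needsChown = true
		return nil
	}
}

func newVolume(opts ...volumeCreateOption) (*volume, error) {
	v := &volume{}
	for _, opt := range opts {
		if err := opt(v); err != nil {
			return nil, err
		}
	}
	v.valid = true
	return v, nil
}

func main() {
	v, err := newVolume(withVolumeNeedsChown())
	if err != nil {
		panic(err)
	}
	fmt.Println("needsChown:", v.needsChown)
}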