Diffstat (limited to 'libpod')
-rw-r--r--  libpod/container_internal_linux.go  40
-rw-r--r--  libpod/container_log_linux.go        2
-rw-r--r--  libpod/image/prune.go               50
3 files changed, 69 insertions, 23 deletions
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index eba732d2a..514cdaee1 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math"
"net"
"os"
"os/user"
@@ -35,6 +36,7 @@ import (
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/podman/v2/utils"
"github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/idtools"
securejoin "github.com/cyphar/filepath-securejoin"
runcuser "github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
@@ -416,9 +418,43 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
 	// Look up and add groups the user belongs to, if a group wasn't directly specified
 	if !strings.Contains(c.config.User, ":") {
+		// the gidMappings that are present inside the container user namespace
+		var gidMappings []idtools.IDMap
+
+		switch {
+		case len(c.config.IDMappings.GIDMap) > 0:
+			gidMappings = c.config.IDMappings.GIDMap
+		case rootless.IsRootless():
+			// Check whether the current user namespace has enough gids available.
+			availableGids, err := rootless.GetAvailableGids()
+			if err != nil {
+				return nil, errors.Wrapf(err, "cannot read number of available GIDs")
+			}
+			gidMappings = []idtools.IDMap{{
+				ContainerID: 0,
+				HostID:      0,
+				Size:        int(availableGids),
+			}}
+		default:
+			gidMappings = []idtools.IDMap{{
+				ContainerID: 0,
+				HostID:      0,
+				Size:        math.MaxInt32,
+			}}
+		}
 		for _, gid := range execUser.Sgids {
-			// FIXME: We need to add a flag to containers.conf to not add these for HPC Users.
-			g.AddProcessAdditionalGid(uint32(gid))
+			isGidAvailable := false
+			for _, m := range gidMappings {
+				if gid >= m.ContainerID && gid < m.ContainerID+m.Size {
+					isGidAvailable = true
+					break
+				}
+			}
+			if isGidAvailable {
+				g.AddProcessAdditionalGid(uint32(gid))
+			} else {
+				logrus.Warnf("additional gid=%d is not present in the user namespace, skip setting it", gid)
+			}
 		}
 	}
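
The hunk above only adds a supplementary GID to the generated spec when that GID falls inside one of the container's GID mappings; an unmapped GID does not exist in the user namespace, so setting it would fail at container start. Below is a minimal standalone sketch of that range check, using the same idtools.IDMap type the patch imports. The helper name and the example mapping are illustrative, not part of the patch.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

// gidIsMapped reports whether gid falls inside any container-side GID range,
// i.e. whether the GID can be resolved inside the container's user namespace.
// This mirrors the isGidAvailable loop in the hunk above.
func gidIsMapped(gid int, gidMappings []idtools.IDMap) bool {
	for _, m := range gidMappings {
		if gid >= m.ContainerID && gid < m.ContainerID+m.Size {
			return true
		}
	}
	return false
}

func main() {
	// Example mapping: 65536 GIDs mapped starting at container GID 0.
	mappings := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	for _, gid := range []int{10, 65535, 70000} {
		fmt.Printf("gid=%d mapped=%v\n", gid, gidIsMapped(gid, mappings))
	}
}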
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index 73c2df76e..d895171cf 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -33,7 +33,7 @@ const (
 func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine) error {
 	var config journal.JournalReaderConfig
 	if options.Tail < 0 {
-		config.NumFromTail = math.MaxUint64
+		config.NumFromTail = 0
 	} else {
 		config.NumFromTail = uint64(options.Tail)
 	}
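
For the one-line change above: a negative options.Tail means "no tail limit". Instead of asking the journal reader to skip back math.MaxUint64 entries, the patch leaves NumFromTail at 0, which (as I read the go-systemd journal reader) means no tail seek at all. A tiny sketch of that mapping follows; the helper name is hypothetical and not part of the patch.

package main

import "fmt"

// journalNumFromTail maps a user-facing --tail value onto the journal
// reader's NumFromTail field: a negative tail means "show everything",
// expressed as 0 (no tail seek) rather than the largest possible count.
func journalNumFromTail(tail int64) uint64 {
	if tail < 0 {
		return 0
	}
	return uint64(tail)
}

func main() {
	for _, tail := range []int64{-1, 0, 100} {
		fmt.Printf("tail=%d -> NumFromTail=%d\n", tail, journalNumFromTail(tail))
	}
}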
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index fcc65fb03..b38265a7e 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -125,29 +125,39 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
 		filterFuncs = append(filterFuncs, generatedFunc)
 	}
-	pruneImages, err := ir.GetPruneImages(ctx, all, filterFuncs)
-	if err != nil {
-		return nil, errors.Wrap(err, "unable to get images to prune")
-	}
-	prunedCids := make([]string, 0, len(pruneImages))
-	for _, p := range pruneImages {
-		repotags, err := p.RepoTags()
+	pruned := []string{}
+	prev := 0
+	for {
+		toPrune, err := ir.GetPruneImages(ctx, all, filterFuncs)
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, "unable to get images to prune")
 		}
-		if err := p.Remove(ctx, true); err != nil {
-			if errors.Cause(err) == storage.ErrImageUsedByContainer {
-				logrus.Warnf("Failed to prune image %s as it is in use: %v.\nA container associated with containers/storage i.e. Buildah, CRI-O, etc., maybe associated with this image.\nUsing the rmi command with the --force option will remove the container and image, but may cause failures for other dependent systems.", p.ID(), err)
-				continue
-			}
-			return nil, errors.Wrap(err, "failed to prune image")
+		numImages := len(toPrune)
+		if numImages == 0 || numImages == prev {
+			// If there's nothing left to do, return.
+			break
 		}
-		defer p.newImageEvent(events.Prune)
-		nameOrID := p.ID()
-		if len(repotags) > 0 {
-			nameOrID = repotags[0]
+		prev = numImages
+		for _, img := range toPrune {
+			repotags, err := img.RepoTags()
+			if err != nil {
+				return nil, err
+			}
+			if err := img.Remove(ctx, false); err != nil {
+				if errors.Cause(err) == storage.ErrImageUsedByContainer {
+					logrus.Warnf("Failed to prune image %s as it is in use: %v.\nA container associated with containers/storage (e.g., Buildah, CRI-O, etc.) maybe associated with this image.\nUsing the rmi command with the --force option will remove the container and image, but may cause failures for other dependent systems.", img.ID(), err)
+					continue
+				}
+				return nil, errors.Wrap(err, "failed to prune image")
+			}
+			defer img.newImageEvent(events.Prune)
+			nameOrID := img.ID()
+			if len(repotags) > 0 {
+				nameOrID = repotags[0]
+			}
+			pruned = append(pruned, nameOrID)
 		}
-		prunedCids = append(prunedCids, nameOrID)
+
 	}
-	return prunedCids, nil
+	return pruned, nil
 }
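
The prune.go hunks above replace a single prune pass with a loop: removing one image can turn its parent or an intermediate image into a new prune candidate, so GetPruneImages is called again until a pass returns nothing, and the numImages == prev check bails out when a pass makes no progress (for example when every remaining candidate is still in use). Below is a reduced sketch of that fixed-point pattern; collect and remove are hypothetical stand-ins for GetPruneImages and Image.Remove, not podman APIs.

package main

import "fmt"

// pruneUntilStable repeats prune passes until a pass finds no candidates,
// or finds the same number as the previous pass (i.e. no progress was made).
// Removal errors are skipped, matching the "image in use" handling above.
func pruneUntilStable(collect func() []string, remove func(string) error) []string {
	pruned := []string{}
	prev := 0
	for {
		candidates := collect()
		if len(candidates) == 0 || len(candidates) == prev {
			break
		}
		prev = len(candidates)
		for _, id := range candidates {
			if err := remove(id); err != nil {
				continue // e.g. image still used by a container
			}
			pruned = append(pruned, id)
		}
	}
	return pruned
}

func main() {
	// Toy image store: pruning both children makes their parent prunable
	// only on the next pass.
	removed := map[string]bool{}
	deps := map[string][]string{"parent": {"child1", "child2"}}
	collect := func() []string {
		var out []string
		for _, img := range []string{"child1", "child2", "parent"} {
			if removed[img] {
				continue
			}
			prunable := true
			for _, d := range deps[img] {
				if !removed[d] {
					prunable = false
				}
			}
			if prunable {
				out = append(out, img)
			}
		}
		return out
	}
	remove := func(id string) error { removed[id] = true; return nil }
	fmt.Println(pruneUntilStable(collect, remove)) // [child1 child2 parent]
}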