Diffstat (limited to 'pkg/domain')
-rw-r--r--  pkg/domain/entities/system.go        |  1
-rw-r--r--  pkg/domain/infra/abi/containers.go   | 36
-rw-r--r--  pkg/domain/infra/abi/network.go      | 86
-rw-r--r--  pkg/domain/infra/abi/play.go         |  6
-rw-r--r--  pkg/domain/infra/abi/pods.go         |  4
-rw-r--r--  pkg/domain/infra/abi/system.go       | 54
-rw-r--r--  pkg/domain/infra/runtime_libpod.go   |  4
-rw-r--r--  pkg/domain/infra/tunnel/helpers.go   |  4
8 files changed, 128 insertions(+), 67 deletions(-)
diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go
index 21026477d..331d2bcdc 100644
--- a/pkg/domain/entities/system.go
+++ b/pkg/domain/entities/system.go
@@ -28,6 +28,7 @@ type SystemPruneReport struct {
PodPruneReport []*PodPruneReport
ContainerPruneReports []*reports.PruneReport
ImagePruneReports []*reports.PruneReport
+ NetworkPruneReports []*reports.PruneReport
VolumePruneReports []*reports.PruneReport
ReclaimedSpace uint64
}
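For context (not part of the commit): a minimal, self-contained sketch of how a caller could surface the new NetworkPruneReports field next to the existing reclaimed-space total. The types here are simplified stand-ins for entities.SystemPruneReport and reports.PruneReport, not the podman packages themselves.

```go
package main

import "fmt"

// PruneReport and SystemPruneReport are simplified stand-ins for
// reports.PruneReport and entities.SystemPruneReport, kept local so the
// sketch compiles on its own.
type PruneReport struct {
	Id   string
	Err  error
	Size uint64
}

type SystemPruneReport struct {
	NetworkPruneReports []*PruneReport
	ReclaimedSpace      uint64
}

// printNetworkPrunes shows how a front end could surface the new per-network
// results next to the existing reclaimed-space total.
func printNetworkPrunes(r *SystemPruneReport) {
	for _, rep := range r.NetworkPruneReports {
		if rep.Err != nil {
			fmt.Printf("failed to remove network %s: %v\n", rep.Id, rep.Err)
			continue
		}
		fmt.Println("removed network", rep.Id)
	}
	fmt.Printf("total reclaimed space: %d bytes\n", r.ReclaimedSpace)
}

func main() {
	printNetworkPrunes(&SystemPruneReport{
		NetworkPruneReports: []*PruneReport{{Id: "mynet"}},
	})
}
```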
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index c7cd0cb56..281e448f6 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -16,7 +16,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
- "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/logs"
"github.com/containers/podman/v4/pkg/checkpoint"
"github.com/containers/podman/v4/pkg/domain/entities"
@@ -38,7 +37,7 @@ import (
)
// getContainersAndInputByContext gets containers whether all, latest, or a slice of names/ids
-// is specified. It also returns a list of the corresponding input name used to lookup each container.
+// is specified. It also returns a list of the corresponding input name used to look up each container.
func getContainersAndInputByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, rawInput []string, err error) {
var ctr *libpod.Container
ctrs = []*libpod.Container{}
@@ -183,7 +182,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
if err != nil {
// Issue #7384 and #11384: If the container is configured for
// auto-removal, it might already have been removed at this point.
- // We still need to to cleanup since we do not know if the other cleanup process is successful
+ // We still need to clean up since we do not know if the other cleanup process is successful
if c.AutoRemove() && (errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved)) {
return nil
}
@@ -488,7 +487,7 @@ func (ic *ContainerEngine) ContainerTop(ctx context.Context, options entities.To
container, err = ic.Libpod.LookupContainer(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to lookup requested container")
+ return nil, errors.Wrap(err, "unable to look up requested container")
}
// Run Top.
@@ -635,13 +634,13 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
containers, err = getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
default:
for _, nameOrID := range namesOrIds {
- logrus.Debugf("lookup container: %q", nameOrID)
+ logrus.Debugf("look up container: %q", nameOrID)
ctr, err := ic.Libpod.LookupContainer(nameOrID)
if err == nil {
containers = append(containers, ctr)
} else {
// If container was not found, check if this is a checkpoint image
- logrus.Debugf("lookup image: %q", nameOrID)
+ logrus.Debugf("look up image: %q", nameOrID)
img, _, err := ic.Libpod.LibimageRuntime().LookupImage(nameOrID, nil)
if err != nil {
return nil, fmt.Errorf("no such container or image: %s", nameOrID)
@@ -939,6 +938,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
}
return reports, errors.Wrapf(err, "unable to start container %s", ctr.ID())
}
+
exitCode = ic.GetContainerExitCode(ctx, ctr)
reports = append(reports, &entities.ContainerStartReport{
Id: ctr.ID(),
@@ -1099,25 +1099,11 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
func (ic *ContainerEngine) GetContainerExitCode(ctx context.Context, ctr *libpod.Container) int {
exitCode, err := ctr.Wait(ctx)
- if err == nil {
- return int(exitCode)
- }
- if errors.Cause(err) != define.ErrNoSuchCtr {
- logrus.Errorf("Could not retrieve exit code: %v", err)
+ if err != nil {
+ logrus.Errorf("Waiting for container %s: %v", ctr.ID(), err)
return define.ExecErrorCodeNotFound
}
- // Make 4 attempt with 0.25s backoff between each for 1 second total
- var event *events.Event
- for i := 0; i < 4; i++ {
- event, err = ic.Libpod.GetLastContainerEvent(ctx, ctr.ID(), events.Exited)
- if err != nil {
- time.Sleep(250 * time.Millisecond)
- continue
- }
- return event.ContainerExitCode
- }
- logrus.Errorf("Could not retrieve exit code from event: %v", err)
- return define.ExecErrorCodeNotFound
+ return int(exitCode)
}
func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []string, options entities.ContainerLogsOptions) error {
@@ -1194,12 +1180,12 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
var timeout *uint
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout)
if err != nil {
- report.RmErr = errors.Wrapf(err, "failed to cleanup and remove container %v", ctr.ID())
+ report.RmErr = errors.Wrapf(err, "failed to clean up and remove container %v", ctr.ID())
}
} else {
err := ctr.Cleanup(ctx)
if err != nil {
- report.CleanErr = errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
+ report.CleanErr = errors.Wrapf(err, "failed to clean up container %v", ctr.ID())
}
}
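The exit-code change above drops the event-log fallback with its 4×250ms retry loop; Wait becomes the single source of truth, and any error maps to the generic not-found code. A minimal sketch of that shape, with waitForExit and execErrorCodeNotFound as hypothetical stand-ins for the libpod call and constant:

```go
package main

import (
	"context"
	"fmt"
)

// execErrorCodeNotFound is a stand-in value; the real constant lives in
// libpod/define.
const execErrorCodeNotFound = 127

// waitForExit is a hypothetical stand-in for (*libpod.Container).Wait.
func waitForExit(ctx context.Context, id string) (int32, error) {
	return 3, nil // pretend the container exited with status 3
}

// getExitCode mirrors the simplified logic: wait, and on any error report the
// generic not-found code instead of retrying against the events log.
func getExitCode(ctx context.Context, id string) int {
	code, err := waitForExit(ctx, id)
	if err != nil {
		fmt.Printf("waiting for container %s: %v\n", id, err)
		return execErrorCodeNotFound
	}
	return int(code)
}

func main() {
	fmt.Println(getExitCode(context.Background(), "abc123"))
}
```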
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 47f7917f4..8b95607f4 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -2,6 +2,7 @@ package abi
import (
"context"
+ "strconv"
"github.com/containers/common/libnetwork/types"
netutil "github.com/containers/common/libnetwork/util"
@@ -12,10 +13,39 @@ import (
)
func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]types.Network, error) {
+ // dangling filter is not provided by netutil
+ var wantDangling bool
+
+ val, filterDangling := options.Filters["dangling"]
+ if filterDangling {
+ switch len(val) {
+ case 0:
+ return nil, errors.Errorf("got no values for filter key \"dangling\"")
+ case 1:
+ var err error
+ wantDangling, err = strconv.ParseBool(val[0])
+ if err != nil {
+ return nil, errors.Errorf("invalid dangling filter value \"%v\"", val[0])
+ }
+ delete(options.Filters, "dangling")
+ default:
+ return nil, errors.Errorf("got more than one value for filter key \"dangling\"")
+ }
+ }
+
filters, err := netutil.GenerateNetworkFilters(options.Filters)
if err != nil {
return nil, err
}
+
+ if filterDangling {
+ danglingFilterFunc, err := ic.createDanglingFilterFunc(wantDangling)
+ if err != nil {
+ return nil, err
+ }
+
+ filters = append(filters, danglingFilterFunc)
+ }
nets, err := ic.Libpod.Network().NetworkList(filters...)
return nets, err
}
@@ -142,8 +172,35 @@ func (ic *ContainerEngine) NetworkExists(ctx context.Context, networkname string
}, nil
}
-// Network prune removes unused cni networks
+// Network prune removes unused networks
func (ic *ContainerEngine) NetworkPrune(ctx context.Context, options entities.NetworkPruneOptions) ([]*entities.NetworkPruneReport, error) {
+ // get all filters
+ filters, err := netutil.GenerateNetworkPruneFilters(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ danglingFilterFunc, err := ic.createDanglingFilterFunc(true)
+ if err != nil {
+ return nil, err
+ }
+ filters = append(filters, danglingFilterFunc)
+ nets, err := ic.Libpod.Network().NetworkList(filters...)
+ if err != nil {
+ return nil, err
+ }
+
+ pruneReport := make([]*entities.NetworkPruneReport, 0, len(nets))
+ for _, net := range nets {
+ pruneReport = append(pruneReport, &entities.NetworkPruneReport{
+ Name: net.Name,
+ Error: ic.Libpod.Network().NetworkRemove(net.Name),
+ })
+ }
+ return pruneReport, nil
+}
+
+// danglingFilter function is special and not implemented in libnetwork filters
+func (ic *ContainerEngine) createDanglingFilterFunc(wantDangling bool) (types.FilterFunc, error) {
cons, err := ic.Libpod.GetAllContainers()
if err != nil {
return nil, err
@@ -163,31 +220,12 @@ func (ic *ContainerEngine) NetworkPrune(ctx context.Context, options entities.Ne
// ignore the default network, this one cannot be deleted
networksToKeep[ic.Libpod.GetDefaultNetworkName()] = true
- // get all filters
- filters, err := netutil.GenerateNetworkPruneFilters(options.Filters)
- if err != nil {
- return nil, err
- }
- danglingFilterFunc := func(net types.Network) bool {
+ return func(net types.Network) bool {
for network := range networksToKeep {
if network == net.Name {
- return false
+ return !wantDangling
}
}
- return true
- }
- filters = append(filters, danglingFilterFunc)
- nets, err := ic.Libpod.Network().NetworkList(filters...)
- if err != nil {
- return nil, err
- }
-
- pruneReport := make([]*entities.NetworkPruneReport, 0, len(nets))
- for _, net := range nets {
- pruneReport = append(pruneReport, &entities.NetworkPruneReport{
- Name: net.Name,
- Error: ic.Libpod.Network().NetworkRemove(net.Name),
- })
- }
- return pruneReport, nil
+ return wantDangling
+ }, nil
}
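The dangling filter above is a closure over the set of networks still referenced by containers (plus the default network, which is never prunable); it keeps a network when its referenced/unreferenced status matches the requested dangling value. A self-contained sketch under simplified types, where network and filterFunc stand in for types.Network and types.FilterFunc:

```go
package main

import "fmt"

// network and filterFunc are simplified stand-ins for types.Network and
// types.FilterFunc from containers/common.
type network struct{ Name string }

type filterFunc func(network) bool

// danglingFilter keeps a network when its "in use" status matches what the
// caller asked for: dangling=true selects unreferenced networks,
// dangling=false selects referenced ones.
func danglingFilter(inUse map[string]bool, wantDangling bool) filterFunc {
	return func(net network) bool {
		if inUse[net.Name] {
			return !wantDangling
		}
		return wantDangling
	}
}

func main() {
	// in podman the map is built from container network configs plus the
	// default network, which must never be pruned
	inUse := map[string]bool{"podman": true, "web": true}

	keep := danglingFilter(inUse, true)
	for _, n := range []network{{"podman"}, {"web"}, {"orphaned"}} {
		fmt.Printf("%s dangling=%v\n", n.Name, keep(n))
	}
}
```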
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index e04ab3a1a..e14a819fa 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -31,7 +31,7 @@ import (
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- yamlv2 "gopkg.in/yaml.v2"
+ yamlv3 "gopkg.in/yaml.v3"
)
// createServiceContainer creates a container that can later on
@@ -790,7 +790,7 @@ func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
func splitMultiDocYAML(yamlContent []byte) ([][]byte, error) {
var documentList [][]byte
- d := yamlv2.NewDecoder(bytes.NewReader(yamlContent))
+ d := yamlv3.NewDecoder(bytes.NewReader(yamlContent))
for {
var o interface{}
// read individual document
@@ -804,7 +804,7 @@ func splitMultiDocYAML(yamlContent []byte) ([][]byte, error) {
if o != nil {
// back to bytes
- document, err := yamlv2.Marshal(o)
+ document, err := yamlv3.Marshal(o)
if err != nil {
return nil, errors.Wrapf(err, "individual doc yaml could not be marshalled")
}
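splitMultiDocYAML keeps the same decode-until-EOF loop; only the YAML library version changes. A runnable sketch of that loop with gopkg.in/yaml.v3, simplified from the function above (error wrapping omitted):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"gopkg.in/yaml.v3"
)

// splitDocs reads a multi-document YAML stream one document at a time and
// re-marshals each non-empty document back to bytes.
func splitDocs(content []byte) ([][]byte, error) {
	var docs [][]byte
	dec := yaml.NewDecoder(bytes.NewReader(content))
	for {
		var o interface{}
		if err := dec.Decode(&o); err != nil {
			if errors.Is(err, io.EOF) {
				break // no more documents
			}
			return nil, err
		}
		if o == nil {
			continue // skip empty documents
		}
		doc, err := yaml.Marshal(o)
		if err != nil {
			return nil, err
		}
		docs = append(docs, doc)
	}
	return docs, nil
}

func main() {
	docs, err := splitDocs([]byte("a: 1\n---\nb: 2\n"))
	fmt.Println(len(docs), err)
}
```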
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
index 8638f4783..3e9cb7f5e 100644
--- a/pkg/domain/infra/abi/pods.go
+++ b/pkg/domain/infra/abi/pods.go
@@ -393,7 +393,7 @@ func (ic *ContainerEngine) PodTop(ctx context.Context, options entities.PodTopOp
pod, err = ic.Libpod.LookupPod(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to lookup requested container")
+ return nil, errors.Wrap(err, "unable to look up requested container")
}
// Run Top.
@@ -494,7 +494,7 @@ func (ic *ContainerEngine) PodInspect(ctx context.Context, options entities.PodI
pod, err = ic.Libpod.LookupPod(options.NameOrID)
}
if err != nil {
- return nil, errors.Wrap(err, "unable to lookup requested container")
+ return nil, errors.Wrap(err, "unable to look up requested container")
}
inspect, err := pod.Inspect()
if err != nil {
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 762f0d79a..6e26026d4 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -125,8 +125,14 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
paths = append(paths, ctr.Config().ConmonPidFile)
}
- became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths)
- utils.MovePauseProcessToScope(pausePidPath)
+ if len(paths) > 0 {
+ became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths)
+ } else {
+ became, ret, err = rootless.BecomeRootInUserNS(pausePidPath)
+ if err == nil {
+ utils.MovePauseProcessToScope(pausePidPath)
+ }
+ }
if err != nil {
logrus.Error(errors.Wrapf(err, "invalid internal status, try resetting the pause process with %q", os.Args[0]+" system migrate"))
os.Exit(1)
@@ -137,7 +143,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
return nil
}
-// SystemPrune removes unused data from the system. Pruning pods, containers, volumes and images.
+// SystemPrune removes unused data from the system. Pruning pods, containers, networks, volumes and images.
func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) {
var systemPruneReport = new(entities.SystemPruneReport)
filters := []string{}
@@ -148,6 +154,9 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
found := true
for found {
found = false
+
+ // TODO: Figure out cleaner way to handle all of the different PruneOptions
+ // Remove all unused pods.
podPruneReport, err := ic.prunePodHelper(ctx)
if err != nil {
return nil, err
@@ -155,9 +164,10 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
if len(podPruneReport) > 0 {
found = true
}
+
systemPruneReport.PodPruneReport = append(systemPruneReport.PodPruneReport, podPruneReport...)
- // TODO: Figure out cleaner way to handle all of the different PruneOptions
+ // Remove all unused containers.
containerPruneOptions := entities.ContainerPruneOptions{}
containerPruneOptions.Filters = (url.Values)(options.Filters)
@@ -165,16 +175,18 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
if err != nil {
return nil, err
}
+
reclaimedSpace += reports.PruneReportsSize(containerPruneReports)
systemPruneReport.ContainerPruneReports = append(systemPruneReport.ContainerPruneReports, containerPruneReports...)
+
+ // Remove all unused images.
imagePruneOptions := entities.ImagePruneOptions{
All: options.All,
Filter: filters,
}
+
imageEngine := ImageEngine{Libpod: ic.Libpod}
imagePruneReports, err := imageEngine.Prune(ctx, imagePruneOptions)
- reclaimedSpace += reports.PruneReportsSize(imagePruneReports)
-
if err != nil {
return nil, err
}
@@ -182,10 +194,33 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
found = true
}
+ reclaimedSpace += reports.PruneReportsSize(imagePruneReports)
systemPruneReport.ImagePruneReports = append(systemPruneReport.ImagePruneReports, imagePruneReports...)
+
+ // Remove all unused networks.
+ networkPruneOptions := entities.NetworkPruneOptions{}
+ networkPruneOptions.Filters = options.Filters
+
+ networkPruneReport, err := ic.NetworkPrune(ctx, networkPruneOptions)
+ if err != nil {
+ return nil, err
+ }
+ if len(networkPruneReport) > 0 {
+ found = true
+ }
+ for _, net := range networkPruneReport {
+ systemPruneReport.NetworkPruneReports = append(systemPruneReport.NetworkPruneReports, &reports.PruneReport{
+ Id: net.Name,
+ Err: net.Error,
+ Size: 0,
+ })
+ }
+
+ // Remove unused volume data.
if options.Volume {
volumePruneOptions := entities.VolumePruneOptions{}
volumePruneOptions.Filters = (url.Values)(options.Filters)
+
volumePruneReport, err := ic.VolumePrune(ctx, volumePruneOptions)
if err != nil {
return nil, err
@@ -193,6 +228,7 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
if len(volumePruneReport) > 0 {
found = true
}
+
reclaimedSpace += reports.PruneReportsSize(volumePruneReport)
systemPruneReport.VolumePruneReports = append(systemPruneReport.VolumePruneReports, volumePruneReport...)
}
@@ -368,9 +404,9 @@ func (ic *ContainerEngine) Unshare(ctx context.Context, args []string, options e
}
// Make sure to unlock, unshare can run for a long time.
rootlessNetNS.Lock.Unlock()
- // We do not want to cleanup the netns after unshare.
- // The problem is that we cannot know if we need to cleanup and
- // secondly unshare should allow user to setup the namespace with
+ // We do not want to clean up the netns after unshare.
+ // The problem is that we cannot know if we need to clean up and
+ // secondly unshare should allow user to set up the namespace with
// special things, e.g. potentially macvlan or something like that.
return rootlessNetNS.Do(unshare)
}
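SystemPrune repeats its pod/container/image/network (and optionally volume) passes until a full pass removes nothing, since removing one kind of object can leave another unused; the new network results are folded into the generic report with a zero size. A sketch of that loop shape, with hypothetical stand-ins for the podman types and helpers:

```go
package main

import "fmt"

// netPruneReport and pruneReport are simplified stand-ins for
// entities.NetworkPruneReport and reports.PruneReport.
type netPruneReport struct {
	Name  string
	Error error
}

type pruneReport struct {
	Id   string
	Err  error
	Size uint64
}

// pruneNetworks is a hypothetical stand-in for NetworkPrune; here it finds
// nothing, so the loop below settles after a single pass.
func pruneNetworks() []*netPruneReport {
	return nil
}

func main() {
	var all []*pruneReport
	found := true
	for found {
		found = false
		// ... pods, containers and images would be pruned here first ...
		netReports := pruneNetworks()
		if len(netReports) > 0 {
			found = true // something was removed, another pass may free more
		}
		for _, n := range netReports {
			// networks occupy no measurable disk space, so Size stays 0
			all = append(all, &pruneReport{Id: n.Name, Err: n.Error})
		}
	}
	fmt.Printf("pruned %d networks\n", len(all))
}
```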
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
index 03e7ffb5d..162025969 100644
--- a/pkg/domain/infra/runtime_libpod.go
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -342,7 +342,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
options.HostUIDMapping = false
options.HostGIDMapping = false
- // Simply ignore the setting and do not setup an inner namespace for root as it is a no-op
+ // Simply ignore the setting and do not set up an inner namespace for root as it is a no-op
return &options, nil
}
@@ -394,7 +394,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin
// StartWatcher starts a new SIGHUP go routine for the current config.
func StartWatcher(rt *libpod.Runtime) {
- // Setup the signal notifier
+ // Set up the signal notifier
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGHUP)
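StartWatcher hangs a goroutine off a SIGHUP-notified channel so the runtime can reload its configuration. A minimal sketch of that pattern, with the reload callback as a placeholder rather than podman's actual config reload:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// startWatcher installs a SIGHUP handler and invokes reload on each signal.
func startWatcher(reload func()) {
	// Set up the signal notifier
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)

	go func() {
		for range ch {
			// SIGHUP received: re-read configuration
			reload()
		}
	}()
}

func main() {
	startWatcher(func() { fmt.Println("reloading configuration") })
	// block forever; in a real daemon the main loop would run here
	select {}
}
```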
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
index 5b14fac37..6c043465c 100644
--- a/pkg/domain/infra/tunnel/helpers.go
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -20,7 +20,7 @@ func getContainersByContext(contextWithConnection context.Context, all, ignore b
func getContainersAndInputByContext(contextWithConnection context.Context, all, ignore bool, namesOrIDs []string) ([]entities.ListContainer, []string, error) {
if all && len(namesOrIDs) > 0 {
- return nil, nil, errors.New("cannot lookup containers and all")
+ return nil, nil, errors.New("cannot look up containers and all")
}
options := new(containers.ListOptions).WithAll(true).WithSync(true)
allContainers, err := containers.List(contextWithConnection, options)
@@ -77,7 +77,7 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all,
func getPodsByContext(contextWithConnection context.Context, all bool, namesOrIDs []string) ([]*entities.ListPodsReport, error) {
if all && len(namesOrIDs) > 0 {
- return nil, errors.New("cannot lookup specific pods and all")
+ return nil, errors.New("cannot look up specific pods and all")
}
allPods, err := pods.List(contextWithConnection, nil)