Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/api/handlers/libpod/images.go | 3
-rw-r--r-- | pkg/autoupdate/autoupdate.go | 461
-rw-r--r-- | pkg/bindings/images/types.go | 2
-rw-r--r-- | pkg/bindings/images/types_remove_options.go | 15
-rw-r--r-- | pkg/domain/entities/images.go | 2
-rw-r--r-- | pkg/domain/infra/abi/containers.go | 46
-rw-r--r-- | pkg/domain/infra/abi/images.go | 3
-rw-r--r-- | pkg/domain/infra/tunnel/containers.go | 31
-rw-r--r-- | pkg/domain/infra/tunnel/helpers.go | 11
-rw-r--r-- | pkg/domain/infra/tunnel/images.go | 2
-rw-r--r-- | pkg/k8s.io/api/core/v1/types.go | 3
-rw-r--r-- | pkg/machine/wsl/machine.go | 134
12 files changed, 378 insertions, 335 deletions
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go index 67943ecf1..bccaad932 100644 --- a/pkg/api/handlers/libpod/images.go +++ b/pkg/api/handlers/libpod/images.go @@ -547,6 +547,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) { Ignore bool `schema:"ignore"` LookupManifest bool `schema:"lookupManifest"` Images []string `schema:"images"` + NoPrune bool `schema:"noprune"` }{} if err := decoder.Decode(&query, r.URL.Query()); err != nil { @@ -554,7 +555,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) { return } - opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore, LookupManifest: query.LookupManifest} + opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore, LookupManifest: query.LookupManifest, NoPrune: query.NoPrune} imageEngine := abi.ImageEngine{Libpod: runtime} rmReport, rmErrors := imageEngine.Remove(r.Context(), query.Images, opts) strErrs := errorhandling.ErrorsToStrings(rmErrors) diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go index 8d9991622..297d6640e 100644 --- a/pkg/autoupdate/autoupdate.go +++ b/pkg/autoupdate/autoupdate.go @@ -43,15 +43,41 @@ const ( // Map for easy lookups of supported policies. var supportedPolicies = map[string]Policy{ - "": PolicyDefault, - "disabled": PolicyDefault, - "image": PolicyRegistryImage, - "registry": PolicyRegistryImage, - "local": PolicyLocalImage, + "": PolicyDefault, + string(PolicyDefault): PolicyDefault, + "image": PolicyRegistryImage, // Deprecated in favor of PolicyRegistryImage + string(PolicyRegistryImage): PolicyRegistryImage, + string(PolicyLocalImage): PolicyLocalImage, } -// policyMapper is used for tying a container to it's autoupdate policy -type policyMapper map[Policy][]*libpod.Container +// updater includes shared state for auto-updating one or more containers. +type updater struct { + conn *dbus.Conn // DBUS connection + options *entities.AutoUpdateOptions // User-specified options + unitToTasks map[string][]*task // Keeps track of tasks per unit + updatedRawImages map[string]bool // Keeps track of updated images + runtime *libpod.Runtime // The libpod runtime +} + +const ( + statusFailed = "failed" // The update has failed + statusUpdated = "true" // The update succeeded + statusNotUpdated = "false" // No update was needed + statusPending = "pending" // The update is pending (see options.DryRun) + statusRolledBack = "rolled back" // Rollback after a failed update +) + +// task includes data and state for updating a container +type task struct { + authfile string // Container-specific authfile + auto *updater // Reverse pointer to the updater + container *libpod.Container // Container to update + policy Policy // Update policy + image *libimage.Image // Original image before the update + rawImageName string // The container's raw image name + status string // Auto-update status + unit string // Name of the systemd unit +} // LookupPolicy looks up the corresponding Policy for the specified // string. If none is found, an error is returned including the list of @@ -116,23 +142,22 @@ func ValidateImageReference(imageName string) error { // It returns a slice of successfully restarted systemd units and a slice of // errors encountered during auto update. func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) { - // Create a map from `image ID -> []*Container`.
- containerMap, errs := imageContainersMap(runtime) - if len(containerMap) == 0 { - return nil, errs - } + // Note that (most) errors are non-fatal such that a single + // misconfigured container does not prevent others from being updated + // (which could be a security threat). - // Create a map from `image ID -> *libimage.Image` for image lookups. - listOptions := &libimage.ListImagesOptions{ - Filters: []string{"readonly=false"}, + auto := updater{ + options: &options, + runtime: runtime, + updatedRawImages: make(map[string]bool), } - imagesSlice, err := runtime.LibimageRuntime().ListImages(ctx, nil, listOptions) - if err != nil { - return nil, []error{err} - } - imageMap := make(map[string]*libimage.Image) - for i := range imagesSlice { - imageMap[imagesSlice[i].ID()] = imagesSlice[i] + + // Find auto-update tasks and assemble them by unit. + errors := auto.assembleTasks(ctx) + + // Nothing to do. + if len(auto.unitToTasks) == 0 { + return nil, errors } // Connect to DBUS. @@ -142,185 +167,176 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A return nil, []error{err} } defer conn.Close() + auto.conn = conn runtime.NewSystemEvent(events.AutoUpdate) // Update all images/containers according to their auto-update policy. var allReports []*entities.AutoUpdateReport - updatedRawImages := make(map[string]bool) - for imageID, policyMapper := range containerMap { - image, exists := imageMap[imageID] - if !exists { - errs = append(errs, fmt.Errorf("container image ID %q not found in local storage", imageID)) - return nil, errs + for unit, tasks := range auto.unitToTasks { + // Sanity check: only one task per unit is currently supported; multiple tasks per unit will be supported in the future. + if len(tasks) != 1 { + errors = append(errors, fmt.Errorf("only 1 task per unit supported but unit %s has %d", unit, len(tasks))) + return nil, errors } - for _, ctr := range policyMapper[PolicyRegistryImage] { - report, err := autoUpdateRegistry(ctx, image, ctr, updatedRawImages, &options, conn, runtime) - if err != nil { - errs = append(errs, err) - } - if report != nil { - allReports = append(allReports, report) - } - } + for _, task := range tasks { + err := func() error { + // Transition from state to state. Will be + // split into multiple loops in the future to + // support more than one container/task per + // unit.
+ updateAvailable, err := task.updateAvailable(ctx) + if err != nil { + task.status = statusFailed + return fmt.Errorf("checking image updates for container %s: %w", task.container.ID(), err) + } + + if !updateAvailable { + task.status = statusNotUpdated + return nil + } + + if options.DryRun { + task.status = statusPending + return nil + } + + if err := task.update(ctx); err != nil { + task.status = statusFailed + return fmt.Errorf("updating image for container %s: %w", task.container.ID(), err) + } + + updateError := auto.restartSystemdUnit(ctx, unit) + if updateError == nil { + task.status = statusUpdated + return nil + } + + if !options.Rollback { + task.status = statusFailed + return fmt.Errorf("restarting unit %s for container %s: %w", task.unit, task.container.ID(), updateError) + } + + if err := task.rollbackImage(); err != nil { + task.status = statusFailed + return fmt.Errorf("rolling back image for container %s: %w", task.container.ID(), err) + } + + if err := auto.restartSystemdUnit(ctx, unit); err != nil { + task.status = statusFailed + return fmt.Errorf("restarting unit %s for container %s during rollback: %w", task.unit, task.container.ID(), err) + } + + task.status = statusRolledBack + return nil + }() - for _, ctr := range policyMapper[PolicyLocalImage] { - report, err := autoUpdateLocally(ctx, image, ctr, &options, conn, runtime) if err != nil { - errs = append(errs, err) - } - if report != nil { - allReports = append(allReports, report) + errors = append(errors, err) } + allReports = append(allReports, task.report()) } } - return allReports, errs + return allReports, errors } -// autoUpdateRegistry updates the image/container according to the "registry" policy. -func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.Container, updatedRawImages map[string]bool, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) { - cid := ctr.ID() - rawImageName := ctr.RawImageName() - if rawImageName == "" { - return nil, fmt.Errorf("registry auto-updating container %q: raw-image name is empty", cid) +// report creates an auto-update report for the task. +func (t *task) report() *entities.AutoUpdateReport { + return &entities.AutoUpdateReport{ + ContainerID: t.container.ID(), + ContainerName: t.container.Name(), + ImageName: t.container.RawImageName(), + Policy: string(t.policy), + SystemdUnit: t.unit, + Updated: t.status, } +} - labels := ctr.Labels() - unit, exists := labels[systemdDefine.EnvVariable] - if !exists { - return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable) +// updateAvailable returns whether an update for the task is available. +func (t *task) updateAvailable(ctx context.Context) (bool, error) { + switch t.policy { + case PolicyRegistryImage: + return t.registryUpdateAvailable(ctx) + case PolicyLocalImage: + return t.localUpdateAvailable() + default: + return false, fmt.Errorf("unexpected auto-update policy %s for container %s", t.policy, t.container.ID()) } +} - report := &entities.AutoUpdateReport{ - ContainerID: cid, - ContainerName: ctr.Name(), - ImageName: rawImageName, - Policy: PolicyRegistryImage, - SystemdUnit: unit, - Updated: "failed", +// update the task according to its auto-update policy. +func (t *task) update(ctx context.Context) error { + switch t.policy { + case PolicyRegistryImage: + return t.registryUpdate(ctx) + case PolicyLocalImage: + // Nothing to do as the image is already available in the local storage.
+ return nil + default: + return fmt.Errorf("unexpected auto-update policy %s for container %s", t.policy, t.container.ID()) } +} - if _, updated := updatedRawImages[rawImageName]; updated { - logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName) - if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil { - return report, err - } - report.Updated = "true" - return report, nil +// registryUpdateAvailable returns whether a new image on the registry is available. +func (t *task) registryUpdateAvailable(ctx context.Context) (bool, error) { + // The newer image has already been pulled for another task, so we know + // there's a newer one available. + if _, exists := t.auto.updatedRawImages[t.rawImageName]; exists { + return true, nil } - authfile := getAuthfilePath(ctr, options) - needsUpdate, err := newerRemoteImageAvailable(ctx, image, rawImageName, authfile) + remoteRef, err := docker.ParseReference("//" + t.rawImageName) if err != nil { - return report, fmt.Errorf("registry auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err) - } - - if !needsUpdate { - report.Updated = "false" - return report, nil - } - - if options.DryRun { - report.Updated = "pending" - return report, nil - } - - if _, err := updateImage(ctx, runtime, rawImageName, authfile); err != nil { - return report, fmt.Errorf("registry auto-updating container %q: image update for %q failed: %w", cid, rawImageName, err) - } - updatedRawImages[rawImageName] = true - - logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName) - updateErr := restartSystemdUnit(ctx, ctr, unit, conn) - if updateErr == nil { - report.Updated = "true" - return report, nil - } - - if !options.Rollback { - return report, updateErr - } - - // To fallback, simply retag the old image and restart the service. - if err := image.Tag(rawImageName); err != nil { - return report, fmt.Errorf("falling back to previous image: %w", err) - } - if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil { - return report, fmt.Errorf("restarting unit with old image during fallback: %w", err) + return false, err } - - report.Updated = "rolled back" - return report, nil + options := &libimage.HasDifferentDigestOptions{AuthFilePath: t.authfile} + return t.image.HasDifferentDigest(ctx, remoteRef, options) } -// autoUpdateRegistry updates the image/container according to the "local" policy. -func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.Container, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) { - cid := ctr.ID() - rawImageName := ctr.RawImageName() - if rawImageName == "" { - return nil, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", cid) +// registryUpdate pulls down the image from the registry. +func (t *task) registryUpdate(ctx context.Context) error { + // The newer image has already been pulled for another task. 
+ if _, exists := t.auto.updatedRawImages[t.rawImageName]; exists { + return nil } - labels := ctr.Labels() - unit, exists := labels[systemdDefine.EnvVariable] - if !exists { - return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable) + pullOptions := &libimage.PullOptions{} + pullOptions.AuthFilePath = t.authfile + pullOptions.Writer = os.Stderr + if _, err := t.auto.runtime.LibimageRuntime().Pull(ctx, t.rawImageName, config.PullPolicyAlways, pullOptions); err != nil { + return err } - report := &entities.AutoUpdateReport{ - ContainerID: cid, - ContainerName: ctr.Name(), - ImageName: rawImageName, - Policy: PolicyLocalImage, - SystemdUnit: unit, - Updated: "failed", - } + t.auto.updatedRawImages[t.rawImageName] = true + return nil +} - needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName) +// localUpdateAvailable returns whether a new image in the local storage is available. +func (t *task) localUpdateAvailable() (bool, error) { + localImg, _, err := t.auto.runtime.LibimageRuntime().LookupImage(t.rawImageName, nil) if err != nil { - return report, fmt.Errorf("locally auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err) - } - - if !needsUpdate { - report.Updated = "false" - return report, nil - } - - if options.DryRun { - report.Updated = "pending" - return report, nil - } - - logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName) - updateErr := restartSystemdUnit(ctx, ctr, unit, conn) - if updateErr == nil { - report.Updated = "true" - return report, nil - } - - if !options.Rollback { - return report, updateErr + return false, err } + return localImg.Digest().String() != t.image.Digest().String(), nil +} +// rollbackImage rolls back the task's image to the previous version before the update. +func (t *task) rollbackImage() error { // To fallback, simply retag the old image and restart the service. - if err := image.Tag(rawImageName); err != nil { - return report, fmt.Errorf("falling back to previous image: %w", err) - } - if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil { - return report, fmt.Errorf("restarting unit with old image during fallback: %w", err) + if err := t.image.Tag(t.rawImageName); err != nil { + return err } - - report.Updated = "rolled back" - return report, nil + t.auto.updatedRawImages[t.rawImageName] = false + return nil } // restartSystemdUnit restarts the systemd unit the container is running in. 
-func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string, conn *dbus.Conn) error { +func (u *updater) restartSystemdUnit(ctx context.Context, unit string) error { restartChan := make(chan string) - if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil { - return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: %w", ctr.ID(), unit, err) + if _, err := u.conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil { + return err } // Wait for the restart to finish and actually check if it was @@ -329,25 +345,34 @@ func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string, switch result { case "done": - logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID()) + logrus.Infof("Successfully restarted systemd unit %q", unit) return nil default: - return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: expected %q but received %q", ctr.ID(), unit, "done", result) + return fmt.Errorf("expected %q but received %q", "done", result) } } -// imageContainersMap generates a map[image ID] -> [containers using the image] -// of all containers with a valid auto-update policy. -func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []error) { - allContainers, err := runtime.GetAllContainers() +// assembleTasks assembles update tasks per unit and populates a mapping from +// `unit -> []*task` such that multiple containers _can_ run in a single unit. +func (u *updater) assembleTasks(ctx context.Context) []error { + // Assemble a map `image ID -> *libimage.Image` that we can consult + // later on for lookups. + imageMap, err := u.assembleImageMap(ctx) if err != nil { - return nil, []error{err} + return []error{err} } + allContainers, err := u.runtime.GetAllContainers() + if err != nil { + return []error{err} + } + + u.unitToTasks = make(map[string][]*task) + errors := []error{} - containerMap := make(map[string]policyMapper) - for _, ctr := range allContainers { + for _, c := range allContainers { + ctr := c state, err := ctr.State() if err != nil { errors = append(errors, err) @@ -358,77 +383,75 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []err continue } - // Only update containers with the specific label/policy set. + // Check the container's auto-update policy which is configured + // as a label. labels := ctr.Labels() value, exists := labels[Label] if !exists { continue } - policy, err := LookupPolicy(value) if err != nil { errors = append(errors, err) continue } - - // Skip labels not related to autoupdate if policy == PolicyDefault { continue - } else { - id, _ := ctr.Image() - policyMap, exists := containerMap[id] - if !exists { - policyMap = make(map[Policy][]*libpod.Container) - } - policyMap[policy] = append(policyMap[policy], ctr) - containerMap[id] = policyMap - // Now we know that `ctr` is configured for auto updates. } - } - return containerMap, errors -} + // Make sure the container runs in a systemd unit which is + // stored as a label at container creation. + unit, exists := labels[systemdDefine.EnvVariable] + if !exists { + errors = append(errors, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)) + continue + }

-// getAuthfilePath returns an authfile path, if set. The authfile label in the -// container, if set, as precedence over the one set in the options.
-func getAuthfilePath(ctr *libpod.Container, options *entities.AutoUpdateOptions) string { - labels := ctr.Labels() - authFilePath, exists := labels[AuthfileLabel] - if exists { - return authFilePath - } - return options.Authfile -} + id, _ := ctr.Image() + image, exists := imageMap[id] + if !exists { + err := fmt.Errorf("internal error: no image found for ID %s", id) + errors = append(errors, err) + continue + } -// newerRemoteImageAvailable returns true if there corresponding image on the remote -// registry is newer. -func newerRemoteImageAvailable(ctx context.Context, img *libimage.Image, origName string, authfile string) (bool, error) { - remoteRef, err := docker.ParseReference("//" + origName) - if err != nil { - return false, err - } - options := &libimage.HasDifferentDigestOptions{AuthFilePath: authfile} - return img.HasDifferentDigest(ctx, remoteRef, options) -} + rawImageName := ctr.RawImageName() + if rawImageName == "" { + errors = append(errors, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", ctr.ID())) + continue + } -// newerLocalImageAvailable returns true if the container and local image have different digests -func newerLocalImageAvailable(runtime *libpod.Runtime, img *libimage.Image, rawImageName string) (bool, error) { - localImg, _, err := runtime.LibimageRuntime().LookupImage(rawImageName, nil) - if err != nil { - return false, err + t := task{ + authfile: labels[AuthfileLabel], + auto: u, + container: ctr, + policy: policy, + image: image, + unit: unit, + rawImageName: rawImageName, + status: statusFailed, // must be updated later on + } + + // Add the task to the unit. + u.unitToTasks[unit] = append(u.unitToTasks[unit], &t) } - return localImg.Digest().String() != img.Digest().String(), nil -} -// updateImage pulls the specified image. -func updateImage(ctx context.Context, runtime *libpod.Runtime, name, authfile string) (*libimage.Image, error) { - pullOptions := &libimage.PullOptions{} - pullOptions.AuthFilePath = authfile - pullOptions.Writer = os.Stderr + return errors +} - pulledImages, err := runtime.LibimageRuntime().Pull(ctx, name, config.PullPolicyAlways, pullOptions) +// assembleImageMap creates a map from `image ID -> *libimage.Image` for image lookups. +func (u *updater) assembleImageMap(ctx context.Context) (map[string]*libimage.Image, error) { + listOptions := &libimage.ListImagesOptions{ + Filters: []string{"readonly=false"}, + } + imagesSlice, err := u.runtime.LibimageRuntime().ListImages(ctx, nil, listOptions) if err != nil { return nil, err } - return pulledImages[0], nil + imageMap := make(map[string]*libimage.Image) + for i := range imagesSlice { + imageMap[imagesSlice[i].ID()] = imagesSlice[i] + } + + return imageMap, nil } diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go index 0664afc1b..9783a8e18 100644 --- a/pkg/bindings/images/types.go +++ b/pkg/bindings/images/types.go @@ -15,6 +15,8 @@ type RemoveOptions struct { Ignore *bool // Confirms if given name is a manifest list and removes it, otherwise returns error. 
LookupManifest *bool + // Does not remove dangling parent images + NoPrune *bool } //go:generate go run ../generator/generator.go DiffOptions diff --git a/pkg/bindings/images/types_remove_options.go b/pkg/bindings/images/types_remove_options.go index 559ebcfd5..8972ac93c 100644 --- a/pkg/bindings/images/types_remove_options.go +++ b/pkg/bindings/images/types_remove_options.go @@ -76,3 +76,18 @@ func (o *RemoveOptions) GetLookupManifest() bool { } return *o.LookupManifest } + +// WithNoPrune set field NoPrune to given value +func (o *RemoveOptions) WithNoPrune(value bool) *RemoveOptions { + o.NoPrune = &value + return o +} + +// GetNoPrune returns value of field NoPrune +func (o *RemoveOptions) GetNoPrune() bool { + if o.NoPrune == nil { + var z bool + return z + } + return *o.NoPrune +} diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go index dad2dc6cc..21c1372b9 100644 --- a/pkg/domain/entities/images.go +++ b/pkg/domain/entities/images.go @@ -94,6 +94,8 @@ type ImageRemoveOptions struct { Ignore bool // Confirms if given name is a manifest list and removes it, otherwise returns error. LookupManifest bool + // NoPrune will not remove dangling images + NoPrune bool } // ImageRemoveReport is the response for removing one or more image(s) from storage diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go index ed149a869..0df36ed64 100644 --- a/pkg/domain/infra/abi/containers.go +++ b/pkg/domain/infra/abi/containers.go @@ -40,6 +40,7 @@ import ( // is specified. It also returns a list of the corresponding input name used to lookup each container. func getContainersAndInputByContext(all, latest bool, names []string, filters map[string][]string, runtime *libpod.Runtime) (ctrs []*libpod.Container, rawInput []string, err error) { var ctr *libpod.Container + var filteredCtrs []*libpod.Container ctrs = []*libpod.Container{} filterFuncs := make([]libpod.ContainerFilter, 0, len(filters)) @@ -58,7 +59,17 @@ func getContainersAndInputByContext(all, latest bool, names []string, filters ma } rawInput = []string{} for _, candidate := range ctrs { - rawInput = append(rawInput, candidate.ID()) + if len(names) > 0 { + for _, name := range names { + if candidate.ID() == name || candidate.Name() == name { + rawInput = append(rawInput, candidate.ID()) + filteredCtrs = append(filteredCtrs, candidate) + } + } + ctrs = filteredCtrs + } else { + rawInput = append(rawInput, candidate.ID()) + } } case all: ctrs, err = runtime.GetAllContainers() @@ -899,38 +910,7 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID s func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { reports := []*entities.ContainerStartReport{} var exitCode = define.ExecErrorCodeGeneric - containersNamesOrIds := namesOrIds - all := options.All - if len(options.Filters) > 0 { - all = false - filterFuncs := make([]libpod.ContainerFilter, 0, len(options.Filters)) - if len(options.Filters) > 0 { - for k, v := range options.Filters { - generatedFunc, err := dfilters.GenerateContainerFilterFuncs(k, v, ic.Libpod) - if err != nil { - return nil, err - } - filterFuncs = append(filterFuncs, generatedFunc) - } - } - candidates, err := ic.Libpod.GetContainers(filterFuncs...) 
- if err != nil { - return nil, err - } - containersNamesOrIds = []string{} - for _, candidate := range candidates { - if options.All { - containersNamesOrIds = append(containersNamesOrIds, candidate.ID()) - continue - } - for _, nameOrID := range namesOrIds { - if nameOrID == candidate.ID() || nameOrID == candidate.Name() { - containersNamesOrIds = append(containersNamesOrIds, nameOrID) - } - } - } - } - ctrs, rawInputs, err := getContainersAndInputByContext(all, options.Latest, containersNamesOrIds, options.Filters, ic.Libpod) + ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, namesOrIds, options.Filters, ic.Libpod) if err != nil { return nil, err } diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go index 94178a8e2..1f34cbd01 100644 --- a/pkg/domain/infra/abi/images.go +++ b/pkg/domain/infra/abi/images.go @@ -565,6 +565,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie libimageOptions.Force = opts.Force libimageOptions.Ignore = opts.Ignore libimageOptions.LookupManifest = opts.LookupManifest + libimageOptions.NoPrune = opts.NoPrune if !opts.All { libimageOptions.Filters = append(libimageOptions.Filters, "intermediate=false") } @@ -581,7 +582,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie rmErrors = libimageErrors - return + return report, rmErrors } // Shutdown Libpod engine diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go index 225aee017..81fb6aef8 100644 --- a/pkg/domain/infra/tunnel/containers.go +++ b/pkg/domain/infra/tunnel/containers.go @@ -658,36 +658,7 @@ func logIfRmError(id string, err error, reports []*reports.RmReport) { func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) { reports := []*entities.ContainerStartReport{} var exitCode = define.ExecErrorCodeGeneric - containersNamesOrIds := namesOrIds - all := options.All - if len(options.Filters) > 0 { - all = false - containersNamesOrIds = []string{} - opts := new(containers.ListOptions).WithFilters(options.Filters).WithAll(true) - candidates, listErr := containers.List(ic.ClientCtx, opts) - if listErr != nil { - return nil, listErr - } - for _, candidate := range candidates { - if options.All { - containersNamesOrIds = append(containersNamesOrIds, candidate.ID) - continue - } - for _, nameOrID := range namesOrIds { - if nameOrID == candidate.ID { - containersNamesOrIds = append(containersNamesOrIds, nameOrID) - continue - } - for _, containerName := range candidate.Names { - if containerName == nameOrID { - containersNamesOrIds = append(containersNamesOrIds, nameOrID) - continue - } - } - } - } - } - ctrs, err := getContainersByContext(ic.ClientCtx, all, false, containersNamesOrIds) + ctrs, namesOrIds, err := getContainersAndInputByContext(ic.ClientCtx, options.All, false, namesOrIds, options.Filters) if err != nil { return nil, err } diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go index a0b01dd71..90d558119 100644 --- a/pkg/domain/infra/tunnel/helpers.go +++ b/pkg/domain/infra/tunnel/helpers.go @@ -31,8 +31,17 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all, rawInputs := []string{} switch { case len(filters) > 0: + namesOrIDs = nil for i := range allContainers { - namesOrIDs = append(namesOrIDs, allContainers[i].ID) + if len(namesOrIDs) > 0 { + for _, name := range 
namesOrIDs { + if name == allContainers[i].ID { + namesOrIDs = append(namesOrIDs, allContainers[i].ID) + } + } + } else { + namesOrIDs = append(namesOrIDs, allContainers[i].ID) + } } case all: for i := range allContainers { diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go index 4f79325fd..4fecefaa3 100644 --- a/pkg/domain/infra/tunnel/images.go +++ b/pkg/domain/infra/tunnel/images.go @@ -28,7 +28,7 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.Boo } func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, []error) { - options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All).WithLookupManifest(opts.LookupManifest) + options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All).WithLookupManifest(opts.LookupManifest).WithNoPrune(opts.NoPrune) return images.Remove(ir.ClientCtx, imagesArg, options) } diff --git a/pkg/k8s.io/api/core/v1/types.go b/pkg/k8s.io/api/core/v1/types.go index 39a675dae..384965769 100644 --- a/pkg/k8s.io/api/core/v1/types.go +++ b/pkg/k8s.io/api/core/v1/types.go @@ -56,7 +56,8 @@ type VolumeSource struct { // ConfigMap represents a configMap that should populate this volume // +optional ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` - Secret *SecretVolumeSource + // Secret represents a secret that should be mounted as a volume + Secret *SecretVolumeSource `json:"secret,omitempty"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go index 189723ac7..8f6ef7a43 100644 --- a/pkg/machine/wsl/machine.go +++ b/pkg/machine/wsl/machine.go @@ -56,7 +56,9 @@ rm -f /etc/systemd/system/getty.target.wants/getty@tty1.service rm -f /etc/systemd/system/multi-user.target.wants/systemd-resolved.service rm -f /etc/systemd/system/dbus-org.freedesktop.resolve1.service ln -fs /dev/null /etc/systemd/system/console-getty.service +ln -fs /dev/null /etc/systemd/system/systemd-oomd.socket mkdir -p /etc/systemd/system/systemd-sysusers.service.d/ +echo CREATE_MAIL_SPOOL=no >> /etc/default/useradd adduser -m [USER] -G wheel mkdir -p /home/[USER]/.config/systemd/[USER]/ chown [USER]:[USER] /home/[USER]/.config @@ -89,9 +91,18 @@ fi const enterns = "#!/bin/bash\n" + sysdpid + ` if [ ! -z "$SYSDPID" ] && [ "$SYSDPID" != "1" ]; then - nsenter -m -p -t $SYSDPID "$@" -fi -` + NSENTER=("nsenter" "-m" "-p" "-t" "$SYSDPID" "--wd=$PWD") + + if [ "$UID" != "0" ]; then + NSENTER=("sudo" "${NSENTER[@]}") + if [ "$#" != "0" ]; then + NSENTER+=("sudo" "-u" "$USER") + else + NSENTER+=("su" "-l" "$USER") + fi + fi + "${NSENTER[@]}" "$@" +fi` const waitTerm = sysdpid + ` if [ ! -z "$SYSDPID" ]; then @@ -99,6 +110,10 @@ if [ ! 
-z "$SYSDPID" ]; then fi ` +const wslConf = `[user] +default=[USER] +` + // WSL kernel does not have sg and crypto_user modules const overrideSysusers = `[Service] LoadCredential= @@ -349,14 +364,6 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) { return false, err } - if err := v.writeConfig(); err != nil { - return false, err - } - - if err := setupConnections(v, opts, sshDir); err != nil { - return false, err - } - dist, err := provisionWSLDist(v) if err != nil { return false, err @@ -375,6 +382,17 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) { return false, err } + // Cycle so that user change goes into effect + _ = terminateDist(dist) + + if err := v.writeConfig(); err != nil { + return false, err + } + + if err := setupConnections(v, opts, sshDir); err != nil { + return false, err + } + return true, nil } @@ -450,12 +468,12 @@ func provisionWSLDist(v *MachineVM) (string, error) { dist := toDist(v.Name) fmt.Println("Importing operating system into WSL (this may take a few minutes on a new WSL install)...") - if err = runCmdPassThrough("wsl", "--import", dist, distTarget, v.ImagePath); err != nil { + if err = runCmdPassThrough("wsl", "--import", dist, distTarget, v.ImagePath, "--version", "2"); err != nil { return "", fmt.Errorf("the WSL import of guest OS failed: %w", err) } // Fixes newuidmap - if err = runCmdPassThrough("wsl", "-d", dist, "rpm", "-q", "--restore", "shadow-utils", "2>/dev/null"); err != nil { + if err = wslInvoke(dist, "rpm", "-q", "--restore", "shadow-utils", "2>/dev/null"); err != nil { return "", fmt.Errorf("package permissions restore of shadow-utils on guest OS failed: %w", err) } @@ -463,7 +481,7 @@ func provisionWSLDist(v *MachineVM) (string, error) { // operation when mount was not present on the initial start. Force a cycle so that it won't // repeatedly complain. 
if winVersionAtLeast(10, 0, 22000) { - if err := runCmdPassThrough("wsl", "--terminate", dist); err != nil { + if err := terminateDist(dist); err != nil { logrus.Warnf("could not cycle WSL dist: %s", err.Error()) } } @@ -478,16 +496,16 @@ func createKeys(v *MachineVM, dist string, sshDir string) error { return fmt.Errorf("could not create ssh directory: %w", err) } - if err := runCmdPassThrough("wsl", "--terminate", dist); err != nil { + if err := terminateDist(dist); err != nil { return fmt.Errorf("could not cycle WSL dist: %w", err) } - key, err := machine.CreateSSHKeysPrefix(sshDir, v.Name, true, true, "wsl", "-d", dist) + key, err := wslCreateKeys(sshDir, v.Name, dist) if err != nil { return fmt.Errorf("could not create ssh keys: %w", err) } - if err := pipeCmdPassThrough("wsl", key+"\n", "-d", dist, "sh", "-c", "mkdir -p /root/.ssh;"+ + if err := wslPipe(key+"\n", dist, "sh", "-c", "mkdir -p /root/.ssh;"+ "cat >> /root/.ssh/authorized_keys; chmod 600 /root/.ssh/authorized_keys"); err != nil { return fmt.Errorf("could not create root authorized keys on guest OS: %w", err) } @@ -495,7 +513,7 @@ func createKeys(v *MachineVM, dist string, sshDir string) error { userAuthCmd := withUser("mkdir -p /home/[USER]/.ssh;"+ "cat >> /home/[USER]/.ssh/authorized_keys; chown -R [USER]:[USER] /home/[USER]/.ssh;"+ "chmod 600 /home/[USER]/.ssh/authorized_keys", user) - if err := pipeCmdPassThrough("wsl", key+"\n", "-d", dist, "sh", "-c", userAuthCmd); err != nil { + if err := wslPipe(key+"\n", dist, "sh", "-c", userAuthCmd); err != nil { return fmt.Errorf("could not create '%s' authorized keys on guest OS: %w", v.RemoteUsername, err) } @@ -504,25 +522,25 @@ func createKeys(v *MachineVM, dist string, sshDir string) error { func configureSystem(v *MachineVM, dist string) error { user := v.RemoteUsername - if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", fmt.Sprintf(appendPort, v.Port, v.Port)); err != nil { + if err := wslInvoke(dist, "sh", "-c", fmt.Sprintf(appendPort, v.Port, v.Port)); err != nil { return fmt.Errorf("could not configure SSH port for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", withUser(configServices, user), "-d", dist, "sh"); err != nil { + if err := wslPipe(withUser(configServices, user), dist, "sh"); err != nil { return fmt.Errorf("could not configure systemd settings for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", sudoers, "-d", dist, "sh", "-c", "cat >> /etc/sudoers"); err != nil { + if err := wslPipe(sudoers, dist, "sh", "-c", "cat >> /etc/sudoers"); err != nil { return fmt.Errorf("could not add wheel to sudoers: %w", err) } - if err := pipeCmdPassThrough("wsl", overrideSysusers, "-d", dist, "sh", "-c", + if err := wslPipe(overrideSysusers, dist, "sh", "-c", "cat > /etc/systemd/system/systemd-sysusers.service.d/override.conf"); err != nil { return fmt.Errorf("could not generate systemd-sysusers override for guest OS: %w", err) } lingerCmd := withUser("cat > /home/[USER]/.config/systemd/[USER]/linger-example.service", user) - if err := pipeCmdPassThrough("wsl", lingerService, "-d", dist, "sh", "-c", lingerCmd); err != nil { + if err := wslPipe(lingerService, dist, "sh", "-c", lingerCmd); err != nil { return fmt.Errorf("could not generate linger service for guest OS: %w", err) } @@ -530,24 +548,28 @@ func configureSystem(v *MachineVM, dist string) error { return err } - if err := pipeCmdPassThrough("wsl", withUser(lingerSetup, user), "-d", dist, "sh"); err != nil { - return fmt.Errorf("could not configure systemd settomgs for guest OS: %w", 
err) + if err := wslPipe(withUser(lingerSetup, user), dist, "sh"); err != nil { + return fmt.Errorf("could not configure systemd settings for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", containersConf, "-d", dist, "sh", "-c", "cat > /etc/containers/containers.conf"); err != nil { + if err := wslPipe(containersConf, dist, "sh", "-c", "cat > /etc/containers/containers.conf"); err != nil { return fmt.Errorf("could not create containers.conf for guest OS: %w", err) } - if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", "echo wsl > /etc/containers/podman-machine"); err != nil { + if err := wslInvoke(dist, "sh", "-c", "echo wsl > /etc/containers/podman-machine"); err != nil { return fmt.Errorf("could not create podman-machine file for guest OS: %w", err) } + if err := wslPipe(withUser(wslConf, user), dist, "sh", "-c", "cat > /etc/wsl.conf"); err != nil { + return fmt.Errorf("could not configure wsl config for guest OS: %w", err) + } + return nil } func configureProxy(dist string, useProxy bool) error { if !useProxy { - _ = runCmdPassThrough("wsl", "-d", dist, "sh", "-c", clearProxySettings) + _ = wslInvoke(dist, "sh", "-c", clearProxySettings) return nil } var content string @@ -561,17 +583,17 @@ func configureProxy(dist string, useProxy bool) error { } } - if err := pipeCmdPassThrough("wsl", content, "-d", dist, "sh", "-c", proxyConfigAttempt); err != nil { + if err := wslPipe(content, dist, "sh", "-c", proxyConfigAttempt); err != nil { const failMessage = "Failure creating proxy configuration" if exitErr, isExit := err.(*exec.ExitError); isExit && exitErr.ExitCode() != 42 { return fmt.Errorf("%v: %w", failMessage, err) } fmt.Println("Installing proxy support") - _ = pipeCmdPassThrough("wsl", proxyConfigSetup, "-d", dist, "sh", "-c", + _ = wslPipe(proxyConfigSetup, dist, "sh", "-c", "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit") - if err = pipeCmdPassThrough("wsl", content, "-d", dist, "/usr/local/bin/proxyinit"); err != nil { + if err = wslPipe(content, dist, "/usr/local/bin/proxyinit"); err != nil { return fmt.Errorf("%v: %w", failMessage, err) } } @@ -581,7 +603,7 @@ func configureProxy(dist string, useProxy bool) error { func enableUserLinger(v *MachineVM, dist string) error { lingerCmd := "mkdir -p /var/lib/systemd/linger; touch /var/lib/systemd/linger/" + v.RemoteUsername - if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", lingerCmd); err != nil { + if err := wslInvoke(dist, "sh", "-c", lingerCmd); err != nil { return fmt.Errorf("could not enable linger for remote user on guest OS: %w", err) } @@ -589,26 +611,26 @@ func enableUserLinger(v *MachineVM, dist string) error { } func installScripts(dist string) error { - if err := pipeCmdPassThrough("wsl", enterns, "-d", dist, "sh", "-c", + if err := wslPipe(enterns, dist, "sh", "-c", "cat > /usr/local/bin/enterns; chmod 755 /usr/local/bin/enterns"); err != nil { return fmt.Errorf("could not create enterns script for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", profile, "-d", dist, "sh", "-c", + if err := wslPipe(profile, dist, "sh", "-c", "cat > /etc/profile.d/enterns.sh"); err != nil { return fmt.Errorf("could not create motd profile script for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", wslmotd, "-d", dist, "sh", "-c", "cat > /etc/wslmotd"); err != nil { + if err := wslPipe(wslmotd, dist, "sh", "-c", "cat > /etc/wslmotd"); err != nil { return fmt.Errorf("could not create a WSL MOTD for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", bootstrap, 
"-d", dist, "sh", "-c", + if err := wslPipe(bootstrap, dist, "sh", "-c", "cat > /root/bootstrap; chmod 755 /root/bootstrap"); err != nil { return fmt.Errorf("could not create bootstrap script for guest OS: %w", err) } - if err := pipeCmdPassThrough("wsl", proxyConfigSetup, "-d", dist, "sh", "-c", + if err := wslPipe(proxyConfigSetup, dist, "sh", "-c", "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit"); err != nil { return fmt.Errorf("could not create proxyinit script for guest OS: %w", err) } @@ -844,6 +866,22 @@ func withUser(s string, user string) string { return strings.ReplaceAll(s, "[USER]", user) } +func wslInvoke(dist string, arg ...string) error { + newArgs := []string{"-u", "root", "-d", dist} + newArgs = append(newArgs, arg...) + return runCmdPassThrough("wsl", newArgs...) +} + +func wslPipe(input string, dist string, arg ...string) error { + newArgs := []string{"-u", "root", "-d", dist} + newArgs = append(newArgs, arg...) + return pipeCmdPassThrough("wsl", input, newArgs...) +} + +func wslCreateKeys(sshDir string, name string, dist string) (string, error) { + return machine.CreateSSHKeysPrefix(sshDir, name, true, true, "wsl", "-u", "root", "-d", dist) +} + func runCmdPassThrough(name string, arg ...string) error { logrus.Debugf("Running command: %s %v", name, arg) cmd := exec.Command(name, arg...) @@ -935,7 +973,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error { return err } - err := runCmdPassThrough("wsl", "-d", dist, "/root/bootstrap") + err := wslInvoke(dist, "/root/bootstrap") if err != nil { return fmt.Errorf("the WSL bootstrap script failed: %w", err) } @@ -1124,7 +1162,7 @@ func isWSLRunning(dist string) (bool, error) { } func isSystemdRunning(dist string) (bool, error) { - cmd := exec.Command("wsl", "-d", dist, "sh") + cmd := exec.Command("wsl", "-u", "root", "-d", dist, "sh") cmd.Stdin = strings.NewReader(sysdpid + "\necho $SYSDPID\n") out, err := cmd.StdoutPipe() if err != nil { @@ -1174,13 +1212,13 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error { fmt.Fprintf(os.Stderr, "Could not stop API forwarding service (win-sshproxy.exe): %s\n", err.Error()) } - cmd := exec.Command("wsl", "-d", dist, "sh") + cmd := exec.Command("wsl", "-u", "root", "-d", dist, "sh") cmd.Stdin = strings.NewReader(waitTerm) if err = cmd.Start(); err != nil { return fmt.Errorf("executing wait command: %w", err) } - exitCmd := exec.Command("wsl", "-d", dist, "/usr/local/bin/enterns", "systemctl", "exit", "0") + exitCmd := exec.Command("wsl", "-u", "root", "-d", dist, "/usr/local/bin/enterns", "systemctl", "exit", "0") if err = exitCmd.Run(); err != nil { return fmt.Errorf("stopping sysd: %w", err) } @@ -1189,12 +1227,12 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error { return err } - cmd = exec.Command("wsl", "--terminate", dist) - if err = cmd.Run(); err != nil { - return err - } + return terminateDist(dist) +} - return nil +func terminateDist(dist string) error { + cmd := exec.Command("wsl", "--terminate", dist) + return cmd.Run() } func (v *MachineVM) State(bypass bool) (machine.Status, error) { @@ -1438,7 +1476,7 @@ func getCPUs(vm *MachineVM) (uint64, error) { if run, _ := isWSLRunning(dist); !run { return 0, nil } - cmd := exec.Command("wsl", "-d", dist, "nproc") + cmd := exec.Command("wsl", "-u", "root", "-d", dist, "nproc") out, err := cmd.StdoutPipe() if err != nil { return 0, err @@ -1462,7 +1500,7 @@ func getMem(vm *MachineVM) (uint64, error) { if run, _ := isWSLRunning(dist); !run { return 0, nil } - 
cmd := exec.Command("wsl", "-d", dist, "cat", "/proc/meminfo") + cmd := exec.Command("wsl", "-u", "root", "-d", dist, "cat", "/proc/meminfo") out, err := cmd.StdoutPipe() if err != nil { return 0, err
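The autoupdate.go rewrite above folds the old per-policy helpers (autoUpdateRegistry, autoUpdateLocally) into an updater that assembles tasks per systemd unit and drives each task through one state transition per run. A minimal sketch of calling the refactored entry point and consuming its reports, assuming Podman's v4 module paths and an already-configured *libpod.Runtime (both outside this diff):

// Sketch only: the import paths and runtime acquisition are assumptions,
// not part of this commit.
package demo

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/libpod"
	"github.com/containers/podman/v4/pkg/autoupdate"
	"github.com/containers/podman/v4/pkg/domain/entities"
)

func runAutoUpdate(ctx context.Context, runtime *libpod.Runtime) {
	// DryRun only exercises updateAvailable(); matching tasks are
	// reported as "pending" instead of being pulled and restarted.
	options := entities.AutoUpdateOptions{DryRun: true}

	reports, errs := autoupdate.AutoUpdate(ctx, runtime, options)
	for _, report := range reports {
		// Updated carries one of the status constants introduced above:
		// "failed", "true", "false", "pending", or "rolled back".
		fmt.Printf("%s (policy %s, unit %s): %s\n",
			report.ContainerName, report.Policy, report.SystemdUnit, report.Updated)
	}
	for _, err := range errs {
		fmt.Println("error:", err)
	}
}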
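The NoPrune option is plumbed through every layer in this diff: the libpod REST handler decodes a noprune query parameter, entities.ImageRemoveOptions and the bindings RemoveOptions gain a NoPrune field, and the tunnel engine forwards it via WithNoPrune. A hedged sketch of a remote client using the new knob; the socket path and image name are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/images"
)

func main() {
	// Connect to the Podman service; the socket path is a typical
	// rootful default and may differ on your system.
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		fmt.Println(err)
		return
	}

	// WithNoPrune(true) maps to the new `noprune` query parameter decoded
	// by ImagesBatchRemove, leaving dangling parent images in place.
	opts := new(images.RemoveOptions).WithForce(true).WithNoPrune(true)
	report, errs := images.Remove(conn, []string{"quay.io/example/app:latest"}, opts)
	if len(errs) > 0 {
		fmt.Println(errs)
		return
	}
	fmt.Println("removed:", report.Deleted)
}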