Diffstat (limited to 'pkg')
-rw-r--r--  pkg/api/handlers/swagger/responses.go                                            |   2
-rw-r--r--  pkg/api/server/register_play.go (renamed from pkg/api/server/register_kube.go)   |  14
-rw-r--r--  pkg/api/server/server.go                                                         |   2
-rw-r--r--  pkg/autoupdate/autoupdate.go                                                     | 461
-rw-r--r--  pkg/bindings/kube/kube.go                                                        |   4
-rw-r--r--  pkg/domain/entities/containers.go                                                |   1
-rw-r--r--  pkg/domain/entities/reports/containers.go                                        |   5
-rw-r--r--  pkg/domain/infra/abi/containers.go                                               |  43
-rw-r--r--  pkg/domain/infra/tunnel/containers.go                                            |  57
-rw-r--r--  pkg/domain/infra/tunnel/helpers.go                                               |   2
-rw-r--r--  pkg/k8s.io/api/core/v1/types.go                                                  |   3
-rw-r--r--  pkg/machine/wsl/machine.go                                                       | 118
-rw-r--r--  pkg/systemd/generate/containers.go                                               |   3
-rw-r--r--  pkg/systemd/generate/containers_test.go                                          |  50
14 files changed, 457 insertions, 308 deletions
diff --git a/pkg/api/handlers/swagger/responses.go b/pkg/api/handlers/swagger/responses.go
index 5731f8edd..93a508b39 100644
--- a/pkg/api/handlers/swagger/responses.go
+++ b/pkg/api/handlers/swagger/responses.go
@@ -71,7 +71,7 @@ type imagesRemoveResponseLibpod struct {
 
 // PlayKube response
 // swagger:response
-type kubePlayResponseLibpod struct {
+type playKubeResponseLibpod struct {
     // in:body
     Body entities.PlayKubeReport
 }
diff --git a/pkg/api/server/register_kube.go b/pkg/api/server/register_play.go
index 6ae9e8123..76e150504 100644
--- a/pkg/api/server/register_kube.go
+++ b/pkg/api/server/register_play.go
@@ -7,8 +7,8 @@ import (
     "github.com/gorilla/mux"
 )
 
-func (s *APIServer) registerKubeHandlers(r *mux.Router) error {
-    // swagger:operation POST /libpod/kube/play libpod KubePlayLibpod
+func (s *APIServer) registerPlayHandlers(r *mux.Router) error {
+    // swagger:operation POST /libpod/play/kube libpod PlayKubeLibpod
     // ---
     // tags:
     //  - containers
@@ -57,12 +57,12 @@ func (s *APIServer) registerKubeHandlers(r *mux.Router) error {
     //  - application/json
     // responses:
     //   200:
-    //     $ref: "#/responses/kubePlayResponseLibpod"
+    //     $ref: "#/responses/playKubeResponseLibpod"
     //   500:
     //     $ref: "#/responses/internalError"
-    r.HandleFunc(VersionedPath("/libpod/kube/play"), s.APIHandler(libpod.KubePlay)).Methods(http.MethodPost)
     r.HandleFunc(VersionedPath("/libpod/play/kube"), s.APIHandler(libpod.PlayKube)).Methods(http.MethodPost)
-    // swagger:operation DELETE /libpod/kube/play libpod KubePlayDownLibpod
+    r.HandleFunc(VersionedPath("/libpod/kube/play"), s.APIHandler(libpod.KubePlay)).Methods(http.MethodPost)
+    // swagger:operation DELETE /libpod/play/kube libpod PlayKubeDownLibpod
     // ---
     // tags:
     //  - containers
@@ -73,10 +73,10 @@ func (s *APIServer) registerKubeHandlers(r *mux.Router) error {
     //  - application/json
     // responses:
     //   200:
-    //     $ref: "#/responses/kubePlayResponseLibpod"
+    //     $ref: "#/responses/playKubeResponseLibpod"
     //   500:
     //     $ref: "#/responses/internalError"
-    r.HandleFunc(VersionedPath("/libpod/kube/play"), s.APIHandler(libpod.KubePlayDown)).Methods(http.MethodDelete)
     r.HandleFunc(VersionedPath("/libpod/play/kube"), s.APIHandler(libpod.PlayKubeDown)).Methods(http.MethodDelete)
+    r.HandleFunc(VersionedPath("/libpod/kube/play"), s.APIHandler(libpod.KubePlayDown)).Methods(http.MethodDelete)
     return nil
 }
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index a6d8b5e4c..5482a8ec2 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -126,11 +126,11 @@ func newServer(runtime *libpod.Runtime, listener net.Listener, opts entities.Ser
         server.registerHealthCheckHandlers,
         server.registerImagesHandlers,
         server.registerInfoHandlers,
-        server.registerKubeHandlers,
         server.registerManifestHandlers,
         server.registerMonitorHandlers,
         server.registerNetworkHandlers,
         server.registerPingHandlers,
+        server.registerPlayHandlers,
         server.registerPluginsHandlers,
         server.registerPodsHandlers,
         server.registerSecretHandlers,
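Note: with the rename above, /libpod/play/kube becomes the primary, documented route while /libpod/kube/play stays registered as a backward-compatible alias pointing at the same handlers, so existing clients keep working. A minimal gorilla/mux sketch of that aliasing pattern (standalone illustration; the handler body, port, and version prefix are invented, not Podman's actual code):

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/gorilla/mux"
    )

    // playKube stands in for the real libpod.PlayKube handler.
    func playKube(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "played")
    }

    func main() {
        r := mux.NewRouter()
        // Register the primary route first, then the legacy spelling as an
        // alias on the same handler, as register_play.go does.
        r.HandleFunc("/v4.0.0/libpod/play/kube", playKube).Methods(http.MethodPost)
        r.HandleFunc("/v4.0.0/libpod/kube/play", playKube).Methods(http.MethodPost)
        log.Fatal(http.ListenAndServe("127.0.0.1:8080", r))
    }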
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index 8d9991622..297d6640e 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -43,15 +43,41 @@ const (
 
 // Map for easy lookups of supported policies.
 var supportedPolicies = map[string]Policy{
-    "":         PolicyDefault,
-    "disabled": PolicyDefault,
-    "image":    PolicyRegistryImage,
-    "registry": PolicyRegistryImage,
-    "local":    PolicyLocalImage,
+    "":                          PolicyDefault,
+    string(PolicyDefault):       PolicyDefault,
+    "image":                     PolicyRegistryImage, // Deprecated in favor of PolicyRegistryImage
+    string(PolicyRegistryImage): PolicyRegistryImage,
+    string(PolicyLocalImage):    PolicyLocalImage,
 }
 
-// policyMapper is used for tying a container to it's autoupdate policy
-type policyMapper map[Policy][]*libpod.Container
+// updater includes shared state for auto-updating one or more containers.
+type updater struct {
+    conn             *dbus.Conn                  // DBUS connection
+    options          *entities.AutoUpdateOptions // User-specified options
+    unitToTasks      map[string][]*task          // Keeps track of tasks per unit
+    updatedRawImages map[string]bool             // Keeps track of updated images
+    runtime          *libpod.Runtime             // The libpod runtime
+}
+
+const (
+    statusFailed     = "failed"      // The update has failed
+    statusUpdated    = "true"        // The update succeeded
+    statusNotUpdated = "false"       // No update was needed
+    statusPending    = "pending"     // The update is pending (see options.DryRun)
+    statusRolledBack = "rolled back" // Rollback after a failed update
+)
+
+// task includes data and state for updating a container
+type task struct {
+    authfile     string            // Container-specific authfile
+    auto         *updater          // Reverse pointer to the updater
+    container    *libpod.Container // Container to update
+    policy       Policy            // Update policy
+    image        *libimage.Image   // Original image before the update
+    rawImageName string            // The container's raw image name
+    status       string            // Auto-update status
+    unit         string            // Name of the systemd unit
+}
 
 // LookupPolicy looks up the corresponding Policy for the specified
 // string. If none is found, an errors is returned including the list of
@@ -116,23 +142,22 @@ func ValidateImageReference(imageName string) error {
 // It returns a slice of successfully restarted systemd units and a slice of
 // errors encountered during auto update.
 func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
-    // Create a map from `image ID -> []*Container`.
-    containerMap, errs := imageContainersMap(runtime)
-    if len(containerMap) == 0 {
-        return nil, errs
-    }
+    // Note that (most) errors are non-fatal such that a single
+    // misconfigured container does not prevent others from being updated
+    // (which could be a security threat).
 
-    // Create a map from `image ID -> *libimage.Image` for image lookups.
-    listOptions := &libimage.ListImagesOptions{
-        Filters: []string{"readonly=false"},
+    auto := updater{
+        options:          &options,
+        runtime:          runtime,
+        updatedRawImages: make(map[string]bool),
     }
-    imagesSlice, err := runtime.LibimageRuntime().ListImages(ctx, nil, listOptions)
-    if err != nil {
-        return nil, []error{err}
-    }
-    imageMap := make(map[string]*libimage.Image)
-    for i := range imagesSlice {
-        imageMap[imagesSlice[i].ID()] = imagesSlice[i]
+
+    // Find auto-update tasks and assemble them by unit.
+    errors := auto.assembleTasks(ctx)
+
+    // Nothing to do.
+    if len(auto.unitToTasks) == 0 {
+        return nil, errors
     }
 
     // Connect to DBUS.
@@ -142,185 +167,176 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
         return nil, []error{err}
     }
     defer conn.Close()
+    auto.conn = conn
 
     runtime.NewSystemEvent(events.AutoUpdate)
 
     // Update all images/container according to their auto-update policy.
     var allReports []*entities.AutoUpdateReport
-    updatedRawImages := make(map[string]bool)
-    for imageID, policyMapper := range containerMap {
-        image, exists := imageMap[imageID]
-        if !exists {
-            errs = append(errs, fmt.Errorf("container image ID %q not found in local storage", imageID))
-            return nil, errs
+    for unit, tasks := range auto.unitToTasks {
+        // Sanity check: we'll support that in the future.
+        if len(tasks) != 1 {
+            errors = append(errors, fmt.Errorf("only 1 task per unit supported but unit %s has %d", unit, len(tasks)))
+            return nil, errors
         }
 
-        for _, ctr := range policyMapper[PolicyRegistryImage] {
-            report, err := autoUpdateRegistry(ctx, image, ctr, updatedRawImages, &options, conn, runtime)
-            if err != nil {
-                errs = append(errs, err)
-            }
-            if report != nil {
-                allReports = append(allReports, report)
-            }
-        }
+        for _, task := range tasks {
+            err := func() error {
+                // Transition from state to state.  Will be
+                // split into multiple loops in the future to
+                // support more than one container/task per
+                // unit.
+                updateAvailable, err := task.updateAvailable(ctx)
+                if err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("checking image updates for container %s: %w", task.container.ID(), err)
+                }
+
+                if !updateAvailable {
+                    task.status = statusNotUpdated
+                    return nil
+                }
+
+                if options.DryRun {
+                    task.status = statusPending
+                    return nil
+                }
+
+                if err := task.update(ctx); err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("updating image for container %s: %w", task.container.ID(), err)
+                }
+
+                updateError := auto.restartSystemdUnit(ctx, unit)
+                if updateError == nil {
+                    task.status = statusUpdated
+                    return nil
+                }
+
+                if !options.Rollback {
+                    task.status = statusFailed
+                    return fmt.Errorf("restarting unit %s for container %s: %w", task.unit, task.container.ID(), err)
+                }
+
+                if err := task.rollbackImage(); err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("rolling back image for container %s: %w", task.container.ID(), err)
+                }
+
+                if err := auto.restartSystemdUnit(ctx, unit); err != nil {
+                    task.status = statusFailed
+                    return fmt.Errorf("restarting unit %s for container %s during rollback: %w", task.unit, task.container.ID(), err)
+                }
+
+                task.status = statusRolledBack
+                return nil
+            }()
 
-        for _, ctr := range policyMapper[PolicyLocalImage] {
-            report, err := autoUpdateLocally(ctx, image, ctr, &options, conn, runtime)
             if err != nil {
-                errs = append(errs, err)
-            }
-            if report != nil {
-                allReports = append(allReports, report)
+                errors = append(errors, err)
             }
+            allReports = append(allReports, task.report())
         }
     }
 
-    return allReports, errs
+    return allReports, errors
 }
 
-// autoUpdateRegistry updates the image/container according to the "registry" policy.
-func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.Container, updatedRawImages map[string]bool, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
-    cid := ctr.ID()
-    rawImageName := ctr.RawImageName()
-    if rawImageName == "" {
-        return nil, fmt.Errorf("registry auto-updating container %q: raw-image name is empty", cid)
+// report creates an auto-update report for the task.
+func (t *task) report() *entities.AutoUpdateReport {
+    return &entities.AutoUpdateReport{
+        ContainerID:   t.container.ID(),
+        ContainerName: t.container.Name(),
+        ImageName:     t.container.RawImageName(),
+        Policy:        string(t.policy),
+        SystemdUnit:   t.unit,
+        Updated:       t.status,
     }
 }
 
-    labels := ctr.Labels()
-    unit, exists := labels[systemdDefine.EnvVariable]
-    if !exists {
-        return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
+// updateAvailable returns whether an update for the task is available.
+func (t *task) updateAvailable(ctx context.Context) (bool, error) {
+    switch t.policy {
+    case PolicyRegistryImage:
+        return t.registryUpdateAvailable(ctx)
+    case PolicyLocalImage:
+        return t.localUpdateAvailable()
+    default:
+        return false, fmt.Errorf("unexpected auto-update policy %s for container %s", t.policy, t.container.ID())
     }
+}
 
-    report := &entities.AutoUpdateReport{
-        ContainerID:   cid,
-        ContainerName: ctr.Name(),
-        ImageName:     rawImageName,
-        Policy:        PolicyRegistryImage,
-        SystemdUnit:   unit,
-        Updated:       "failed",
+// update the task according to its auto-update policy.
+func (t *task) update(ctx context.Context) error {
+    switch t.policy {
+    case PolicyRegistryImage:
+        return t.registryUpdate(ctx)
+    case PolicyLocalImage:
+        // Nothing to do as the image is already available in the local storage.
+        return nil
+    default:
+        return fmt.Errorf("unexpected auto-update policy %s for container %s", t.policy, t.container.ID())
     }
+}
 
-    if _, updated := updatedRawImages[rawImageName]; updated {
-        logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
-        if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
-            return report, err
-        }
-        report.Updated = "true"
-        return report, nil
+// registryUpdateAvailable returns whether a new image on the registry is available.
+func (t *task) registryUpdateAvailable(ctx context.Context) (bool, error) {
+    // The newer image has already been pulled for another task, so we know
+    // there's a newer one available.
+    if _, exists := t.auto.updatedRawImages[t.rawImageName]; exists {
+        return true, nil
     }
 
-    authfile := getAuthfilePath(ctr, options)
-    needsUpdate, err := newerRemoteImageAvailable(ctx, image, rawImageName, authfile)
+    remoteRef, err := docker.ParseReference("//" + t.rawImageName)
     if err != nil {
-        return report, fmt.Errorf("registry auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err)
-    }
-
-    if !needsUpdate {
-        report.Updated = "false"
-        return report, nil
-    }
-
-    if options.DryRun {
-        report.Updated = "pending"
-        return report, nil
-    }
-
-    if _, err := updateImage(ctx, runtime, rawImageName, authfile); err != nil {
-        return report, fmt.Errorf("registry auto-updating container %q: image update for %q failed: %w", cid, rawImageName, err)
-    }
-    updatedRawImages[rawImageName] = true
-
-    logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
-    updateErr := restartSystemdUnit(ctx, ctr, unit, conn)
-    if updateErr == nil {
-        report.Updated = "true"
-        return report, nil
-    }
-
-    if !options.Rollback {
-        return report, updateErr
-    }
-
-    // To fallback, simply retag the old image and restart the service.
-    if err := image.Tag(rawImageName); err != nil {
-        return report, fmt.Errorf("falling back to previous image: %w", err)
-    }
-    if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
-        return report, fmt.Errorf("restarting unit with old image during fallback: %w", err)
+        return false, err
     }
-
-    report.Updated = "rolled back"
-    return report, nil
+    options := &libimage.HasDifferentDigestOptions{AuthFilePath: t.authfile}
+    return t.image.HasDifferentDigest(ctx, remoteRef, options)
 }
 
-// autoUpdateRegistry updates the image/container according to the "local" policy.
-func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.Container, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
-    cid := ctr.ID()
-    rawImageName := ctr.RawImageName()
-    if rawImageName == "" {
-        return nil, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", cid)
+// registryUpdate pulls down the image from the registry.
+func (t *task) registryUpdate(ctx context.Context) error {
+    // The newer image has already been pulled for another task.
+    if _, exists := t.auto.updatedRawImages[t.rawImageName]; exists {
+        return nil
     }
 
-    labels := ctr.Labels()
-    unit, exists := labels[systemdDefine.EnvVariable]
-    if !exists {
-        return nil, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
+    pullOptions := &libimage.PullOptions{}
+    pullOptions.AuthFilePath = t.authfile
+    pullOptions.Writer = os.Stderr
+    if _, err := t.auto.runtime.LibimageRuntime().Pull(ctx, t.rawImageName, config.PullPolicyAlways, pullOptions); err != nil {
+        return err
     }
 
-    report := &entities.AutoUpdateReport{
-        ContainerID:   cid,
-        ContainerName: ctr.Name(),
-        ImageName:     rawImageName,
-        Policy:        PolicyLocalImage,
-        SystemdUnit:   unit,
-        Updated:       "failed",
-    }
+    t.auto.updatedRawImages[t.rawImageName] = true
+    return nil
+}
 
-    needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
+// localUpdateAvailable returns whether a new image in the local storage is available.
+func (t *task) localUpdateAvailable() (bool, error) {
+    localImg, _, err := t.auto.runtime.LibimageRuntime().LookupImage(t.rawImageName, nil)
     if err != nil {
-        return report, fmt.Errorf("locally auto-updating container %q: image check for %q failed: %w", cid, rawImageName, err)
-    }
-
-    if !needsUpdate {
-        report.Updated = "false"
-        return report, nil
-    }
-
-    if options.DryRun {
-        report.Updated = "pending"
-        return report, nil
-    }
-
-    logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
-    updateErr := restartSystemdUnit(ctx, ctr, unit, conn)
-    if updateErr == nil {
-        report.Updated = "true"
-        return report, nil
-    }
-
-    if !options.Rollback {
-        return report, updateErr
+        return false, err
     }
+    return localImg.Digest().String() != t.image.Digest().String(), nil
+}
 
+// rollbackImage rolls back the task's image to the previous version before the update.
+func (t *task) rollbackImage() error {
     // To fallback, simply retag the old image and restart the service.
-    if err := image.Tag(rawImageName); err != nil {
-        return report, fmt.Errorf("falling back to previous image: %w", err)
-    }
-    if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
-        return report, fmt.Errorf("restarting unit with old image during fallback: %w", err)
+    if err := t.image.Tag(t.rawImageName); err != nil {
+        return err
     }
-
-    report.Updated = "rolled back"
-    return report, nil
+    t.auto.updatedRawImages[t.rawImageName] = false
+    return nil
 }
 
 // restartSystemdUnit restarts the systemd unit the container is running in.
-func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string, conn *dbus.Conn) error {
+func (u *updater) restartSystemdUnit(ctx context.Context, unit string) error {
     restartChan := make(chan string)
-    if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
-        return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: %w", ctr.ID(), unit, err)
+    if _, err := u.conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
+        return err
     }
 
     // Wait for the restart to finish and actually check if it was
@@ -329,25 +345,34 @@ func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string,
 
     switch result {
     case "done":
-        logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID())
+        logrus.Infof("Successfully restarted systemd unit %q", unit)
         return nil
 
     default:
-        return fmt.Errorf("auto-updating container %q: restarting systemd unit %q failed: expected %q but received %q", ctr.ID(), unit, "done", result)
+        return fmt.Errorf("expected %q but received %q", "done", result)
     }
 }
 
-// imageContainersMap generates a map[image ID] -> [containers using the image]
-// of all containers with a valid auto-update policy.
-func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []error) {
-    allContainers, err := runtime.GetAllContainers()
+// assembleTasks assembles update tasks per unit and populates a mapping from
+// `unit -> []*task` such that multiple containers _can_ run in a single unit.
+func (u *updater) assembleTasks(ctx context.Context) []error {
+    // Assemble a map `image ID -> *libimage.Image` that we can consult
+    // later on for lookups.
+    imageMap, err := u.assembleImageMap(ctx)
     if err != nil {
-        return nil, []error{err}
+        return []error{err}
     }
 
+    allContainers, err := u.runtime.GetAllContainers()
+    if err != nil {
+        return []error{err}
+    }
+
+    u.unitToTasks = make(map[string][]*task)
+
     errors := []error{}
-    containerMap := make(map[string]policyMapper)
-    for _, ctr := range allContainers {
+    for _, c := range allContainers {
+        ctr := c
         state, err := ctr.State()
         if err != nil {
            errors = append(errors, err)
@@ -358,77 +383,75 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []err
            continue
        }
 
-        // Only update containers with the specific label/policy set.
+        // Check the container's auto-update policy which is configured
+        // as a label.
        labels := ctr.Labels()
        value, exists := labels[Label]
        if !exists {
            continue
        }
-
        policy, err := LookupPolicy(value)
        if err != nil {
            errors = append(errors, err)
            continue
        }
-
-        // Skip labels not related to autoupdate
        if policy == PolicyDefault {
            continue
-        } else {
-            id, _ := ctr.Image()
-            policyMap, exists := containerMap[id]
-            if !exists {
-                policyMap = make(map[Policy][]*libpod.Container)
-            }
-            policyMap[policy] = append(policyMap[policy], ctr)
-            containerMap[id] = policyMap
-            // Now we know that `ctr` is configured for auto updates.
        }
-    }
-
-    return containerMap, errors
-}
+
+        // Make sure the container runs in a systemd unit which is
+        // stored as a label at container creation.
+        unit, exists := labels[systemdDefine.EnvVariable]
+        if !exists {
+            errors = append(errors, fmt.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable))
+            continue
+        }
 
-// getAuthfilePath returns an authfile path, if set. The authfile label in the
-// container, if set, as precedence over the one set in the options.
-func getAuthfilePath(ctr *libpod.Container, options *entities.AutoUpdateOptions) string {
-    labels := ctr.Labels()
-    authFilePath, exists := labels[AuthfileLabel]
-    if exists {
-        return authFilePath
-    }
-    return options.Authfile
-}
+        id, _ := ctr.Image()
+        image, exists := imageMap[id]
+        if !exists {
+            err := fmt.Errorf("internal error: no image found for ID %s", id)
+            errors = append(errors, err)
+            continue
+        }
 
-// newerRemoteImageAvailable returns true if there corresponding image on the remote
-// registry is newer.
-func newerRemoteImageAvailable(ctx context.Context, img *libimage.Image, origName string, authfile string) (bool, error) {
-    remoteRef, err := docker.ParseReference("//" + origName)
-    if err != nil {
-        return false, err
-    }
-    options := &libimage.HasDifferentDigestOptions{AuthFilePath: authfile}
-    return img.HasDifferentDigest(ctx, remoteRef, options)
-}
+        rawImageName := ctr.RawImageName()
+        if rawImageName == "" {
+            errors = append(errors, fmt.Errorf("locally auto-updating container %q: raw-image name is empty", ctr.ID()))
+            continue
+        }
 
-// newerLocalImageAvailable returns true if the container and local image have different digests
-func newerLocalImageAvailable(runtime *libpod.Runtime, img *libimage.Image, rawImageName string) (bool, error) {
-    localImg, _, err := runtime.LibimageRuntime().LookupImage(rawImageName, nil)
-    if err != nil {
-        return false, err
+        t := task{
+            authfile:     labels[AuthfileLabel],
+            auto:         u,
+            container:    ctr,
+            policy:       policy,
+            image:        image,
+            unit:         unit,
+            rawImageName: rawImageName,
+            status:       statusFailed, // must be updated later on
+        }
+
+        // Add the task to the unit.
+        u.unitToTasks[unit] = append(u.unitToTasks[unit], &t)
    }
-    return localImg.Digest().String() != img.Digest().String(), nil
-}
 
-// updateImage pulls the specified image.
-func updateImage(ctx context.Context, runtime *libpod.Runtime, name, authfile string) (*libimage.Image, error) {
-    pullOptions := &libimage.PullOptions{}
-    pullOptions.AuthFilePath = authfile
-    pullOptions.Writer = os.Stderr
+    return errors
+}
 
-    pulledImages, err := runtime.LibimageRuntime().Pull(ctx, name, config.PullPolicyAlways, pullOptions)
+// assembleImageMap creates a map from `image ID -> *libimage.Image` for image lookups.
+func (u *updater) assembleImageMap(ctx context.Context) (map[string]*libimage.Image, error) {
+    listOptions := &libimage.ListImagesOptions{
+        Filters: []string{"readonly=false"},
+    }
+    imagesSlice, err := u.runtime.LibimageRuntime().ListImages(ctx, nil, listOptions)
     if err != nil {
        return nil, err
    }
-    return pulledImages[0], nil
+    imageMap := make(map[string]*libimage.Image)
+    for i := range imagesSlice {
+        imageMap[imagesSlice[i].ID()] = imagesSlice[i]
+    }
+
+    return imageMap, nil
 }
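Note: the refactor above folds the per-policy functions (autoUpdateRegistry, autoUpdateLocally) into a single state machine: assembleTasks groups tasks by systemd unit, each task moves through updateAvailable, update, restart, and optional rollback, and its status field feeds the final report. Unit restarts all go through one method built on go-systemd's D-Bus API. A self-contained sketch of that restart-and-wait pattern (it assumes a reachable bus via dbus.NewWithContext; Podman instead reuses the updater's existing connection):

    package main

    import (
        "context"
        "fmt"

        "github.com/coreos/go-systemd/v22/dbus"
    )

    func restartUnit(ctx context.Context, unit string) error {
        conn, err := dbus.NewWithContext(ctx)
        if err != nil {
            return err
        }
        defer conn.Close()

        // RestartUnitContext only queues a job; systemd reports the job's
        // result string on the channel once it finishes.
        resultChan := make(chan string)
        if _, err := conn.RestartUnitContext(ctx, unit, "replace", resultChan); err != nil {
            return err
        }
        if result := <-resultChan; result != "done" {
            return fmt.Errorf("expected %q but received %q", "done", result)
        }
        return nil
    }

    func main() {
        if err := restartUnit(context.Background(), "example.service"); err != nil {
            fmt.Println("restart failed:", err)
        }
    }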
diff --git a/pkg/bindings/kube/kube.go b/pkg/bindings/kube/kube.go
index b9cc0efa7..db40c5134 100644
--- a/pkg/bindings/kube/kube.go
+++ b/pkg/bindings/kube/kube.go
@@ -51,7 +51,7 @@ func PlayWithBody(ctx context.Context, body io.Reader, options *PlayOptions) (*e
        return nil, err
    }
 
-    response, err := conn.DoRequest(ctx, body, http.MethodPost, "/kube/play", params, header)
+    response, err := conn.DoRequest(ctx, body, http.MethodPost, "/play/kube", params, header)
    if err != nil {
        return nil, err
    }
@@ -85,7 +85,7 @@ func DownWithBody(ctx context.Context, body io.Reader) (*entities.KubePlayReport
        return nil, err
    }
 
-    response, err := conn.DoRequest(ctx, body, http.MethodDelete, "/kube/play", nil, nil)
+    response, err := conn.DoRequest(ctx, body, http.MethodDelete, "/play/kube", nil, nil)
    if err != nil {
        return nil, err
    }
diff --git a/pkg/domain/entities/containers.go b/pkg/domain/entities/containers.go
index df793034b..7048cd1d2 100644
--- a/pkg/domain/entities/containers.go
+++ b/pkg/domain/entities/containers.go
@@ -131,6 +131,7 @@ type RestartReport struct {
 }
 
 type RmOptions struct {
+    Filters map[string][]string
     All    bool
     Depend bool
     Force  bool
diff --git a/pkg/domain/entities/reports/containers.go b/pkg/domain/entities/reports/containers.go
index db9a66012..6759fc402 100644
--- a/pkg/domain/entities/reports/containers.go
+++ b/pkg/domain/entities/reports/containers.go
@@ -1,8 +1,9 @@
 package reports
 
 type RmReport struct {
-    Id  string `json:"Id"` //nolint:revive,stylecheck
-    Err error  `json:"Err,omitempty"`
+    Id       string `json:"Id"` //nolint:revive,stylecheck
+    Err      error  `json:"Err,omitempty"`
+    RawInput string
 }
 
 func RmReportsIds(r []*RmReport) []string {
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 68fbe1c9d..0df36ed64 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -153,10 +153,10 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    if len(rawInputs) == len(ctrs) {
        for i := range ctrs {
-            ctrMap[ctrs[i].ID()] = rawInputs[i]
+            idToRawInput[ctrs[i].ID()] = rawInputs[i]
        }
    }
    reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
@@ -169,7 +169,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
        reports = append(reports, &entities.PauseUnpauseReport{
            Id:       c.ID(),
            Err:      err,
-            RawInput: ctrMap[c.ID()],
+            RawInput: idToRawInput[c.ID()],
        })
    }
    return reports, nil
@@ -180,10 +180,10 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    if len(rawInputs) == len(ctrs) {
        for i := range ctrs {
-            ctrMap[ctrs[i].ID()] = rawInputs[i]
+            idToRawInput[ctrs[i].ID()] = rawInputs[i]
        }
    }
    reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
@@ -196,7 +196,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
        reports = append(reports, &entities.PauseUnpauseReport{
            Id:       c.ID(),
            Err:      err,
-            RawInput: ctrMap[c.ID()],
+            RawInput: idToRawInput[c.ID()],
        })
    }
    return reports, nil
@@ -207,10 +207,10 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
    if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    if len(rawInputs) == len(ctrs) {
        for i := range ctrs {
-            ctrMap[ctrs[i].ID()] = rawInputs[i]
+            idToRawInput[ctrs[i].ID()] = rawInputs[i]
        }
    }
    errMap, err := parallelctr.ContainerOp(ctx, ctrs, func(c *libpod.Container) error {
@@ -256,7 +256,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
        if options.All {
            report.RawInput = ctr.ID()
        } else {
-            report.RawInput = ctrMap[ctr.ID()]
+            report.RawInput = idToRawInput[ctr.ID()]
        }
        report.Err = err
        reports = append(reports, report)
@@ -286,10 +286,10 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    if len(rawInputs) == len(ctrs) {
        for i := range ctrs {
-            ctrMap[ctrs[i].ID()] = rawInputs[i]
+            idToRawInput[ctrs[i].ID()] = rawInputs[i]
        }
    }
    reports := make([]*entities.KillReport, 0, len(ctrs))
@@ -302,7 +302,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
        reports = append(reports, &entities.KillReport{
            Id:       con.ID(),
            Err:      err,
-            RawInput: ctrMap[con.ID()],
+            RawInput: idToRawInput[con.ID()],
        })
    }
    return reports, nil
@@ -371,7 +371,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
    // this will fail and code will fall through to removing the container from libpod.`
    tmpNames := []string{}
    for _, ctr := range names {
-        report := reports.RmReport{Id: ctr}
+        report := reports.RmReport{Id: ctr, RawInput: ctr}
        report.Err = ic.Libpod.RemoveStorageContainer(ctr, options.Force)
        //nolint:gocritic
        if report.Err == nil {
@@ -392,7 +392,16 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
    }
    names = tmpNames
 
-    ctrs, err := getContainersByContext(options.All, options.Latest, names, ic.Libpod)
+    ctrs, rawInputs, err := getContainersAndInputByContext(options.All, options.Latest, names, options.Filters, ic.Libpod)
+    if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
+        return nil, err
+    }
+    idToRawInput := map[string]string{}
+    if len(rawInputs) == len(ctrs) {
+        for i := range ctrs {
+            idToRawInput[ctrs[i].ID()] = rawInputs[i]
+        }
+    }
    if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchCtr)) {
        // Failed to get containers. If force is specified, get the containers ID
        // and evict them
@@ -402,7 +411,10 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
 
        for _, ctr := range names {
            logrus.Debugf("Evicting container %q", ctr)
-            report := reports.RmReport{Id: ctr}
+            report := reports.RmReport{
+                Id:       ctr,
+                RawInput: idToRawInput[ctr],
+            }
            _, err := ic.Libpod.EvictContainer(ctx, ctr, options.Volumes)
            if err != nil {
                if options.Ignore && errors.Is(err, define.ErrNoSuchCtr) {
@@ -472,6 +484,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
        report := new(reports.RmReport)
        report.Id = ctr.ID()
        report.Err = err
+        report.RawInput = idToRawInput[ctr.ID()]
        rmReports = append(rmReports, report)
    }
    return rmReports, nil
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 6c1a7f97f..81fb6aef8 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -61,9 +61,9 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    for i := range ctrs {
-        ctrMap[ctrs[i].ID] = rawInputs[i]
+        idToRawInput[ctrs[i].ID] = rawInputs[i]
    }
    reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
    for _, c := range ctrs {
@@ -75,7 +75,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
        reports = append(reports, &entities.PauseUnpauseReport{
            Id:       c.ID,
            Err:      err,
-            RawInput: ctrMap[c.ID],
+            RawInput: idToRawInput[c.ID],
        })
    }
    return reports, nil
@@ -86,9 +86,9 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    for i := range ctrs {
-        ctrMap[ctrs[i].ID] = rawInputs[i]
+        idToRawInput[ctrs[i].ID] = rawInputs[i]
    }
    reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
    for _, c := range ctrs {
@@ -100,7 +100,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
        reports = append(reports, &entities.PauseUnpauseReport{
            Id:       c.ID,
            Err:      err,
-            RawInput: ctrMap[c.ID],
+            RawInput: idToRawInput[c.ID],
        })
    }
    return reports, nil
@@ -111,9 +111,9 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    for i := range ctrs {
-        ctrMap[ctrs[i].ID] = rawInputs[i]
+        idToRawInput[ctrs[i].ID] = rawInputs[i]
    }
    options := new(containers.StopOptions).WithIgnore(opts.Ignore)
    if to := opts.Timeout; to != nil {
@@ -123,7 +123,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
    for _, c := range ctrs {
        report := entities.StopReport{
            Id:       c.ID,
-            RawInput: ctrMap[c.ID],
+            RawInput: idToRawInput[c.ID],
        }
        if err = containers.Stop(ic.ClientCtx, c.ID, options); err != nil {
            // These first two are considered non-fatal under the right conditions
@@ -154,9 +154,9 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
    if err != nil {
        return nil, err
    }
-    ctrMap := map[string]string{}
+    idToRawInput := map[string]string{}
    for i := range ctrs {
-        ctrMap[ctrs[i].ID] = rawInputs[i]
+        idToRawInput[ctrs[i].ID] = rawInputs[i]
    }
    options := new(containers.KillOptions).WithSignal(opts.Signal)
    reports := make([]*entities.KillReport, 0, len(ctrs))
@@ -169,7 +169,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
        reports = append(reports, &entities.KillReport{
            Id:       c.ID,
            Err:      err,
-            RawInput: ctrMap[c.ID],
+            RawInput: idToRawInput[c.ID],
        })
    }
    return reports, nil
@@ -208,11 +208,18 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
    toRemove := []string{}
    alreadyRemoved := make(map[string]bool) // Avoids trying to remove already removed containers
 
-    if opts.All {
-        ctrs, err := getContainersByContext(ic.ClientCtx, opts.All, opts.Ignore, nil)
+    idToRawInput := map[string]string{}
+
+    if opts.All || len(opts.Filters) > 0 {
+        ctrs, rawInputs, err := getContainersAndInputByContext(ic.ClientCtx, opts.All, opts.Ignore, nil, opts.Filters)
        if err != nil {
            return nil, err
        }
+        if len(rawInputs) == len(ctrs) {
+            for i := range ctrs {
+                idToRawInput[ctrs[i].ID] = rawInputs[i]
+            }
+        }
        for _, c := range ctrs {
            toRemove = append(toRemove, c.ID)
        }
@@ -225,10 +232,15 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
            // instead of the ID. Since this can only happen
            // with external containers, it poses no threat
            // to the `alreadyRemoved` checks below.
-            ctrs, err := getContainersByContext(ic.ClientCtx, false, true, []string{ctr})
+            ctrs, rawInputs, err := getContainersAndInputByContext(ic.ClientCtx, false, true, []string{ctr}, opts.Filters)
            if err != nil {
                return nil, err
            }
+            if len(rawInputs) == len(ctrs) {
+                for i := range ctrs {
+                    idToRawInput[ctrs[i].ID] = rawInputs[i]
+                }
+            }
            id := ctr
            if len(ctrs) == 1 {
                id = ctrs[0].ID
@@ -238,13 +250,20 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
    }
 
    rmReports := make([]*reports.RmReport, 0, len(toRemove))
-    for _, nameOrID := range toRemove {
-        if alreadyRemoved[nameOrID] {
+    for _, rmCtr := range toRemove {
+        if alreadyRemoved[rmCtr] {
            continue
        }
-        newReports, err := containers.Remove(ic.ClientCtx, nameOrID, options)
+        if ctr, exist := idToRawInput[rmCtr]; exist {
+            rmCtr = ctr
+        }
+        newReports, err := containers.Remove(ic.ClientCtx, rmCtr, options)
        if err != nil {
-            rmReports = append(rmReports, &reports.RmReport{Id: nameOrID, Err: err})
+            rmReports = append(rmReports, &reports.RmReport{
+                Id:       rmCtr,
+                Err:      err,
+                RawInput: idToRawInput[rmCtr],
+            })
            continue
        }
        for i := range newReports {
diff --git a/pkg/domain/infra/tunnel/helpers.go b/pkg/domain/infra/tunnel/helpers.go
index 20ea07948..90d558119 100644
--- a/pkg/domain/infra/tunnel/helpers.go
+++ b/pkg/domain/infra/tunnel/helpers.go
@@ -14,7 +14,7 @@ import (
 
 // FIXME: the `ignore` parameter is very likely wrong here as it should rather
 // be used on *errors* from operations such as remove.
-func getContainersByContext(contextWithConnection context.Context, all, ignore bool, namesOrIDs []string) ([]entities.ListContainer, error) {
+func getContainersByContext(contextWithConnection context.Context, all, ignore bool, namesOrIDs []string) ([]entities.ListContainer, error) { //nolint:unparam
    ctrs, _, err := getContainersAndInputByContext(contextWithConnection, all, ignore, namesOrIDs, nil)
    return ctrs, err
 }
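Note: the recurring change in the abi and tunnel engines above is the idToRawInput map: user-supplied names or short IDs are resolved to containers once, and each report then echoes back the exact string the user typed via the new RawInput field instead of the resolved full ID. A toy sketch of the pattern (the container IDs and inputs are invented):

    package main

    import "fmt"

    type ctr struct{ id string }

    func main() {
        // Pretend lookup results; the resolver returns the containers and
        // the raw inputs index-aligned, as getContainersAndInputByContext does.
        rawInputs := []string{"web", "db1"}
        ctrs := []ctr{{id: "1f3a9c0d42aa"}, {id: "9c0d42aa1f3a"}}

        idToRawInput := map[string]string{}
        if len(rawInputs) == len(ctrs) {
            for i := range ctrs {
                idToRawInput[ctrs[i].id] = rawInputs[i]
            }
        }

        for _, c := range ctrs {
            fmt.Printf("removed %s (requested as %q)\n", c.id, idToRawInput[c.id])
        }
    }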
diff --git a/pkg/k8s.io/api/core/v1/types.go b/pkg/k8s.io/api/core/v1/types.go
index 39a675dae..384965769 100644
--- a/pkg/k8s.io/api/core/v1/types.go
+++ b/pkg/k8s.io/api/core/v1/types.go
@@ -56,7 +56,8 @@ type VolumeSource struct {
    // ConfigMap represents a configMap that should populate this volume
    // +optional
    ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"`
-    Secret *SecretVolumeSource
+    // Secret represents a secret that should be mounted as a volume
+    Secret *SecretVolumeSource `json:"secret,omitempty"`
 }
 
 // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go
index 189723ac7..9a57102f0 100644
--- a/pkg/machine/wsl/machine.go
+++ b/pkg/machine/wsl/machine.go
@@ -56,7 +56,9 @@ rm -f /etc/systemd/system/getty.target.wants/getty@tty1.service
 rm -f /etc/systemd/system/multi-user.target.wants/systemd-resolved.service
 rm -f /etc/systemd/system/dbus-org.freedesktop.resolve1.service
 ln -fs /dev/null /etc/systemd/system/console-getty.service
+ln -fs /dev/null /etc/systemd/system/systemd-oomd.socket
 mkdir -p /etc/systemd/system/systemd-sysusers.service.d/
+echo CREATE_MAIL_SPOOL=no >> /etc/default/useradd
 adduser -m [USER] -G wheel
 mkdir -p /home/[USER]/.config/systemd/[USER]/
 chown [USER]:[USER] /home/[USER]/.config
@@ -89,9 +91,18 @@ fi
 
 const enterns = "#!/bin/bash\n" + sysdpid + `
 if [ ! -z "$SYSDPID" ] && [ "$SYSDPID" != "1" ]; then
-    nsenter -m -p -t $SYSDPID "$@"
-fi
-`
+    NSENTER=("nsenter" "-m" "-p" "-t" "$SYSDPID" "--wd=$PWD")
+
+    if [ "$UID" != "0" ]; then
+        NSENTER=("sudo" "${NSENTER[@]}")
+        if [ "$#" != "0" ]; then
+            NSENTER+=("sudo" "-u" "$USER")
+        else
+            NSENTER+=("su" "-l" "$USER")
+        fi
+    fi
+    "${NSENTER[@]}" "$@"
+fi`
 
 const waitTerm = sysdpid + `
 if [ ! -z "$SYSDPID" ]; then
@@ -99,6 +110,10 @@ if [ ! -z "$SYSDPID" ]; then
 fi
 `
 
+const wslConf = `[user]
+default=[USER]
+`
+
 // WSL kernel does not have sg and crypto_user modules
 const overrideSysusers = `[Service]
 LoadCredential=
@@ -375,6 +390,9 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
        return false, err
    }
 
+    // Cycle so that user change goes into effect
+    _ = terminateDist(dist)
+
    return true, nil
 }
 
@@ -450,12 +468,12 @@ func provisionWSLDist(v *MachineVM) (string, error) {
    dist := toDist(v.Name)
    fmt.Println("Importing operating system into WSL (this may take a few minutes on a new WSL install)...")
-    if err = runCmdPassThrough("wsl", "--import", dist, distTarget, v.ImagePath); err != nil {
+    if err = runCmdPassThrough("wsl", "--import", dist, distTarget, v.ImagePath, "--version", "2"); err != nil {
        return "", fmt.Errorf("the WSL import of guest OS failed: %w", err)
    }
 
    // Fixes newuidmap
-    if err = runCmdPassThrough("wsl", "-d", dist, "rpm", "-q", "--restore", "shadow-utils", "2>/dev/null"); err != nil {
+    if err = wslInvoke(dist, "rpm", "-q", "--restore", "shadow-utils", "2>/dev/null"); err != nil {
        return "", fmt.Errorf("package permissions restore of shadow-utils on guest OS failed: %w", err)
    }
 
@@ -463,7 +481,7 @@ func provisionWSLDist(v *MachineVM) (string, error) {
    // operation when mount was not present on the initial start. Force a cycle so that it won't
    // repeatedly complain.
    if winVersionAtLeast(10, 0, 22000) {
-        if err := runCmdPassThrough("wsl", "--terminate", dist); err != nil {
+        if err := terminateDist(dist); err != nil {
            logrus.Warnf("could not cycle WSL dist: %s", err.Error())
        }
    }
@@ -478,16 +496,16 @@ func createKeys(v *MachineVM, dist string, sshDir string) error {
        return fmt.Errorf("could not create ssh directory: %w", err)
    }
 
-    if err := runCmdPassThrough("wsl", "--terminate", dist); err != nil {
+    if err := terminateDist(dist); err != nil {
        return fmt.Errorf("could not cycle WSL dist: %w", err)
    }
 
-    key, err := machine.CreateSSHKeysPrefix(sshDir, v.Name, true, true, "wsl", "-d", dist)
+    key, err := wslCreateKeys(sshDir, v.Name, dist)
    if err != nil {
        return fmt.Errorf("could not create ssh keys: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", key+"\n", "-d", dist, "sh", "-c", "mkdir -p /root/.ssh;"+
+    if err := wslPipe(key+"\n", dist, "sh", "-c", "mkdir -p /root/.ssh;"+
        "cat >> /root/.ssh/authorized_keys; chmod 600 /root/.ssh/authorized_keys"); err != nil {
        return fmt.Errorf("could not create root authorized keys on guest OS: %w", err)
    }
@@ -495,7 +513,7 @@ func createKeys(v *MachineVM, dist string, sshDir string) error {
    userAuthCmd := withUser("mkdir -p /home/[USER]/.ssh;"+
        "cat >> /home/[USER]/.ssh/authorized_keys; chown -R [USER]:[USER] /home/[USER]/.ssh;"+
        "chmod 600 /home/[USER]/.ssh/authorized_keys", user)
-    if err := pipeCmdPassThrough("wsl", key+"\n", "-d", dist, "sh", "-c", userAuthCmd); err != nil {
+    if err := wslPipe(key+"\n", dist, "sh", "-c", userAuthCmd); err != nil {
        return fmt.Errorf("could not create '%s' authorized keys on guest OS: %w", v.RemoteUsername, err)
    }
 
@@ -504,25 +522,25 @@ func createKeys(v *MachineVM, dist string, sshDir string) error {
 func configureSystem(v *MachineVM, dist string) error {
    user := v.RemoteUsername
-    if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", fmt.Sprintf(appendPort, v.Port, v.Port)); err != nil {
+    if err := wslInvoke(dist, "sh", "-c", fmt.Sprintf(appendPort, v.Port, v.Port)); err != nil {
        return fmt.Errorf("could not configure SSH port for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", withUser(configServices, user), "-d", dist, "sh"); err != nil {
+    if err := wslPipe(withUser(configServices, user), dist, "sh"); err != nil {
        return fmt.Errorf("could not configure systemd settings for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", sudoers, "-d", dist, "sh", "-c", "cat >> /etc/sudoers"); err != nil {
+    if err := wslPipe(sudoers, dist, "sh", "-c", "cat >> /etc/sudoers"); err != nil {
        return fmt.Errorf("could not add wheel to sudoers: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", overrideSysusers, "-d", dist, "sh", "-c",
+    if err := wslPipe(overrideSysusers, dist, "sh", "-c",
        "cat > /etc/systemd/system/systemd-sysusers.service.d/override.conf"); err != nil {
        return fmt.Errorf("could not generate systemd-sysusers override for guest OS: %w", err)
    }
 
    lingerCmd := withUser("cat > /home/[USER]/.config/systemd/[USER]/linger-example.service", user)
-    if err := pipeCmdPassThrough("wsl", lingerService, "-d", dist, "sh", "-c", lingerCmd); err != nil {
+    if err := wslPipe(lingerService, dist, "sh", "-c", lingerCmd); err != nil {
        return fmt.Errorf("could not generate linger service for guest OS: %w", err)
    }
 
@@ -530,24 +548,28 @@ func configureSystem(v *MachineVM, dist string) error {
        return err
    }
 
-    if err := pipeCmdPassThrough("wsl", withUser(lingerSetup, user), "-d", dist, "sh"); err != nil {
-        return fmt.Errorf("could not configure systemd settomgs for guest OS: %w", err)
+    if err := wslPipe(withUser(lingerSetup, user), dist, "sh"); err != nil {
+        return fmt.Errorf("could not configure systemd settings for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", containersConf, "-d", dist, "sh", "-c", "cat > /etc/containers/containers.conf"); err != nil {
+    if err := wslPipe(containersConf, dist, "sh", "-c", "cat > /etc/containers/containers.conf"); err != nil {
        return fmt.Errorf("could not create containers.conf for guest OS: %w", err)
    }
 
-    if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", "echo wsl > /etc/containers/podman-machine"); err != nil {
+    if err := wslInvoke(dist, "sh", "-c", "echo wsl > /etc/containers/podman-machine"); err != nil {
        return fmt.Errorf("could not create podman-machine file for guest OS: %w", err)
    }
 
+    if err := wslPipe(withUser(wslConf, user), dist, "sh", "-c", "cat > /etc/wsl.conf"); err != nil {
+        return fmt.Errorf("could not configure wsl config for guest OS: %w", err)
+    }
+
    return nil
 }
 
 func configureProxy(dist string, useProxy bool) error {
    if !useProxy {
-        _ = runCmdPassThrough("wsl", "-d", dist, "sh", "-c", clearProxySettings)
+        _ = wslInvoke(dist, "sh", "-c", clearProxySettings)
        return nil
    }
    var content string
@@ -561,17 +583,17 @@ func configureProxy(dist string, useProxy bool) error {
        }
    }
 
-    if err := pipeCmdPassThrough("wsl", content, "-d", dist, "sh", "-c", proxyConfigAttempt); err != nil {
+    if err := wslPipe(content, dist, "sh", "-c", proxyConfigAttempt); err != nil {
        const failMessage = "Failure creating proxy configuration"
        if exitErr, isExit := err.(*exec.ExitError); isExit && exitErr.ExitCode() != 42 {
            return fmt.Errorf("%v: %w", failMessage, err)
        }
        fmt.Println("Installing proxy support")
-        _ = pipeCmdPassThrough("wsl", proxyConfigSetup, "-d", dist, "sh", "-c",
+        _ = wslPipe(proxyConfigSetup, dist, "sh", "-c",
            "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit")
 
-        if err = pipeCmdPassThrough("wsl", content, "-d", dist, "/usr/local/bin/proxyinit"); err != nil {
+        if err = wslPipe(content, dist, "/usr/local/bin/proxyinit"); err != nil {
            return fmt.Errorf("%v: %w", failMessage, err)
        }
    }
@@ -581,7 +603,7 @@ func configureProxy(dist string, useProxy bool) error {
 
 func enableUserLinger(v *MachineVM, dist string) error {
    lingerCmd := "mkdir -p /var/lib/systemd/linger; touch /var/lib/systemd/linger/" + v.RemoteUsername
-    if err := runCmdPassThrough("wsl", "-d", dist, "sh", "-c", lingerCmd); err != nil {
+    if err := wslInvoke(dist, "sh", "-c", lingerCmd); err != nil {
        return fmt.Errorf("could not enable linger for remote user on guest OS: %w", err)
    }
 
@@ -589,26 +611,26 @@ func enableUserLinger(v *MachineVM, dist string) error {
 }
 
 func installScripts(dist string) error {
-    if err := pipeCmdPassThrough("wsl", enterns, "-d", dist, "sh", "-c",
+    if err := wslPipe(enterns, dist, "sh", "-c",
        "cat > /usr/local/bin/enterns; chmod 755 /usr/local/bin/enterns"); err != nil {
        return fmt.Errorf("could not create enterns script for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", profile, "-d", dist, "sh", "-c",
+    if err := wslPipe(profile, dist, "sh", "-c",
        "cat > /etc/profile.d/enterns.sh"); err != nil {
        return fmt.Errorf("could not create motd profile script for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", wslmotd, "-d", dist, "sh", "-c", "cat > /etc/wslmotd"); err != nil {
+    if err := wslPipe(wslmotd, dist, "sh", "-c", "cat > /etc/wslmotd"); err != nil {
        return fmt.Errorf("could not create a WSL MOTD for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", bootstrap, "-d", dist, "sh", "-c",
+    if err := wslPipe(bootstrap, dist, "sh", "-c",
        "cat > /root/bootstrap; chmod 755 /root/bootstrap"); err != nil {
        return fmt.Errorf("could not create bootstrap script for guest OS: %w", err)
    }
 
-    if err := pipeCmdPassThrough("wsl", proxyConfigSetup, "-d", dist, "sh", "-c",
+    if err := wslPipe(proxyConfigSetup, dist, "sh", "-c",
        "cat > /usr/local/bin/proxyinit; chmod 755 /usr/local/bin/proxyinit"); err != nil {
        return fmt.Errorf("could not create proxyinit script for guest OS: %w", err)
    }
@@ -844,6 +866,22 @@ func withUser(s string, user string) string {
    return strings.ReplaceAll(s, "[USER]", user)
 }
 
+func wslInvoke(dist string, arg ...string) error {
+    newArgs := []string{"-u", "root", "-d", dist}
+    newArgs = append(newArgs, arg...)
+    return runCmdPassThrough("wsl", newArgs...)
+}
+
+func wslPipe(input string, dist string, arg ...string) error {
+    newArgs := []string{"-u", "root", "-d", dist}
+    newArgs = append(newArgs, arg...)
+    return pipeCmdPassThrough("wsl", input, newArgs...)
+}
+
+func wslCreateKeys(sshDir string, name string, dist string) (string, error) {
+    return machine.CreateSSHKeysPrefix(sshDir, name, true, true, "wsl", "-u", "root", "-d", dist)
+}
+
 func runCmdPassThrough(name string, arg ...string) error {
    logrus.Debugf("Running command: %s %v", name, arg)
    cmd := exec.Command(name, arg...)
@@ -935,7 +973,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
        return err
    }
 
-    err := runCmdPassThrough("wsl", "-d", dist, "/root/bootstrap")
+    err := wslInvoke(dist, "/root/bootstrap")
    if err != nil {
        return fmt.Errorf("the WSL bootstrap script failed: %w", err)
    }
@@ -1124,7 +1162,7 @@ func isWSLRunning(dist string) (bool, error) {
 }
 
 func isSystemdRunning(dist string) (bool, error) {
-    cmd := exec.Command("wsl", "-d", dist, "sh")
+    cmd := exec.Command("wsl", "-u", "root", "-d", dist, "sh")
    cmd.Stdin = strings.NewReader(sysdpid + "\necho $SYSDPID\n")
    out, err := cmd.StdoutPipe()
    if err != nil {
@@ -1174,13 +1212,13 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
        fmt.Fprintf(os.Stderr, "Could not stop API forwarding service (win-sshproxy.exe): %s\n", err.Error())
    }
 
-    cmd := exec.Command("wsl", "-d", dist, "sh")
+    cmd := exec.Command("wsl", "-u", "root", "-d", dist, "sh")
    cmd.Stdin = strings.NewReader(waitTerm)
    if err = cmd.Start(); err != nil {
        return fmt.Errorf("executing wait command: %w", err)
    }
 
-    exitCmd := exec.Command("wsl", "-d", dist, "/usr/local/bin/enterns", "systemctl", "exit", "0")
+    exitCmd := exec.Command("wsl", "-u", "root", "-d", dist, "/usr/local/bin/enterns", "systemctl", "exit", "0")
    if err = exitCmd.Run(); err != nil {
        return fmt.Errorf("stopping sysd: %w", err)
    }
@@ -1189,12 +1227,12 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
        return err
    }
 
-    cmd = exec.Command("wsl", "--terminate", dist)
-    if err = cmd.Run(); err != nil {
-        return err
-    }
+    return terminateDist(dist)
+}
 
-    return nil
+func terminateDist(dist string) error {
+    cmd := exec.Command("wsl", "--terminate", dist)
+    return cmd.Run()
 }
 
 func (v *MachineVM) State(bypass bool) (machine.Status, error) {
@@ -1438,7 +1476,7 @@ func getCPUs(vm *MachineVM) (uint64, error) {
    if run, _ := isWSLRunning(dist); !run {
        return 0, nil
    }
-    cmd := exec.Command("wsl", "-d", dist, "nproc")
+    cmd := exec.Command("wsl", "-u", "root", "-d", dist, "nproc")
    out, err := cmd.StdoutPipe()
    if err != nil {
        return 0, err
@@ -1462,7 +1500,7 @@ func getMem(vm *MachineVM) (uint64, error) {
    if run, _ := isWSLRunning(dist); !run {
        return 0, nil
    }
-    cmd := exec.Command("wsl", "-d", dist, "cat", "/proc/meminfo")
+    cmd := exec.Command("wsl", "-u", "root", "-d", dist, "cat", "/proc/meminfo")
    out, err := cmd.StdoutPipe()
    if err != nil {
        return 0, err
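Note: because /etc/wsl.conf now makes the machine user the distribution's default login, every management command in machine.go has to opt back into root explicitly with `wsl -u root -d <dist>`; the new wslInvoke/wslPipe helpers centralize that. A standalone sketch of the helper pair (stdio wiring assumed; the real runCmdPassThrough/pipeCmdPassThrough also add debug logging):

    package main

    import (
        "os"
        "os/exec"
        "strings"
    )

    // wslInvoke runs a command inside the distribution as root.
    func wslInvoke(dist string, arg ...string) error {
        newArgs := append([]string{"-u", "root", "-d", dist}, arg...)
        cmd := exec.Command("wsl", newArgs...)
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        return cmd.Run()
    }

    // wslPipe does the same but feeds input on stdin, e.g. for `cat > file`.
    func wslPipe(input string, dist string, arg ...string) error {
        newArgs := append([]string{"-u", "root", "-d", dist}, arg...)
        cmd := exec.Command("wsl", newArgs...)
        cmd.Stdin = strings.NewReader(input)
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        return cmd.Run()
    }

    func main() {
        // Only meaningful on a Windows host with this WSL distribution.
        _ = wslInvoke("podman-machine-default", "sh", "-c", "id")
        _ = wslPipe("hello\n", "podman-machine-default", "sh", "-c", "cat > /tmp/example")
    }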
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
index 66905202d..1f8c519b7 100644
--- a/pkg/systemd/generate/containers.go
+++ b/pkg/systemd/generate/containers.go
@@ -378,6 +378,9 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
        fs.StringArrayP("env", "e", nil, "")
        fs.String("sdnotify", "", "")
        fs.String("restart", "", "")
+        // have to define extra -h flag to prevent help error when parsing -h hostname
+        // https://github.com/containers/podman/issues/15124
+        fs.StringP("help", "h", "", "")
        if err := fs.Parse(remainingCmd); err != nil {
            return "", fmt.Errorf("parsing remaining command-line arguments: %w", err)
        }
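Note: the extra -h flag above works around spf13/pflag's built-in help handling: an undefined -h shorthand is treated as a help request, so parsing a `podman run -h hostname ...` create command would abort with pflag.ErrHelp instead of consuming the hostname (issue #15124). Binding a throwaway string flag to -h makes the parser swallow the value. A minimal repro of both behaviors (the flag-set name and arguments are illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        args := []string{"-h", "hostname", "awesome-image:latest"}

        // Without a flag bound to -h, pflag falls back to help handling
        // and Parse returns pflag.ErrHelp.
        fs := pflag.NewFlagSet("run", pflag.ContinueOnError)
        fs.Usage = func() {} // keep the demo's output quiet
        fmt.Println(fs.Parse(args)) // pflag: help requested

        // With the throwaway flag defined, "-h hostname" parses like any
        // other option and the image is left over as a positional arg.
        fs2 := pflag.NewFlagSet("run", pflag.ContinueOnError)
        fs2.StringP("help", "h", "", "")
        fmt.Println(fs2.Parse(args)) // <nil>
        fmt.Println(fs2.Args())     // [awesome-image:latest]
    }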
diff --git a/pkg/systemd/generate/containers_test.go b/pkg/systemd/generate/containers_test.go
index 9a9e03a58..873cbfbb3 100644
--- a/pkg/systemd/generate/containers_test.go
+++ b/pkg/systemd/generate/containers_test.go
@@ -815,6 +815,37 @@ NotifyAccess=all
 WantedBy=default.target
 `
 
+    goodNewWithHostname := `# jadda-jadda.service
+# autogenerated by Podman CI
+
+[Unit]
+Description=Podman jadda-jadda.service
+Documentation=man:podman-generate-systemd(1)
+Wants=network-online.target
+After=network-online.target
+RequiresMountsFor=/var/run/containers/storage
+
+[Service]
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=on-failure
+TimeoutStopSec=70
+ExecStartPre=/bin/rm -f %t/%n.ctr-id
+ExecStart=/usr/bin/podman run \
+    --cidfile=%t/%n.ctr-id \
+    --cgroups=no-conmon \
+    --rm \
+    --sdnotify=conmon \
+    -d \
+    -h hostname awesome-image:latest
+ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
+ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id
+Type=notify
+NotifyAccess=all
+
+[Install]
+WantedBy=default.target
+`
+
    templateGood := `# container-foo@.service
 # autogenerated by Podman CI
 
@@ -1432,6 +1463,25 @@ WantedBy=default.target
        false,
        false,
    },
+    {"good with -h hostname",
+        containerInfo{
+            Executable:        "/usr/bin/podman",
+            ServiceName:       "jadda-jadda",
+            ContainerNameOrID: "jadda-jadda",
+            PIDFile:           "/var/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
+            StopTimeout:       10,
+            PodmanVersion:     "CI",
+            GraphRoot:         "/var/lib/containers/storage",
+            RunRoot:           "/var/run/containers/storage",
+            CreateCommand:     []string{"I'll get stripped", "create", "-h", "hostname", "awesome-image:latest"},
+            EnvVariable:       define.EnvVariable,
+        },
+        goodNewWithHostname,
+        true,
+        false,
+        false,
+        false,
+    },
    {"good template",
        containerInfo{
            Executable: "/usr/bin/podman",