Diffstat (limited to 'pkg')
-rw-r--r--  pkg/api/handlers/compat/containers_logs.go  |  14
-rw-r--r--  pkg/api/handlers/compat/networks.go          |  47
-rw-r--r--  pkg/api/handlers/types.go                    |  43
-rw-r--r--  pkg/autoupdate/autoupdate.go                 | 254
-rw-r--r--  pkg/bindings/README.md                       |  77
-rw-r--r--  pkg/bindings/containers/attach.go            |  19
-rw-r--r--  pkg/bindings/images/build.go                 |  16
-rw-r--r--  pkg/domain/entities/auto-update.go           |  19
-rw-r--r--  pkg/domain/entities/engine_container.go      |   2
-rw-r--r--  pkg/domain/infra/abi/auto-update.go          |   9
-rw-r--r--  pkg/domain/infra/abi/system.go               |   2
-rw-r--r--  pkg/domain/infra/tunnel/auto-update.go       |   2
-rw-r--r--  pkg/systemd/generate/containers.go           |   7
13 files changed, 336 insertions(+), 175 deletions(-)
diff --git a/pkg/api/handlers/compat/containers_logs.go b/pkg/api/handlers/compat/containers_logs.go
index cb4dee4d2..656e2c627 100644
--- a/pkg/api/handlers/compat/containers_logs.go
+++ b/pkg/api/handlers/compat/containers_logs.go
@@ -72,11 +72,12 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
 
     var until time.Time
     if _, found := r.URL.Query()["until"]; found {
-        // FIXME: until != since but the logs backend does not yet support until.
-        since, err = util.ParseInputTime(query.Until)
-        if err != nil {
-            utils.BadRequest(w, "until", query.Until, err)
-            return
+        if query.Until != "0" {
+            until, err = util.ParseInputTime(query.Until)
+            if err != nil {
+                utils.BadRequest(w, "until", query.Until, err)
+                return
+            }
         }
     }
 
@@ -84,6 +85,7 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
         Details:    true,
         Follow:     query.Follow,
         Since:      since,
+        Until:      until,
         Tail:       tail,
         Timestamps: query.Timestamps,
     }
@@ -119,7 +121,7 @@ func LogsFromContainer(w http.ResponseWriter, r *http.Request) {
 
     for line := range logChannel {
         if _, found := r.URL.Query()["until"]; found {
-            if line.Time.After(until) {
+            if line.Time.After(until) && !until.IsZero() {
                 break
             }
         }
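Note for reviewers: the handler leaves `until` as the zero `time.Time` when the parameter is absent or "0", so the zero value doubles as "no upper bound". A minimal standalone sketch of that check (the type and function names here are mine, not from the patch):

```go
package main

import (
	"fmt"
	"time"
)

// logLine is a stand-in for the timestamped entries the handler streams.
type logLine struct {
	Time time.Time
	Msg  string
}

// withinUntil mirrors the patched check: a zero "until" means no upper
// bound; otherwise anything after the cutoff is dropped.
func withinUntil(l logLine, until time.Time) bool {
	return until.IsZero() || !l.Time.After(until)
}

func main() {
	until := time.Now()
	lines := []logLine{
		{Time: until.Add(-time.Minute), Msg: "kept"},
		{Time: until.Add(time.Minute), Msg: "dropped"},
	}
	for _, l := range lines {
		if withinUntil(l, until) {
			fmt.Println(l.Msg)
		}
	}
}
```

Without the `IsZero` guard, a missing `until` would compare every line against the zero time and drop the whole stream.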
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 4e1f31404..b990a916b 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -25,6 +25,12 @@ import (
     "github.com/sirupsen/logrus"
 )
 
+type pluginInterface struct {
+    PluginType string             `json:"type"`
+    IPAM       network.IPAMConfig `json:"ipam"`
+    IsGW       bool               `json:"isGateway"`
+}
+
 func InspectNetwork(w http.ResponseWriter, r *http.Request) {
     runtime := r.Context().Value("runtime").(*libpod.Runtime)
 
@@ -103,12 +109,12 @@ func getNetworkResourceByNameOrID(nameOrID string, runtime *libpod.Runtime, filt
         }
     }
 
-    // No Bridge plugin means we bail
-    bridge, err := genericPluginsToBridge(conf.Plugins, network.DefaultNetworkDriver)
+    plugin, err := getPlugin(conf.Plugins)
     if err != nil {
         return nil, err
     }
-    for _, outer := range bridge.IPAM.Ranges {
+
+    for _, outer := range plugin.IPAM.Ranges {
         for _, n := range outer {
             ipamConfig := dockerNetwork.IPAMConfig{
                 Subnet:  n.Subnet,
@@ -140,19 +146,26 @@ func getNetworkResourceByNameOrID(nameOrID string, runtime *libpod.Runtime, filt
         labels = map[string]string{}
     }
 
+    isInternal := false
+    dockerDriver := plugin.PluginType
+    if plugin.PluginType == network.DefaultNetworkDriver {
+        isInternal = !plugin.IsGW
+        dockerDriver = "default"
+    }
+
     report := types.NetworkResource{
         Name:       conf.Name,
         ID:         networkid.GetNetworkID(conf.Name),
         Created:    time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)), // nolint: unconvert
         Scope:      "local",
-        Driver:     network.DefaultNetworkDriver,
+        Driver:     plugin.PluginType,
         EnableIPv6: false,
         IPAM: dockerNetwork.IPAM{
-            Driver:  "default",
+            Driver:  dockerDriver,
             Options: map[string]string{},
             Config:  ipamConfigs,
         },
-        Internal:   !bridge.IsGW,
+        Internal:   isInternal,
         Attachable: false,
         Ingress:    false,
         ConfigFrom: dockerNetwork.ConfigReference{},
@@ -166,23 +179,19 @@ func getNetworkResourceByNameOrID(nameOrID string, runtime *libpod.Runtime, filt
     return &report, nil
 }
 
-func genericPluginsToBridge(plugins []*libcni.NetworkConfig, pluginType string) (network.HostLocalBridge, error) {
-    var bridge network.HostLocalBridge
-    generic, err := findPluginByName(plugins, pluginType)
-    if err != nil {
-        return bridge, err
-    }
-    err = json.Unmarshal(generic, &bridge)
-    return bridge, err
-}
+func getPlugin(plugins []*libcni.NetworkConfig) (pluginInterface, error) {
+    var plugin pluginInterface
 
-func findPluginByName(plugins []*libcni.NetworkConfig, pluginType string) ([]byte, error) {
     for _, p := range plugins {
-        if pluginType == p.Network.Type {
-            return p.Bytes, nil
+        for _, pluginType := range network.SupportedNetworkDrivers {
+            if pluginType == p.Network.Type {
+                err := json.Unmarshal(p.Bytes, &plugin)
+                return plugin, err
+            }
         }
     }
-    return nil, errors.New("unable to find bridge plugin")
+
+    return plugin, errors.New("unable to find supported plugin")
 }
 
 func ListNetworks(w http.ResponseWriter, r *http.Request) {
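The shape of `getPlugin` is worth calling out: it scans the CNI conflist for the first entry whose `type` is a supported driver and unmarshals only that entry's raw JSON. A rough self-contained sketch of the same idea, where `pluginConfig`, `supported`, and `findSupported` are illustrative stand-ins for `pluginInterface`, `network.SupportedNetworkDrivers`, and `getPlugin` (unlike the patch, which checks `p.Network.Type` before decoding, this sketch decodes each entry first):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pluginConfig mirrors the fields the compat handler extracts; the JSON
// tags follow the CNI conflist schema.
type pluginConfig struct {
	PluginType string `json:"type"`
	IsGW       bool   `json:"isGateway"`
}

// supported stands in for network.SupportedNetworkDrivers.
var supported = []string{"bridge", "macvlan", "ipvlan"}

func findSupported(raw []json.RawMessage) (pluginConfig, error) {
	var p pluginConfig
	for _, msg := range raw {
		if err := json.Unmarshal(msg, &p); err != nil {
			return p, err
		}
		for _, t := range supported {
			if p.PluginType == t {
				return p, nil
			}
		}
	}
	return p, fmt.Errorf("unable to find supported plugin")
}

func main() {
	conflist := []json.RawMessage{
		json.RawMessage(`{"type":"bridge","isGateway":true}`),
	}
	p, err := findSupported(conflist)
	fmt.Println(p.PluginType, p.IsGW, err) // bridge true <nil>
}
```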
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index ee157cb56..59f948567 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -232,27 +232,32 @@ func ImageDataToImageInspect(ctx context.Context, l *libimage.Image) (*ImageInsp
         Name: info.GraphDriver.Name,
         Data: info.GraphDriver.Data,
     }
+    // Add in basic ContainerConfig to satisfy docker-compose
+    cc := new(dockerContainer.Config)
+    cc.Hostname = info.ID[0:11] // short ID is the hostname
+    cc.Volumes = info.Config.Volumes
+
     dockerImageInspect := docker.ImageInspect{
-        Architecture:  info.Architecture,
-        Author:        info.Author,
-        Comment:       info.Comment,
-        Config:        &config,
-        Created:       l.Created().Format(time.RFC3339Nano),
-        DockerVersion: info.Version,
-        GraphDriver:   graphDriver,
-        ID:            "sha256:" + l.ID(),
-        Metadata:      docker.ImageMetadata{},
-        Os:            info.Os,
-        OsVersion:     info.Version,
-        Parent:        info.Parent,
-        RepoDigests:   info.RepoDigests,
-        RepoTags:      info.RepoTags,
-        RootFS:        rootfs,
-        Size:          info.Size,
-        Variant:       "",
-        VirtualSize:   info.VirtualSize,
+        Architecture:    info.Architecture,
+        Author:          info.Author,
+        Comment:         info.Comment,
+        Config:          &config,
+        ContainerConfig: cc,
+        Created:         l.Created().Format(time.RFC3339Nano),
+        DockerVersion:   info.Version,
+        GraphDriver:     graphDriver,
+        ID:              "sha256:" + l.ID(),
+        Metadata:        docker.ImageMetadata{},
+        Os:              info.Os,
+        OsVersion:       info.Version,
+        Parent:          info.Parent,
+        RepoDigests:     info.RepoDigests,
+        RepoTags:        info.RepoTags,
+        RootFS:          rootfs,
+        Size:            info.Size,
+        Variant:         "",
+        VirtualSize:     info.VirtualSize,
     }
-    // TODO: consider filling the container config.
     return &ImageInspect{dockerImageInspect}, nil
 }
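For context, docker-compose only reads `Hostname` and `Volumes` out of `ContainerConfig`, which is why the patch fills in just those two fields. A tiny hedged sketch of the short-ID-as-hostname idea, using a local stand-in struct rather than the Docker API type (note the patch slices eleven bytes of the ID, while Docker's conventional short ID is twelve characters):

```go
package main

import "fmt"

// containerConfig holds only the fields docker-compose inspects; it stands
// in for the Docker API's container.Config.
type containerConfig struct {
	Hostname string
	Volumes  map[string]struct{}
}

func main() {
	id := "f2f2e3f4a1b2c3d4e5f60718293a4b5c6d7e8f90a1b2c3d4e5f60718293a4b5c"
	cc := containerConfig{
		Hostname: id[0:11], // same slice the patch uses
		Volumes:  map[string]struct{}{"/data": {}},
	}
	fmt.Println(cc.Hostname)
}
```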
diff --git a/pkg/autoupdate/autoupdate.go b/pkg/autoupdate/autoupdate.go
index 0a13e7e74..c51e2cd03 100644
--- a/pkg/autoupdate/autoupdate.go
+++ b/pkg/autoupdate/autoupdate.go
@@ -9,12 +9,13 @@ import (
     "github.com/containers/common/pkg/config"
     "github.com/containers/image/v5/docker"
     "github.com/containers/image/v5/docker/reference"
-    "github.com/containers/image/v5/manifest"
     "github.com/containers/image/v5/transports/alltransports"
     "github.com/containers/podman/v3/libpod"
     "github.com/containers/podman/v3/libpod/define"
+    "github.com/containers/podman/v3/pkg/domain/entities"
     "github.com/containers/podman/v3/pkg/systemd"
     systemdDefine "github.com/containers/podman/v3/pkg/systemd/define"
+    "github.com/coreos/go-systemd/v22/dbus"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
 )
@@ -74,12 +75,6 @@ func LookupPolicy(s string) (Policy, error) {
     return "", errors.Errorf("invalid auto-update policy %q: valid policies are %+q", s, keys)
 }
 
-// Options include parameters for auto updates.
-type Options struct {
-    // Authfile to use when contacting registries.
-    Authfile string
-}
-
 // ValidateImageReference checks if the specified imageName is a fully-qualified
 // image reference to the docker transport (without digest). Such a reference
 // includes a domain, name and tag (e.g., quay.io/podman/stable:latest). The
@@ -119,7 +114,7 @@ func ValidateImageReference(imageName string) error {
 //
 // It returns a slice of successfully restarted systemd units and a slice of
 // errors encountered during auto update.
-func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
+func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
     // Create a map from `image ID -> []*Container`.
     containerMap, errs := imageContainersMap(runtime)
     if len(containerMap) == 0 {
@@ -130,7 +125,7 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
     listOptions := &libimage.ListImagesOptions{
         Filters: []string{"readonly=false"},
     }
-    imagesSlice, err := runtime.LibimageRuntime().ListImages(context.Background(), nil, listOptions)
+    imagesSlice, err := runtime.LibimageRuntime().ListImages(ctx, nil, listOptions)
     if err != nil {
         return nil, []error{err}
     }
@@ -147,8 +142,8 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
     }
     defer conn.Close()
 
-    // Update images.
-    containersToRestart := []*libpod.Container{}
+    // Update all images/containers according to their auto-update policy.
+    var allReports []*entities.AutoUpdateReport
     updatedRawImages := make(map[string]bool)
     for imageID, policyMapper := range containerMap {
         image, exists := imageMap[imageID]
@@ -156,76 +151,149 @@ func AutoUpdate(runtime *libpod.Runtime, options Options) ([]string, []error) {
             errs = append(errs, errors.Errorf("container image ID %q not found in local storage", imageID))
             return nil, errs
         }
-        // Now we have to check if the image of any containers must be updated.
-        // Note that the image ID is NOT enough for this check as a given image
-        // may have multiple tags.
-        for _, registryCtr := range policyMapper[PolicyRegistryImage] {
-            cid := registryCtr.ID()
-            rawImageName := registryCtr.RawImageName()
-            if rawImageName == "" {
-                errs = append(errs, errors.Errorf("error registry auto-updating container %q: raw-image name is empty", cid))
-            }
-            readAuthenticationPath(registryCtr, options)
-            needsUpdate, err := newerRemoteImageAvailable(runtime, image, rawImageName, options)
+
+        for _, ctr := range policyMapper[PolicyRegistryImage] {
+            report, err := autoUpdateRegistry(ctx, image, ctr, updatedRawImages, &options, conn, runtime)
             if err != nil {
-                errs = append(errs, errors.Wrapf(err, "error registry auto-updating container %q: image check for %q failed", cid, rawImageName))
-                continue
+                errs = append(errs, err)
             }
-
-            if needsUpdate {
-                logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
-                if _, updated := updatedRawImages[rawImageName]; !updated {
-                    _, err = updateImage(runtime, rawImageName, options)
-                    if err != nil {
-                        errs = append(errs, errors.Wrapf(err, "error registry auto-updating container %q: image update for %q failed", cid, rawImageName))
-                        continue
-                    }
-                    updatedRawImages[rawImageName] = true
-                }
-                containersToRestart = append(containersToRestart, registryCtr)
+            if report != nil {
+                allReports = append(allReports, report)
             }
         }
 
-        for _, localCtr := range policyMapper[PolicyLocalImage] {
-            cid := localCtr.ID()
-            rawImageName := localCtr.RawImageName()
-            if rawImageName == "" {
-                errs = append(errs, errors.Errorf("error locally auto-updating container %q: raw-image name is empty", cid))
-            }
-            // This avoids restarting containers unnecessarily.
-            needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
+        for _, ctr := range policyMapper[PolicyLocalImage] {
+            report, err := autoUpdateLocally(ctx, image, ctr, &options, conn, runtime)
             if err != nil {
-                errs = append(errs, errors.Wrapf(err, "error locally auto-updating container %q: image check for %q failed", cid, rawImageName))
-                continue
+                errs = append(errs, err)
             }
-
-            if needsUpdate {
-                logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
-                containersToRestart = append(containersToRestart, localCtr)
+            if report != nil {
+                allReports = append(allReports, report)
             }
         }
     }
 
-    // Restart containers.
-    updatedUnits := []string{}
-    for _, ctr := range containersToRestart {
-        labels := ctr.Labels()
-        unit, exists := labels[systemdDefine.EnvVariable]
-        if !exists {
-            // Shouldn't happen but let's be sure of it.
-            errs = append(errs, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable))
-            continue
-        }
-        _, err := conn.RestartUnit(unit, "replace", nil)
-        if err != nil {
-            errs = append(errs, errors.Wrapf(err, "error auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit))
-            continue
+    return allReports, errs
+}
+
+// autoUpdateRegistry updates the image/container according to the "registry" policy.
+func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.Container, updatedRawImages map[string]bool, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
+    cid := ctr.ID()
+    rawImageName := ctr.RawImageName()
+    if rawImageName == "" {
+        return nil, errors.Errorf("error registry auto-updating container %q: raw-image name is empty", cid)
+    }
+
+    labels := ctr.Labels()
+    unit, exists := labels[systemdDefine.EnvVariable]
+    if !exists {
+        return nil, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
+    }
+
+    report := &entities.AutoUpdateReport{
+        ContainerID:   cid,
+        ContainerName: ctr.Name(),
+        ImageName:     rawImageName,
+        Policy:        PolicyRegistryImage,
+        SystemdUnit:   unit,
+        Updated:       "failed",
+    }
+
+    if _, updated := updatedRawImages[rawImageName]; updated {
+        logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
+        if err := restartSystemdUnit(ctr, unit, conn); err != nil {
+            return report, err
         }
-        logrus.Infof("Successfully restarted systemd unit %q", unit)
-        updatedUnits = append(updatedUnits, unit)
+        report.Updated = "true"
+        return report, nil
+    }
+
+    authfile := getAuthfilePath(ctr, options)
+    needsUpdate, err := newerRemoteImageAvailable(ctx, runtime, image, rawImageName, authfile)
+    if err != nil {
+        return report, errors.Wrapf(err, "error registry auto-updating container %q: image check for %q failed", cid, rawImageName)
+    }
+
+    if !needsUpdate {
+        report.Updated = "false"
+        return report, nil
+    }
+
+    if options.DryRun {
+        report.Updated = "pending"
+        return report, nil
+    }
+
+    if _, err := updateImage(ctx, runtime, rawImageName, options); err != nil {
+        return report, errors.Wrapf(err, "error registry auto-updating container %q: image update for %q failed", cid, rawImageName)
+    }
+    updatedRawImages[rawImageName] = true
+
+    logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
+    if err := restartSystemdUnit(ctr, unit, conn); err != nil {
+        return report, err
     }
 
-    return updatedUnits, errs
+    report.Updated = "true"
+    return report, nil
+}
+
+// autoUpdateLocally updates the image/container according to the "local" policy.
+func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.Container, options *entities.AutoUpdateOptions, conn *dbus.Conn, runtime *libpod.Runtime) (*entities.AutoUpdateReport, error) {
+    cid := ctr.ID()
+    rawImageName := ctr.RawImageName()
+    if rawImageName == "" {
+        return nil, errors.Errorf("error locally auto-updating container %q: raw-image name is empty", cid)
+    }
+
+    labels := ctr.Labels()
+    unit, exists := labels[systemdDefine.EnvVariable]
+    if !exists {
+        return nil, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
+    }
+
+    report := &entities.AutoUpdateReport{
+        ContainerID:   cid,
+        ContainerName: ctr.Name(),
+        ImageName:     rawImageName,
+        Policy:        PolicyLocalImage,
+        SystemdUnit:   unit,
+        Updated:       "failed",
+    }
+
+    needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
+    if err != nil {
+        return report, errors.Wrapf(err, "error locally auto-updating container %q: image check for %q failed", cid, rawImageName)
+    }
+
+    if !needsUpdate {
+        report.Updated = "false"
+        return report, nil
+    }
+
+    if options.DryRun {
+        report.Updated = "pending"
+        return report, nil
+    }
+
+    logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
+    if err := restartSystemdUnit(ctr, unit, conn); err != nil {
+        return report, err
+    }
+
+    report.Updated = "true"
+    return report, nil
+}
+
+// restartSystemdUnit restarts the systemd unit the container is running in.
+func restartSystemdUnit(ctr *libpod.Container, unit string, conn *dbus.Conn) error {
+    _, err := conn.RestartUnit(unit, "replace", nil)
+    if err != nil {
+        return errors.Wrapf(err, "error auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit)
+    }
+
+    logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID())
+    return nil
 }
 
 // imageContainersMap generates a map[image ID] -> [containers using the image]
@@ -280,52 +348,25 @@ func imageContainersMap(runtime *libpod.Runtime) (map[string]policyMapper, []err
     return containerMap, errors
 }
 
-// readAuthenticationPath reads a container's labels and reads authentication path into options
-func readAuthenticationPath(ctr *libpod.Container, options Options) {
+// getAuthfilePath returns an authfile path, if set. The authfile label in the
+// container, if set, has precedence over the one set in the options.
+func getAuthfilePath(ctr *libpod.Container, options *entities.AutoUpdateOptions) string {
     labels := ctr.Labels()
     authFilePath, exists := labels[AuthfileLabel]
     if exists {
-        options.Authfile = authFilePath
+        return authFilePath
     }
+    return options.Authfile
 }
 
-// newerRemoteImageAvailable returns true if the corresponding image on the remote
+// newerRemoteImageAvailable returns true if the corresponding image on the remote
 // registry is newer.
-func newerRemoteImageAvailable(runtime *libpod.Runtime, img *libimage.Image, origName string, options Options) (bool, error) {
+func newerRemoteImageAvailable(ctx context.Context, runtime *libpod.Runtime, img *libimage.Image, origName string, authfile string) (bool, error) {
     remoteRef, err := docker.ParseReference("//" + origName)
     if err != nil {
         return false, err
     }
-
-    data, err := img.Inspect(context.Background(), false)
-    if err != nil {
-        return false, err
-    }
-
-    sys := runtime.SystemContext()
-    sys.AuthFilePath = options.Authfile
-
-    // We need to account for the arch that the image uses. It seems
-    // common on ARM to tweak this option to pull the correct image. See
-    // github.com/containers/podman/issues/6613.
-    sys.ArchitectureChoice = data.Architecture
-
-    remoteImg, err := remoteRef.NewImage(context.Background(), sys)
-    if err != nil {
-        return false, err
-    }
-
-    rawManifest, _, err := remoteImg.Manifest(context.Background())
-    if err != nil {
-        return false, err
-    }
-
-    remoteDigest, err := manifest.Digest(rawManifest)
-    if err != nil {
-        return false, err
-    }
-
-    return img.Digest().String() != remoteDigest.String(), nil
+    return img.HasDifferentDigest(ctx, remoteRef)
 }
 
 // newerLocalImageAvailable returns true if the container and local image have different digests
@@ -334,21 +375,16 @@ func newerLocalImageAvailable(runtime *libpod.Runtime, img *libimage.Image, rawI
     if err != nil {
         return false, err
     }
-
-    localDigest := localImg.Digest().String()
-
-    ctrDigest := img.Digest().String()
-
-    return localDigest != ctrDigest, nil
+    return localImg.Digest().String() != img.Digest().String(), nil
 }
 
 // updateImage pulls the specified image.
-func updateImage(runtime *libpod.Runtime, name string, options Options) (*libimage.Image, error) {
+func updateImage(ctx context.Context, runtime *libpod.Runtime, name string, options *entities.AutoUpdateOptions) (*libimage.Image, error) {
     pullOptions := &libimage.PullOptions{}
     pullOptions.AuthFilePath = options.Authfile
     pullOptions.Writer = os.Stderr
 
-    pulledImages, err := runtime.LibimageRuntime().Pull(context.Background(), name, config.PullPolicyAlways, pullOptions)
+    pulledImages, err := runtime.LibimageRuntime().Pull(ctx, name, config.PullPolicyAlways, pullOptions)
     if err != nil {
         return nil, err
     }
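The restart path boils down to one go-systemd call, `conn.RestartUnit(unit, "replace", nil)`. The patch passes a nil channel and therefore does not wait for the job to finish. A minimal standalone sketch of the same call that does wait for the result (the unit name `container-demo.service` is made up, and `dbus.New()` connects to the system bus, so this typically needs root; rootless setups would use `dbus.NewUserConnection()` instead):

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/v22/dbus"
)

func main() {
	conn, err := dbus.New() // system bus connection
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Unlike the patch, pass a channel so we can block until systemd
	// reports the job result ("done" on success).
	result := make(chan string, 1)
	if _, err := conn.RestartUnit("container-demo.service", "replace", result); err != nil {
		log.Fatal(err)
	}
	fmt.Println("job finished:", <-result)
}
```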
diff --git a/pkg/bindings/README.md b/pkg/bindings/README.md
index 6fd7d7831..f41304e0f 100644
--- a/pkg/bindings/README.md
+++ b/pkg/bindings/README.md
@@ -154,3 +154,80 @@ func main() {
 	fmt.Println("Container started.")
 }
 ```
+
+## Debugging tips <a name="debugging-tips"></a>
+
+To debug in a development setup, you can start the Podman system service
+in debug mode like:
+
+```bash
+$ podman --log-level=debug system service -t 0
+```
+
+The `--log-level=debug` option echoes all the logged requests and is useful for
+tracing the execution path at a finer granularity. A snippet of a sample run looks like:
+
+```bash
+INFO[0000] podman filtering at log level debug
+DEBU[0000] Called service.PersistentPreRunE(podman --log-level=debug system service -t0)
+DEBU[0000] Ignoring libpod.conf EventsLogger setting "/home/lsm5/.config/containers/containers.conf". Use "journald" if you want to change this setting and remove libpod.conf files.
+DEBU[0000] Reading configuration file "/usr/share/containers/containers.conf"
+DEBU[0000] Merged system config "/usr/share/containers/containers.conf": {Editors note: the remainder of this line was removed due to Jekyll formatting errors.}
+DEBU[0000] Using conmon: "/usr/bin/conmon"
+DEBU[0000] Initializing boltdb state at /home/lsm5/.local/share/containers/storage/libpod/bolt_state.db
+DEBU[0000] Overriding run root "/run/user/1000/containers" with "/run/user/1000" from database
+DEBU[0000] Using graph driver overlay
+DEBU[0000] Using graph root /home/lsm5/.local/share/containers/storage
+DEBU[0000] Using run root /run/user/1000
+DEBU[0000] Using static dir /home/lsm5/.local/share/containers/storage/libpod
+DEBU[0000] Using tmp dir /run/user/1000/libpod/tmp
+DEBU[0000] Using volume path /home/lsm5/.local/share/containers/storage/volumes
+DEBU[0000] Set libpod namespace to ""
+DEBU[0000] Not configuring container store
+DEBU[0000] Initializing event backend file
+DEBU[0000] using runtime "/usr/bin/runc"
+DEBU[0000] using runtime "/usr/bin/crun"
+WARN[0000] Error initializing configured OCI runtime kata: no valid executable found for OCI runtime kata: invalid argument
+DEBU[0000] using runtime "/usr/bin/crun"
+INFO[0000] Setting parallel job count to 25
+INFO[0000] podman filtering at log level debug
+DEBU[0000] Called service.PersistentPreRunE(podman --log-level=debug system service -t0)
+DEBU[0000] Ignoring libpod.conf EventsLogger setting "/home/lsm5/.config/containers/containers.conf". Use "journald" if you want to change this setting and remove libpod.conf files.
+DEBU[0000] Reading configuration file "/usr/share/containers/containers.conf"
+```
+
+If the Podman system service has been started via systemd socket activation,
+you can view the logs using journalctl. The logs after a sample run look like:
+
+```bash
+$ journalctl --user --no-pager -u podman.socket
+-- Reboot --
+Jul 22 13:50:40 nagato.nanadai.me systemd[1048]: Listening on Podman API Socket.
+$
+```
+
+```bash
+$ journalctl --user --no-pager -u podman.service
+Jul 22 13:50:53 nagato.nanadai.me systemd[1048]: Starting Podman API Service...
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 38480630a8bdaa3e1a0ebd34c94038591b0d7ad994b37be5b4f2072bb6ef0879: error acquiring lock 0 for volume 38480630a8bdaa3e1a0ebd34c94038591b0d7ad994b37be5b4f2072bb6ef0879: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 47d410af4d762a0cc456a89e58f759937146fa3be32b5e95a698a1d4069f4024: error acquiring lock 0 for volume 47d410af4d762a0cc456a89e58f759937146fa3be32b5e95a698a1d4069f4024: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 86e73f082e344dad38c8792fb86b2017c4f133f2a8db87f239d1d28a78cf0868: error acquiring lock 0 for volume 86e73f082e344dad38c8792fb86b2017c4f133f2a8db87f239d1d28a78cf0868: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 9a16ea764be490a5563e384d9074ab0495e4d9119be380c664037d6cf1215631: error acquiring lock 0 for volume 9a16ea764be490a5563e384d9074ab0495e4d9119be380c664037d6cf1215631: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume bfd6b2a97217f8655add13e0ad3f6b8e1c79bc1519b7a1e15361a107ccf57fc0: error acquiring lock 0 for volume bfd6b2a97217f8655add13e0ad3f6b8e1c79bc1519b7a1e15361a107ccf57fc0: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume f9b9f630982452ebcbed24bd229b142fbeecd5d4c85791fca440b21d56fef563: error acquiring lock 0 for volume f9b9f630982452ebcbed24bd229b142fbeecd5d4c85791fca440b21d56fef563: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: Trying to pull registry.fedoraproject.org/fedora:latest...
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Getting image source signatures
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Copying blob sha256:dd9f43919ba05f05d4f783c31e83e5e776c4f5d29dd72b9ec5056b9576c10053
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Copying config sha256:00ff39a8bf19f810a7e641f7eb3ddc47635913a19c4996debd91fafb6b379069
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Writing manifest to image destination
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Storing signatures
+Jul 22 13:50:55 nagato.nanadai.me systemd[1048]: podman.service: unit configures an IP firewall, but not running as root.
+Jul 22 13:50:55 nagato.nanadai.me systemd[1048]: (This warning is only shown for the first unit using IP firewalling.)
+Jul 22 13:51:15 nagato.nanadai.me systemd[1048]: podman.service: Succeeded.
+Jul 22 13:51:15 nagato.nanadai.me systemd[1048]: Finished Podman API Service.
+Jul 22 13:51:15 nagato.nanadai.me systemd[1048]: podman.service: Consumed 1.339s CPU time.
+$
+```
+
+You can also verify that the information being passed back and forth is correct by
+using a tool like `socat`, which can dump what the socket is seeing.
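Beyond `--log-level=debug` and journalctl, you can also poke the service directly from Go. A minimal sketch, assuming a rootless socket at `$XDG_RUNTIME_DIR/podman/podman.sock` and the libpod `_ping` health endpoint; the host `d` in the URL is a throwaway placeholder, since only the path matters when dialing a unix socket:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"os"
)

func main() {
	// Socket path assumes a rootless setup; adjust for yours.
	sock := os.Getenv("XDG_RUNTIME_DIR") + "/podman/podman.sock"
	client := &http.Client{
		Transport: &http.Transport{
			// Route every request over the unix socket.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", sock)
			},
		},
	}
	resp, err := client.Get("http://d/libpod/_ping")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expect "200 OK OK"
}
```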
diff --git a/pkg/bindings/containers/attach.go b/pkg/bindings/containers/attach.go
index cc12c8ab7..01c14d350 100644
--- a/pkg/bindings/containers/attach.go
+++ b/pkg/bindings/containers/attach.go
@@ -25,6 +25,12 @@ import (
     "golang.org/x/crypto/ssh/terminal"
 )
 
+// The CloseWriter interface is used to determine whether we can do a one-sided
+// close of a hijacked connection.
+type CloseWriter interface {
+    CloseWrite() error
+}
+
 // Attach attaches to a running container
 func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Writer, stderr io.Writer, attachReady chan bool, options *AttachOptions) error {
     if options == nil {
@@ -161,6 +167,12 @@ func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Wri
             logrus.Error("failed to write input to service: " + err.Error())
         }
         stdinChan <- err
+
+        if closeWrite, ok := socket.(CloseWriter); ok {
+            if err := closeWrite.CloseWrite(); err != nil {
+                logrus.Warnf("Failed to close STDIN for writing: %v", err)
+            }
+        }
     }()
 }
 
@@ -485,6 +497,13 @@ func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStar
         if err != nil {
             logrus.Error("failed to write input to service: " + err.Error())
         }
+
+        if closeWrite, ok := socket.(CloseWriter); ok {
+            logrus.Debugf("Closing STDIN")
+            if err := closeWrite.CloseWrite(); err != nil {
+                logrus.Warnf("Failed to close STDIN for writing: %v", err)
+            }
+        }
     }()
 }
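The `CloseWriter` assertion is the standard Go idiom for half-closing a connection: `*net.TCPConn` and `*net.UnixConn` both provide `CloseWrite`, which signals EOF to the peer while leaving the read side open for the reply. A self-contained sketch of the pattern over a loopback TCP connection (everything here is illustrative, not from the patch):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
)

// CloseWriter matches the interface the bindings assert on the hijacked
// connection.
type CloseWriter interface {
	CloseWrite() error
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	done := make(chan struct{})
	go func() {
		defer close(done)
		c, err := ln.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		data, _ := ioutil.ReadAll(c) // returns only once the peer signals EOF
		fmt.Printf("server got %q\n", data)
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	conn.Write([]byte("stdin payload"))

	// Half-close: the server's ReadAll unblocks, but we could still read.
	if cw, ok := conn.(CloseWriter); ok {
		if err := cw.CloseWrite(); err != nil {
			fmt.Println("close write:", err)
		}
	}
	<-done
}
```

Without the half-close, a server draining stdin to EOF would hang forever, which is exactly the attach/exec bug this change addresses.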
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 95d9d4df7..142204f27 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -301,6 +301,8 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
 
     tarContent := []string{options.ContextDirectory}
     newContainerFiles := []string{}
+
+    dontexcludes := []string{"!Dockerfile", "!Containerfile", "!.dockerignore", "!.containerignore"}
     for _, c := range containerFiles {
         if c == "/dev/stdin" {
             content, err := ioutil.ReadAll(os.Stdin)
@@ -328,6 +330,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
             // Do NOT add to tarfile
             if strings.HasPrefix(containerfile, contextDir+string(filepath.Separator)) {
                 containerfile = strings.TrimPrefix(containerfile, contextDir+string(filepath.Separator))
+                dontexcludes = append(dontexcludes, "!"+containerfile)
             } else {
                 // If the Containerfile does not exist, assume it is in the context directory; do not add to tarfile
                 if _, err := os.Lstat(containerfile); err != nil {
@@ -349,8 +352,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
         }
         params.Set("dockerfile", string(cFileJSON))
     }
-
-    tarfile, err := nTar(excludes, tarContent...)
+    tarfile, err := nTar(append(excludes, dontexcludes...), tarContent...)
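The `parseDockerignore` change introduces a simple precedence rule: prefer `.containerignore`, fall back to `.dockerignore`, and treat a missing file as an empty exclude list. A standalone sketch of the same fallback (the function name `readIgnoreFile` is mine):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// readIgnoreFile prefers .containerignore and falls back to .dockerignore,
// treating "neither file exists" as an empty exclude list.
func readIgnoreFile(root string) ([]byte, error) {
	data, err := ioutil.ReadFile(filepath.Join(root, ".containerignore"))
	if err == nil {
		return data, nil
	}
	data, err = ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return data, nil
}

func main() {
	data, err := readIgnoreFile(".")
	fmt.Printf("%d bytes of excludes, err=%v\n", len(data), err)
}
```

The `dontexcludes` list relies on the `!pattern` un-exclude syntax of dockerignore files, which guarantees the Containerfile itself reaches the server even when the user's ignore rules would otherwise match it.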
diff --git a/pkg/domain/entities/engine_container.go b/pkg/domain/entities/engine_container.go
index 28e5160db..62e83fab3 100644
--- a/pkg/domain/entities/engine_container.go
+++ b/pkg/domain/entities/engine_container.go
@@ -14,7 +14,7 @@ import (
 type ContainerCopyFunc func() error
 
 type ContainerEngine interface {
-    AutoUpdate(ctx context.Context, options AutoUpdateOptions) (*AutoUpdateReport, []error)
+    AutoUpdate(ctx context.Context, options AutoUpdateOptions) ([]*AutoUpdateReport, []error)
     Config(ctx context.Context) (*config.Config, error)
     ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error
     ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error)
diff --git a/pkg/domain/infra/abi/auto-update.go b/pkg/domain/infra/abi/auto-update.go
index c9d7f2130..b98ee1cb2 100644
--- a/pkg/domain/infra/abi/auto-update.go
+++ b/pkg/domain/infra/abi/auto-update.go
@@ -7,11 +7,6 @@ import (
     "github.com/containers/podman/v3/pkg/domain/entities"
 )
 
-func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) {
-    // Convert the entities options to the autoupdate ones. We can't use
-    // them in the entities package as low-level packages must not leak
-    // into the remote client.
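With the interface now returning one report per container instead of a flat unit list, callers iterate over the slice and render a row per container. A hedged sketch of a consumer, using local stand-ins that mirror the entities types from this diff (the `fakeEngine` and its data are made up for illustration):

```go
package main

import (
	"context"
	"fmt"
)

// Local stand-ins mirroring pkg/domain/entities.
type AutoUpdateOptions struct {
	Authfile string
	DryRun   bool
}

type AutoUpdateReport struct {
	ContainerID, ContainerName, ImageName string
	Policy, SystemdUnit, Updated          string
}

type autoUpdater interface {
	AutoUpdate(ctx context.Context, options AutoUpdateOptions) ([]*AutoUpdateReport, []error)
}

// printReports consumes the new slice-of-reports return value: with DryRun
// set, pending updates come back as Updated == "pending".
func printReports(ctx context.Context, engine autoUpdater) {
	reports, errs := engine.AutoUpdate(ctx, AutoUpdateOptions{DryRun: true})
	for _, r := range reports {
		fmt.Printf("%-28s %-12s %-35s %s\n", r.SystemdUnit, r.ContainerName, r.ImageName, r.Updated)
	}
	for _, err := range errs {
		fmt.Println("error:", err)
	}
}

type fakeEngine struct{}

func (fakeEngine) AutoUpdate(context.Context, AutoUpdateOptions) ([]*AutoUpdateReport, []error) {
	return []*AutoUpdateReport{{
		ContainerName: "demo",
		ImageName:     "quay.io/example/img:latest",
		SystemdUnit:   "container-demo.service",
		Updated:       "pending",
	}}, nil
}

func main() {
	printReports(context.Background(), fakeEngine{})
}
```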
     if err != nil {
         logrus.Errorf("cannot tar container entries %v error: %v", tarContent, err)
         return nil, err
@@ -548,9 +550,13 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) {
 }
 
 func parseDockerignore(root string) ([]string, error) {
-    ignore, err := ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
-    if err != nil && !os.IsNotExist(err) {
-        return nil, errors.Wrapf(err, "error reading .dockerignore: '%s'", root)
+    ignore, err := ioutil.ReadFile(filepath.Join(root, ".containerignore"))
+    if err != nil {
+        var dockerIgnoreErr error
+        ignore, dockerIgnoreErr = ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
+        if dockerIgnoreErr != nil && !os.IsNotExist(dockerIgnoreErr) {
+            return nil, errors.Wrapf(err, "error reading .containerignore: '%s'", root)
+        }
     }
     rawexcludes := strings.Split(string(ignore), "\n")
     excludes := make([]string, 0, len(rawexcludes))
diff --git a/pkg/domain/entities/auto-update.go b/pkg/domain/entities/auto-update.go
index c51158816..eed617bf8 100644
--- a/pkg/domain/entities/auto-update.go
+++ b/pkg/domain/entities/auto-update.go
@@ -4,10 +4,25 @@ package entities
 type AutoUpdateOptions struct {
     // Authfile to use when contacting registries.
     Authfile string
+    // Only check for, but do not perform, any update. If an update is
+    // pending, it will be indicated in the Updated field of
+    // AutoUpdateReport.
+    DryRun bool
 }
 
 // AutoUpdateReport contains the results from running auto-update.
 type AutoUpdateReport struct {
-    // Units - the restarted systemd units during auto-update.
-    Units []string
+    // ID of the container *before* an update.
+    ContainerID string
+    // Name of the container *before* an update.
+    ContainerName string
+    // Name of the image.
+    ImageName string
+    // The configured auto-update policy.
+    Policy string
+    // SystemdUnit running a container configured for auto updates.
+    SystemdUnit string
+    // Indicates the update status: true, false, failed, pending (see
+    // DryRun).
+    Updated string
 }
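The `system.go` fix is a lock-scoping pattern worth spelling out: the rootless CNI infra lock is needed only while shared state is prepared, and holding it across a long-running `unshare` would block every other Podman process. A generic sketch of the shape of the fix, with made-up names (`infra`, `setupThenRun`) standing in for the rootless CNI setup:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type infra struct {
	mu    sync.Mutex
	ready bool
}

// setupThenRun holds the lock only while preparing shared state, then
// releases it before the long-running work, mirroring the system.go fix,
// which unlocks before unshare runs.
func (i *infra) setupThenRun(work func()) {
	i.mu.Lock()
	if !i.ready {
		i.ready = true // stand-in for the one-time setup
	}
	i.mu.Unlock() // unlock now: work() may run for a long time

	work()
}

func main() {
	var i infra
	var wg sync.WaitGroup
	for n := 0; n < 3; n++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			i.setupThenRun(func() {
				time.Sleep(10 * time.Millisecond)
				fmt.Println("worker", n, "done")
			})
		}(n)
	}
	wg.Wait() // all three overlap instead of serializing on the lock
}
```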
-    autoOpts := autoupdate.Options{Authfile: options.Authfile}
-    units, failures := autoupdate.AutoUpdate(ic.Libpod, autoOpts)
-    return &entities.AutoUpdateReport{Units: units}, failures
+func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
+    return autoupdate.AutoUpdate(ctx, ic.Libpod, options)
 }
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index ebe59e871..155cda21d 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -403,6 +403,8 @@ func (ic *ContainerEngine) Unshare(ctx context.Context, args []string, options e
     if err != nil {
         return err
     }
+    // make sure to unlock; unshare can run for a long time
+    rootlesscni.Lock.Unlock()
     defer rootlesscni.Cleanup(ic.Libpod)
     return rootlesscni.Do(unshare)
 }
diff --git a/pkg/domain/infra/tunnel/auto-update.go b/pkg/domain/infra/tunnel/auto-update.go
index 41165cc74..038c60537 100644
--- a/pkg/domain/infra/tunnel/auto-update.go
+++ b/pkg/domain/infra/tunnel/auto-update.go
@@ -7,6 +7,6 @@ import (
     "github.com/pkg/errors"
 )
 
-func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) (*entities.AutoUpdateReport, []error) {
+func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) {
     return nil, []error{errors.New("not implemented")}
 }
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
index 0e6e1b4df..083520316 100644
--- a/pkg/systemd/generate/containers.go
+++ b/pkg/systemd/generate/containers.go
@@ -160,16 +160,11 @@ func generateContainerInfo(ctr *libpod.Container, options entities.GenerateSyste
 
     nameOrID, serviceName := containerServiceName(ctr, options)
 
-    store := ctr.Runtime().GetStore()
-    if store == nil {
-        return nil, errors.Errorf("could not determine storage store for container")
-    }
-
     var runRoot string
     if options.New {
         runRoot = "%t/containers"
     } else {
-        runRoot = store.RunRoot()
+        runRoot = ctr.Runtime().RunRoot()
         if runRoot == "" {
             return nil, errors.Errorf("could not lookup container's runroot: got empty string")
         }