Diffstat (limited to 'libpod')
-rw-r--r-- | libpod/boltdb_state.go | 36
-rw-r--r-- | libpod/container.go | 3
-rw-r--r-- | libpod/container_api.go | 3
-rw-r--r-- | libpod/container_config.go | 1
-rw-r--r-- | libpod/container_exec.go | 2
-rw-r--r-- | libpod/container_internal_linux.go | 14
-rw-r--r-- | libpod/container_top_linux.go | 2
-rw-r--r-- | libpod/define/container.go | 2
-rw-r--r-- | libpod/diff.go | 23
-rw-r--r-- | libpod/events.go | 4
-rw-r--r-- | libpod/events/events.go | 4
-rw-r--r-- | libpod/kube.go | 15
-rw-r--r-- | libpod/networking_linux.go | 2
-rw-r--r-- | libpod/options.go | 3
-rw-r--r-- | libpod/plugin/volume_api.go | 7
-rw-r--r-- | libpod/pod_api.go | 9
-rw-r--r-- | libpod/pod_top_linux.go | 2
-rw-r--r-- | libpod/runtime.go | 19
-rw-r--r-- | libpod/runtime_ctr.go | 5
-rw-r--r-- | libpod/runtime_pod.go | 2
-rw-r--r-- | libpod/runtime_renumber.go | 2
-rw-r--r-- | libpod/runtime_worker.go | 33
22 files changed, 96 insertions, 97 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 9745121c7..c3db6152a 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -162,6 +162,11 @@ func (s *BoltState) Refresh() error {
         return err
     }

+    namesBucket, err := getNamesBucket(tx)
+    if err != nil {
+        return err
+    }
+
     ctrsBucket, err := getCtrBucket(tx)
     if err != nil {
         return err
@@ -192,6 +197,7 @@ func (s *BoltState) Refresh() error {
     // PID, mountpoint, and state for all of them
     // Then save the modified state
     // Also clear all network namespaces
+    toRemoveIDs := []string{}
     err = idBucket.ForEach(func(id, name []byte) error {
         ctrBkt := ctrsBucket.Bucket(id)
         if ctrBkt == nil {
@@ -199,8 +205,16 @@ func (s *BoltState) Refresh() error {
             podBkt := podsBucket.Bucket(id)
             if podBkt == nil {
                 // This is neither a pod nor a container
-                // Error out on the dangling ID
-                return errors.Wrapf(define.ErrInternal, "id %s is not a pod or a container", string(id))
+                // Something is seriously wrong, but
+                // continue on and try to clean up the
+                // state and become consistent.
+                // Just note what needs to be removed
+                // for now - ForEach says you shouldn't
+                // remove things from the table during
+                // it.
+                logrus.Errorf("Database issue: dangling ID %s found (not a pod or container) - removing", string(id))
+                toRemoveIDs = append(toRemoveIDs, string(id))
+                return nil
             }

             // Get the state
@@ -285,6 +299,24 @@ func (s *BoltState) Refresh() error {
         return err
     }

+    // Remove dangling IDs.
+    for _, id := range toRemoveIDs {
+        // Look up the ID to see if we also have a dangling name
+        // in the DB.
+        name := idBucket.Get([]byte(id))
+        if name != nil {
+            if testID := namesBucket.Get(name); testID != nil {
+                logrus.Infof("Found dangling name %s (ID %s) in database", string(name), id)
+                if err := namesBucket.Delete(name); err != nil {
+                    return errors.Wrapf(err, "error removing dangling name %s (ID %s) from database", string(name), id)
+                }
+            }
+        }
+        if err := idBucket.Delete([]byte(id)); err != nil {
+            return errors.Wrapf(err, "error removing dangling ID %s from database", id)
+        }
+    }
+
     // Now refresh volumes
     err = allVolsBucket.ForEach(func(id, name []byte) error {
         dbVol := volBucket.Bucket(id)
diff --git a/libpod/container.go b/libpod/container.go
index 64b4453fb..04a4ae64a 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -1331,8 +1331,7 @@ func (c *Container) getNetworkStatus() map[string]types.StatusBlock {
         }
         c.state.NetworkStatus = result
         _ = c.save()
-        // TODO remove debug for final version
-        logrus.Debugf("converted old network result to new result %v", result)
+
         return result
     }
     return nil
diff --git a/libpod/container_api.go b/libpod/container_api.go
index a6fcf709d..0fab36bdc 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -202,9 +202,8 @@ func (c *Container) Kill(signal uint) error {
         }
     }

-    // TODO: Is killing a paused container OK?
     switch c.state.State {
-    case define.ContainerStateRunning, define.ContainerStateStopping:
+    case define.ContainerStateRunning, define.ContainerStateStopping, define.ContainerStatePaused:
         // Note that killing containers in "stopping" state is okay.
         // In that state, the Podman is waiting for the runtime to
         // stop the container and if that is taking too long, a user
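The Refresh() hunk above collects dangling IDs while iterating and only deletes them after ForEach completes, since bbolt documents that a bucket must not be mutated from inside ForEach. A minimal, self-contained sketch of that collect-then-delete pattern (the database path, bucket name, and isDangling check are illustrative, not Podman's):

    package main

    import (
        "fmt"
        "log"

        bolt "go.etcd.io/bbolt"
    )

    // pruneDangling removes every key the isDangling predicate flags, but only
    // after the ForEach pass has finished, mirroring the Refresh() change above.
    func pruneDangling(db *bolt.DB, isDangling func(id []byte) bool) error {
        return db.Update(func(tx *bolt.Tx) error {
            bkt := tx.Bucket([]byte("ids")) // illustrative bucket name
            if bkt == nil {
                return fmt.Errorf("bucket does not exist")
            }

            // Pass 1: note what needs to be removed; do not delete inside ForEach.
            var toRemove [][]byte
            err := bkt.ForEach(func(k, v []byte) error {
                if isDangling(k) {
                    toRemove = append(toRemove, append([]byte(nil), k...))
                }
                return nil
            })
            if err != nil {
                return err
            }

            // Pass 2: delete the collected keys in the same transaction.
            for _, k := range toRemove {
                if err := bkt.Delete(k); err != nil {
                    return err
                }
            }
            return nil
        })
    }

    func main() {
        db, err := bolt.Open("example.db", 0o600, nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := pruneDangling(db, func(id []byte) bool { return len(id) == 0 }); err != nil {
            log.Fatal(err)
        }
    }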
diff --git a/libpod/container_config.go b/libpod/container_config.go
index 3e85ad4d5..ae3bc5865 100644
--- a/libpod/container_config.go
+++ b/libpod/container_config.go
@@ -372,7 +372,6 @@ type ContainerMiscConfig struct {
     // restart the container. Used only if RestartPolicy is set to
     // "on-failure".
     RestartRetries uint `json:"restart_retries,omitempty"`
-    // TODO log options for log drivers
     // PostConfigureNetNS needed when a user namespace is created by an OCI runtime
     // if the network namespace is created before the user namespace it will be
     // owned by the wrong user namespace.
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index c05e7fd94..1e8fce4da 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -279,8 +279,6 @@ func (c *Container) ExecStart(sessionID string) error {

 // ExecStartAndAttach starts and attaches to an exec session in a container.
 // newSize resizes the tty to this size before the process is started, must be nil if the exec session has no tty
-// TODO: Should we include detach keys in the signature to allow override?
-// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr?
 func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachStreams, newSize *define.TerminalSize) error {
     if !c.batched {
         c.lock.Lock()
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index d7683cce9..e19d75deb 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -36,6 +36,7 @@ import (
     "github.com/containers/common/pkg/config"
     "github.com/containers/common/pkg/subscriptions"
     "github.com/containers/common/pkg/umask"
+    cutil "github.com/containers/common/pkg/util"
     is "github.com/containers/image/v5/storage"
     "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/libpod/events"
@@ -393,7 +394,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
     overrides := c.getUserOverrides()
     execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides)
     if err != nil {
-        if util.StringInSlice(c.config.User, c.config.HostUsers) {
+        if cutil.StringInSlice(c.config.User, c.config.HostUsers) {
             execUser, err = lookupHostUser(c.config.User)
         }
         if err != nil {
@@ -1090,7 +1091,6 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
         g.AddProcessEnv("HOSTNAME", hostname)
     }

-    // TODO need unlocked version of this for use in pods
     nsPath, err := nsCtr.NamespacePath(ns)
     if err != nil {
         return err
@@ -2389,7 +2389,7 @@ func (c *Container) generateResolvConf() error {
     }

     if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 {
-        if !util.StringInSlice(".", c.config.DNSSearch) {
+        if !cutil.StringInSlice(".", c.config.DNSSearch) {
             search = append(search, c.runtime.config.Containers.DNSSearches...)
             search = append(search, c.config.DNSSearch...)
         }
@@ -3108,7 +3108,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
     case c.config.NoCgroups:
         return "", nil
     case c.config.CgroupsMode == cgroupSplit:
-        selfCgroup, err := utils.GetOwnCgroup()
+        selfCgroup, err := utils.GetOwnCgroupDisallowRoot()
         if err != nil {
             return "", err
         }
@@ -3229,10 +3229,8 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
         return err
     }

-    // TODO: For now, I've disabled chowning volumes owned by non-Podman
-    // drivers. This may be safe, but it's really going to be a case-by-case
-    // thing, I think - safest to leave disabled now and re-enable later if
-    // there is a demand.
+    // Volumes owned by a volume driver are not chowned - we don't want to
+    // mess with a mount not managed by us.
     if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
         vol.state.NeedsChown = false

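Several hunks in this change swap github.com/containers/podman/v4/pkg/util for the shared helper in github.com/containers/common/pkg/util, imported as cutil. A minimal usage sketch, assuming the helper keeps the (needle, haystack) signature used throughout this diff:

    package main

    import (
        "fmt"

        cutil "github.com/containers/common/pkg/util"
    )

    func main() {
        searches := []string{"example.com", "."}

        // StringInSlice reports whether the first argument occurs in the slice.
        if cutil.StringInSlice(".", searches) {
            fmt.Println("'.' present - skip appending configured search domains")
        }
    }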
diff --git a/libpod/container_top_linux.go b/libpod/container_top_linux.go
index 9b3dbc873..b30e0c732 100644
--- a/libpod/container_top_linux.go
+++ b/libpod/container_top_linux.go
@@ -96,7 +96,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
 // For more details, please refer to github.com/containers/psgo.
 func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) {
     pid := strconv.Itoa(c.state.PID)
-    // TODO: psgo returns a [][]string to give users the ability to apply
+    // NOTE: psgo returns a [][]string to give users the ability to apply
     // filters on the data. We need to change the API here
     // to return a [][]string if we want to make use of
     // filtering.
diff --git a/libpod/define/container.go b/libpod/define/container.go
index bb44a6a4a..ba939578f 100644
--- a/libpod/define/container.go
+++ b/libpod/define/container.go
@@ -35,4 +35,6 @@ const (
     // OneShotInitContainer is a container that only runs as init once
     // and is then deleted.
     OneShotInitContainer = "once"
+    // ContainerInitPath is the default path of the mounted container init.
+    ContainerInitPath = "/run/podman-init"
 )
diff --git a/libpod/diff.go b/libpod/diff.go
index 794b26b48..86fa063ec 100644
--- a/libpod/diff.go
+++ b/libpod/diff.go
@@ -8,17 +8,18 @@ import (
 )

 var initInodes = map[string]bool{
-    "/dev":               true,
-    "/etc/hostname":      true,
-    "/etc/hosts":         true,
-    "/etc/resolv.conf":   true,
-    "/proc":              true,
-    "/run":               true,
-    "/run/notify":        true,
-    "/run/.containerenv": true,
-    "/run/secrets":       true,
-    "/sys":               true,
-    "/etc/mtab":          true,
+    "/dev":                   true,
+    "/etc/hostname":          true,
+    "/etc/hosts":             true,
+    "/etc/resolv.conf":       true,
+    "/proc":                  true,
+    "/run":                   true,
+    "/run/notify":            true,
+    "/run/.containerenv":     true,
+    "/run/secrets":           true,
+    define.ContainerInitPath: true,
+    "/sys":                   true,
+    "/etc/mtab":              true,
 }

 // GetDiff returns the differences between the two images, layers, or containers
diff --git a/libpod/events.go b/libpod/events.go
index 39f5786a4..f09d8402a 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -89,8 +89,8 @@ func (p *Pod) newPodEvent(status events.Status) {
     }
 }

-// newSystemEvent creates a new event for libpod as a whole.
-func (r *Runtime) newSystemEvent(status events.Status) {
+// NewSystemEvent creates a new event for libpod as a whole.
+func (r *Runtime) NewSystemEvent(status events.Status) {
     e := events.NewEvent(status)
     e.Type = events.System
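initInodes lists the paths libpod itself mounts or creates inside every container, so changes to them are filtered out of diff output; the new entry keeps the mounted init binary (define.ContainerInitPath, /run/podman-init) from showing up as a user change. A hedged sketch of that filtering idea, using an illustrative Change type rather than the real containers/storage one:

    package main

    import "fmt"

    // Change is an illustrative stand-in for the archive change type used by
    // the real GetDiff implementation.
    type Change struct {
        Path string
        Kind string
    }

    // initInodes mirrors the idea in libpod/diff.go: paths injected by the runtime.
    var initInodes = map[string]bool{
        "/etc/hosts":       true,
        "/etc/resolv.conf": true,
        "/run/podman-init": true, // define.ContainerInitPath
    }

    // filterRuntimePaths drops changes on runtime-managed paths so a container
    // diff only reports what the user actually modified.
    func filterRuntimePaths(changes []Change) []Change {
        var out []Change
        for _, ch := range changes {
            if initInodes[ch.Path] {
                continue
            }
            out = append(out, ch)
        }
        return out
    }

    func main() {
        changes := []Change{
            {Path: "/etc/resolv.conf", Kind: "C"},
            {Path: "/usr/share/app", Kind: "A"},
        }
        fmt.Println(filterRuntimePaths(changes)) // only /usr/share/app remains
    }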
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 04417fd8d..a30e0f1ca 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -144,12 +144,12 @@ func StringToType(name string) (Type, error) {
 }

 // StringToStatus converts a string to an Event Status
-// TODO if we add more events, we might consider a go-generator to
-// create the switch statement
 func StringToStatus(name string) (Status, error) {
     switch name {
     case Attach.String():
         return Attach, nil
+    case AutoUpdate.String():
+        return AutoUpdate, nil
     case Build.String():
         return Build, nil
     case Checkpoint.String():
diff --git a/libpod/kube.go b/libpod/kube.go
index 5a5fe9d35..20c4612d1 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -14,6 +14,7 @@ import (

     "github.com/containers/common/libnetwork/types"
     "github.com/containers/common/pkg/config"
+    cutil "github.com/containers/common/pkg/util"
     "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/pkg/env"
     v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
@@ -515,7 +516,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
                     podDNS.Nameservers = make([]string, 0)
                 }
                 for _, s := range servers {
-                    if !util.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist
+                    if !cutil.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist
                         podDNS.Nameservers = append(podDNS.Nameservers, s)
                     }
                 }
@@ -526,7 +527,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
                     podDNS.Searches = make([]string, 0)
                 }
                 for _, d := range domains {
-                    if !util.StringInSlice(d, podDNS.Searches) { // only append if it does not exist
+                    if !cutil.StringInSlice(d, podDNS.Searches) { // only append if it does not exist
                         podDNS.Searches = append(podDNS.Searches, d)
                     }
                 }
@@ -543,7 +544,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod,
     podName := removeUnderscores(ctrs[0].Name())
     // Check if the pod name and container name will end up conflicting
     // Append -pod if so
-    if util.StringInSlice(podName, ctrNames) {
+    if cutil.StringInSlice(podName, ctrNames) {
         podName += "-pod"
     }

@@ -824,7 +825,7 @@ func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume

 // generateKubePersistentVolumeClaim converts a ContainerNamedVolume to a Kubernetes PersistentVolumeClaim
 func generateKubePersistentVolumeClaim(v *ContainerNamedVolume) (v1.VolumeMount, v1.Volume) {
-    ro := util.StringInSlice("ro", v.Options)
+    ro := cutil.StringInSlice("ro", v.Options)

     // To avoid naming conflicts with any host path mounts, add a unique suffix to the volume's name.
     name := v.Name + "-pvc"
@@ -857,7 +858,7 @@ func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
     name += "-host"
     vm.Name = name
     vm.MountPath = m.Destination
-    if util.StringInSlice("ro", m.Options) {
+    if cutil.StringInSlice("ro", m.Options) {
         vm.ReadOnly = true
     }

@@ -915,7 +916,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
     // Find caps in the defaultCaps but not in the container's
     // those indicate a dropped cap
     for _, capability := range defaultCaps {
-        if !util.StringInSlice(capability, containerCaps) {
+        if !cutil.StringInSlice(capability, containerCaps) {
             if _, ok := dedupDrop[capability]; !ok {
                 drop = append(drop, v1.Capability(capability))
                 dedupDrop[capability] = true
@@ -925,7 +926,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
     // Find caps in the container but not in the defaults; those indicate
     // an added cap
     for _, capability := range containerCaps {
-        if !util.StringInSlice(capability, defaultCaps) {
+        if !cutil.StringInSlice(capability, defaultCaps) {
             if _, ok := dedupAdd[capability]; !ok {
                 add = append(add, v1.Capability(capability))
                 dedupAdd[capability] = true
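With the new case above, the AutoUpdate status round-trips through StringToStatus like the other events. A small usage sketch (the import path is taken from the other files in this diff; the literal event string is not hard-coded here, only whatever AutoUpdate.String() returns is used):

    package main

    import (
        "fmt"

        "github.com/containers/podman/v4/libpod/events"
    )

    func main() {
        // Round-trip the new status through the parser.
        status, err := events.StringToStatus(events.AutoUpdate.String())
        if err != nil {
            fmt.Println("unexpected:", err)
            return
        }
        fmt.Println("parsed status:", status)
    }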
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 0c124cf0b..73e64530e 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -25,13 +25,13 @@ import (
     "github.com/containers/common/pkg/config"
     "github.com/containers/common/pkg/machine"
     "github.com/containers/common/pkg/netns"
+    "github.com/containers/common/pkg/util"
     "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/libpod/events"
     "github.com/containers/podman/v4/pkg/errorhandling"
     "github.com/containers/podman/v4/pkg/namespaces"
     "github.com/containers/podman/v4/pkg/resolvconf"
     "github.com/containers/podman/v4/pkg/rootless"
-    "github.com/containers/podman/v4/pkg/util"
     "github.com/containers/podman/v4/utils"
     "github.com/containers/storage/pkg/lockfile"
     spec "github.com/opencontainers/runtime-spec/specs-go"
diff --git a/libpod/options.go b/libpod/options.go
index feb89510f..a02c05537 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -12,6 +12,7 @@ import (
     nettypes "github.com/containers/common/libnetwork/types"
     "github.com/containers/common/pkg/config"
     "github.com/containers/common/pkg/secrets"
+    cutil "github.com/containers/common/pkg/util"
     "github.com/containers/image/v5/manifest"
     "github.com/containers/image/v5/types"
     "github.com/containers/podman/v4/libpod/define"
@@ -605,7 +606,7 @@ func WithSdNotifyMode(mode string) CtrCreateOption {
         }

         // verify values
-        if len(mode) > 0 && !util.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
+        if len(mode) > 0 && !cutil.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
             return errors.Wrapf(define.ErrInvalidArg, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", "))
         }

diff --git a/libpod/plugin/volume_api.go b/libpod/plugin/volume_api.go
index a6d66a034..2818e70c1 100644
--- a/libpod/plugin/volume_api.go
+++ b/libpod/plugin/volume_api.go
@@ -22,9 +22,6 @@ import (

 var json = jsoniter.ConfigCompatibleWithStandardLibrary

-// TODO: We should add syntax for specifying plugins to containers.conf, and
-// support for loading based on that.
-
 // Copied from docker/go-plugins-helpers/volume/api.go - not exported, so we
 // need to do this to get at them.
 // These are well-established paths that should not change unless the plugin API
@@ -185,8 +182,7 @@ func (p *VolumePlugin) getURI() string {
 }

 // Verify the plugin is still available.
-// TODO: Do we want to ping with an HTTP request? There's no ping endpoint so
-// we'd need to hit Activate or Capabilities?
+// Does not actually ping the API, just verifies that the socket still exists.
 func (p *VolumePlugin) verifyReachable() error {
     if _, err := os.Stat(p.SocketPath); err != nil {
         if os.IsNotExist(err) {
@@ -307,7 +303,6 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
         return nil, err
     }

-    // TODO: Can probably unify response reading under a helper
     volumeRespBytes, err := ioutil.ReadAll(resp.Body)
     if err != nil {
         return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
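verifyReachable above deliberately avoids an HTTP round-trip and only checks that the plugin's Unix socket still exists. A small, self-contained sketch of that kind of check (the socket path and error text are illustrative):

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    var errPluginRemoved = errors.New("plugin socket no longer exists")

    // socketReachable reports whether a plugin socket path is still present.
    // It does not talk to the plugin; a stat is enough to detect removal.
    func socketReachable(socketPath string) error {
        if _, err := os.Stat(socketPath); err != nil {
            if os.IsNotExist(err) {
                return fmt.Errorf("%s: %w", socketPath, errPluginRemoved)
            }
            return fmt.Errorf("stat %s: %w", socketPath, err)
        }
        return nil
    }

    func main() {
        // Illustrative path; Docker-style volume plugins usually register
        // their sockets under /run/docker/plugins/.
        if err := socketReachable("/run/docker/plugins/example.sock"); err != nil {
            fmt.Println("plugin unavailable:", err)
        }
    }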
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index eede896a9..1c1e15984 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -152,8 +152,8 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
         return nil, err
     }

-    // TODO: There may be cases where it makes sense to order stops based on
-    // dependencies. Should we bother with this?
+    // Stopping pods is not ordered by dependency. We haven't seen any case
+    // where this would actually matter.

     ctrErrChan := make(map[string]<-chan error)

@@ -162,8 +162,9 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
         c := ctr
         logrus.Debugf("Adding parallel job to stop container %s", c.ID())
         retChan := parallel.Enqueue(ctx, func() error {
-            // TODO: Might be better to batch stop and cleanup
-            // together?
+            // Can't batch these without forcing Stop() to hold the
+            // lock for the full duration of the timeout.
+            // We probably don't want to do that.
             if timeout > -1 {
                 if err := c.StopWithTimeout(uint(timeout)); err != nil {
                     return err
diff --git a/libpod/pod_top_linux.go b/libpod/pod_top_linux.go
index 83a070807..544126dcd 100644
--- a/libpod/pod_top_linux.go
+++ b/libpod/pod_top_linux.go
@@ -53,7 +53,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
         }
     }

-    // TODO: psgo returns a [][]string to give users the ability to apply
+    // NOTE: psgo returns a [][]string to give users the ability to apply
     // filters on the data. We need to change the API here to return
     // a [][]string if we want to make use of filtering.
     opts := psgo.JoinNamespaceOpts{FillMappings: rootless.IsRootless()}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 58f20ef5b..e268c2d17 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -11,6 +11,7 @@ import (
     "regexp"
     "strconv"
     "strings"
+    "sync"
     "syscall"
     "time"

@@ -87,8 +88,8 @@ type Runtime struct {
     lockManager lock.Manager

     // Worker
-    workerShutdown chan bool
-    workerChannel  chan func()
+    workerChannel chan func()
+    workerGroup   sync.WaitGroup

     // syslog describes whenever logrus should log to the syslog as well.
     // Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go
@@ -411,7 +412,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
         return err
     }
     runtime.eventer = eventer
-    // TODO: events for libimage

     // Set up containers/image
     if runtime.imageContext == nil {
@@ -516,8 +516,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
     }
     // Acquire the lock and hold it until we return
     // This ensures that no two processes will be in runtime.refresh at once
-    // TODO: we can't close the FD in this lock, so we should keep it around
-    // and use it to lock important operations
     aliveLock.Lock()
     doRefresh := false
     defer func() {
@@ -823,12 +821,9 @@ func (r *Runtime) Shutdown(force bool) error {
         return define.ErrRuntimeStopped
     }

-    if r.workerShutdown != nil {
-        // Signal the worker routine to shutdown. The routine will
-        // process all pending work items and then read from the
-        // channel; we're blocked until all work items have been
-        // processed.
-        r.workerShutdown <- true
+    if r.workerChannel != nil {
+        r.workerGroup.Wait()
+        close(r.workerChannel)
     }

     r.valid = false
@@ -930,7 +925,7 @@ func (r *Runtime) refresh(alivePath string) error {
     }
     defer file.Close()

-    r.newSystemEvent(events.Refresh)
+    r.NewSystemEvent(events.Refresh)

     return nil
 }
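stopWithTimeout above fans each container stop out through the parallel executor and collects a per-container error channel instead of stopping sequentially. A rough, self-contained sketch of that fan-out shape using plain goroutines (the parallel package itself is not shown; stopFn is an illustrative stand-in for StopWithTimeout):

    package main

    import (
        "fmt"
        "time"
    )

    // stopFn is an illustrative stand-in for (*Container).StopWithTimeout.
    type stopFn func(timeout uint) error

    // stopAll launches every stop concurrently and returns one error channel per
    // name, mirroring the ctrErrChan map built in stopWithTimeout.
    func stopAll(ctrs map[string]stopFn, timeout uint) map[string]<-chan error {
        errChans := make(map[string]<-chan error, len(ctrs))
        for name, stop := range ctrs {
            stop := stop // capture loop variable
            ch := make(chan error, 1)
            errChans[name] = ch
            go func() {
                ch <- stop(timeout)
                close(ch)
            }()
        }
        return errChans
    }

    func main() {
        ctrs := map[string]stopFn{
            "web": func(uint) error { time.Sleep(10 * time.Millisecond); return nil },
            "db":  func(uint) error { return fmt.Errorf("db refused to stop") },
        }

        // Collect results after all stops have been queued.
        for name, ch := range stopAll(ctrs, 10) {
            if err := <-ch; err != nil {
                fmt.Printf("container %s: %v\n", name, err)
            } else {
                fmt.Printf("container %s stopped\n", name)
            }
        }
    }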
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 2eaa77572..bdfc102ba 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -13,6 +13,7 @@ import (
     "github.com/containers/common/libnetwork/types"
     "github.com/containers/common/pkg/cgroups"
     "github.com/containers/common/pkg/config"
+    cutil "github.com/containers/common/pkg/util"
     "github.com/containers/podman/v4/libpod/define"
     "github.com/containers/podman/v4/libpod/events"
     "github.com/containers/podman/v4/libpod/shutdown"
@@ -246,7 +247,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
     for _, opts := range ctr.config.Networks {
         if opts.InterfaceName != "" {
             // check that no name is assigned to more than network
-            if util.StringInSlice(opts.InterfaceName, usedIfNames) {
+            if cutil.StringInSlice(opts.InterfaceName, usedIfNames) {
                 return nil, errors.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
             }
             usedIfNames = append(usedIfNames, opts.InterfaceName)
@@ -262,7 +263,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
             if opts.InterfaceName == "" {
                 for i < 100000 {
                     ifName := fmt.Sprintf("eth%d", i)
-                    if !util.StringInSlice(ifName, usedIfNames) {
+                    if !cutil.StringInSlice(ifName, usedIfNames) {
                         opts.InterfaceName = ifName
                         usedIfNames = append(usedIfNames, ifName)
                         break
diff --git a/libpod/runtime_pod.go b/libpod/runtime_pod.go
index dca0ffc8a..ee3d40484 100644
--- a/libpod/runtime_pod.go
+++ b/libpod/runtime_pod.go
@@ -4,8 +4,8 @@ import (
     "context"
     "time"

+    "github.com/containers/common/pkg/util"
     "github.com/containers/podman/v4/libpod/define"
-    "github.com/containers/podman/v4/pkg/util"
     "github.com/pkg/errors"
 )

diff --git a/libpod/runtime_renumber.go b/libpod/runtime_renumber.go
index 17e1d97e5..db055f40b 100644
--- a/libpod/runtime_renumber.go
+++ b/libpod/runtime_renumber.go
@@ -71,7 +71,7 @@ func (r *Runtime) renumberLocks() error {
         }
     }

-    r.newSystemEvent(events.Renumber)
+    r.NewSystemEvent(events.Renumber)

     return nil
 }
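The setupContainer hunks above keep the "find the first free ethN" behavior while switching the membership check to the common helper. A simplified sketch of that naming loop, using a map for membership instead of StringInSlice purely for brevity:

    package main

    import "fmt"

    // firstFreeInterfaceName mirrors the loop in setupContainer: pick the first
    // ethN that is not already taken by another network.
    func firstFreeInterfaceName(used []string) string {
        taken := make(map[string]bool, len(used))
        for _, name := range used {
            taken[name] = true
        }
        for i := 0; i < 100000; i++ {
            name := fmt.Sprintf("eth%d", i)
            if !taken[name] {
                return name
            }
        }
        return ""
    }

    func main() {
        fmt.Println(firstFreeInterfaceName([]string{"eth0", "eth1"})) // eth2
    }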
diff --git a/libpod/runtime_worker.go b/libpod/runtime_worker.go
index ca44a27f7..9d41321b2 100644
--- a/libpod/runtime_worker.go
+++ b/libpod/runtime_worker.go
@@ -1,40 +1,17 @@
 package libpod

-import (
-    "time"
-)
-
 func (r *Runtime) startWorker() {
-    if r.workerChannel == nil {
-        r.workerChannel = make(chan func(), 1)
-        r.workerShutdown = make(chan bool)
-    }
+    r.workerChannel = make(chan func(), 10)
     go func() {
-        for {
-            // Make sure to read all workers before
-            // checking if we're about to shutdown.
-            for len(r.workerChannel) > 0 {
-                w := <-r.workerChannel
-                w()
-            }
-
-            select {
-            // We'll read from the shutdown channel only when all
-            // items above have been processed.
-            //
-            // (*Runtime).Shutdown() will block until until the
-            // item is read.
-            case <-r.workerShutdown:
-                return
-
-            default:
-                time.Sleep(100 * time.Millisecond)
-            }
+        for w := range r.workerChannel {
+            w()
+            r.workerGroup.Done()
         }
     }()
 }

 func (r *Runtime) queueWork(f func()) {
+    r.workerGroup.Add(1)
     go func() {
         r.workerChannel <- f
     }()
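The rewritten worker replaces the shutdown channel and 100ms polling loop with a buffered channel drained by a single goroutine plus a sync.WaitGroup, so Shutdown() can simply Wait() for queued work and then close the channel. A standalone sketch of the same pattern, using simplified names rather than the real Runtime struct:

    package main

    import (
        "fmt"
        "sync"
    )

    // worker mirrors the shape of the new libpod worker: a buffered channel of
    // jobs and a WaitGroup tracking how many are still outstanding.
    type worker struct {
        jobs  chan func()
        group sync.WaitGroup
    }

    func newWorker() *worker {
        w := &worker{jobs: make(chan func(), 10)}
        go func() {
            // Drain jobs until the channel is closed by shutdown().
            for job := range w.jobs {
                job()
                w.group.Done()
            }
        }()
        return w
    }

    // queue registers the job with the WaitGroup before handing it to the
    // channel, so shutdown() cannot miss work that is still in flight.
    func (w *worker) queue(job func()) {
        w.group.Add(1)
        go func() {
            w.jobs <- job
        }()
    }

    // shutdown blocks until every queued job has run, then closes the channel
    // so the drain goroutine exits.
    func (w *worker) shutdown() {
        w.group.Wait()
        close(w.jobs)
    }

    func main() {
        w := newWorker()
        for i := 0; i < 3; i++ {
            i := i
            w.queue(func() { fmt.Println("job", i) })
        }
        w.shutdown()
    }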