author | Matthew Heon <mheon@redhat.com> | 2021-06-10 14:12:30 -0400
committer | Matthew Heon <mheon@redhat.com> | 2021-06-10 14:17:41 -0400
commit | 62f4b0a1955853592c01310a2cf7e0ae041b9566 (patch)
tree | e3f2eb7cf3155760e4b0d9f878ec4f058dbcc1a2 /libpod/events.go
parent | 341e6a1628a35198500fcfc1bb65b377ff9b270b (diff)
Add ExecDied event and use it to retrieve exit codes
When making Exec Cleanup processes mandatory, I introduced a race
wherein attached exec sessions could be cleaned up and removed by
the cleanup process before the frontend had a chance to get their
exit code. Fortunately, we've dealt with this issue before in
containers, and the same solution can be applied here. I added an
event for an exec session's process exiting, `exec_died` (Docker
has an identical event, so this actually improves our
compatibility there), which includes the exit code of the exec
session. If the race happens and the exec session no longer
exists when we go to remove it, we pick up the exit code from the
event and exit cleanly.
Signed-off-by: Matthew Heon <mheon@redhat.com>
Diffstat (limited to 'libpod/events.go')
-rw-r--r-- | libpod/events.go | 39
1 file changed, 38 insertions, 1 deletion
diff --git a/libpod/events.go b/libpod/events.go
index 839229674..22c51aeec 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -46,7 +46,22 @@ func (c *Container) newContainerExitedEvent(exitCode int32) {
 	e.Type = events.Container
 	e.ContainerExitCode = int(exitCode)
 	if err := c.runtime.eventer.Write(e); err != nil {
-		logrus.Errorf("unable to write pod event: %q", err)
+		logrus.Errorf("unable to write container exited event: %q", err)
+	}
+}
+
+// newExecDiedEvent creates a new event for an exec session's death
+func (c *Container) newExecDiedEvent(sessionID string, exitCode int) {
+	e := events.NewEvent(events.ExecDied)
+	e.ID = c.ID()
+	e.Name = c.Name()
+	e.Image = c.config.RootfsImageName
+	e.Type = events.Container
+	e.ContainerExitCode = exitCode
+	e.Attributes = make(map[string]string)
+	e.Attributes["execID"] = sessionID
+	if err := c.runtime.eventer.Write(e); err != nil {
+		logrus.Errorf("unable to write exec died event: %q", err)
 	}
 }
 
@@ -154,3 +169,25 @@ func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, co
 	// return the last element in the slice
 	return containerEvents[len(containerEvents)-1], nil
 }
+
+// GetExecDiedEvent takes a container name or ID, exec session ID, and returns
+// that exec session's Died event (if it has already occurred).
+func (r *Runtime) GetExecDiedEvent(ctx context.Context, nameOrID, execSessionID string) (*events.Event, error) {
+	filters := []string{
+		fmt.Sprintf("container=%s", nameOrID),
+		"event=exec_died",
+		"type=container",
+		fmt.Sprintf("label=execID=%s", execSessionID),
+	}
+
+	containerEvents, err := r.GetEvents(ctx, filters)
+	if err != nil {
+		return nil, err
+	}
+	// There *should* only be one event maximum.
+	// But... just in case... let's not blow up if there's more than one.
+	if len(containerEvents) < 1 {
+		return nil, errors.Wrapf(events.ErrEventNotFound, "exec died event for session %s (container %s) not found", execSessionID, nameOrID)
+	}
+	return containerEvents[len(containerEvents)-1], nil
+}
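
The point of GetExecDiedEvent is that a frontend can still report an exit code after the cleanup process has reaped the exec session. A minimal sketch of how a caller might use it, assuming the era-appropriate module path and with the helper name execExitCode being purely illustrative (it is not part of this commit):

// Sketch only: recover an exec session's exit code from its exec_died
// event when the session itself has already been cleaned up and removed.
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v3/libpod" // assumed import path for this period of podman
)

// execExitCode (hypothetical helper) falls back to the event log when the
// exec session is already gone.
func execExitCode(ctx context.Context, rt *libpod.Runtime, ctrNameOrID, sessionID string) (int, error) {
	// GetExecDiedEvent filters the event log for this container's exec_died
	// event carrying the matching execID attribute.
	ev, err := rt.GetExecDiedEvent(ctx, ctrNameOrID, sessionID)
	if err != nil {
		return 0, fmt.Errorf("no exec_died event for session %s: %w", sessionID, err)
	}
	// The event records the session's exit code, so the frontend can still
	// return it even though the session no longer exists.
	return ev.ContainerExitCode, nil
}

This mirrors the approach the commit message describes for container exit codes: if the race happens and the session is gone by the time it is removed, the exit code is read back out of the event rather than from the session itself.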