author    Matthew Heon <matthew.heon@pm.me>  2020-07-09 13:50:01 -0400
committer Matthew Heon <matthew.heon@pm.me>  2020-07-22 14:47:03 -0400
commit    f9eb204d0ddb7e0d5fbffc849bd0ca10e2b17a29 (patch)
tree      cc91281a7408b83ae3037dea3b693075e7df820d
parent    79b005e638f110d6e8601816569827dbd0c8fdc2 (diff)
Remove all instances of named return "err" from Libpod
This was inspired by https://github.com/cri-o/cri-o/pull/3934 and much of the logic for it is contained there. However, in brief, a named return called "err" can cause lots of code confusion and encourages using the wrong err variable in defer statements, which can make them work incorrectly. Using a separate name which is not used elsewhere makes it very clear what the defer should be doing.

As part of this, remove a large number of named returns that were not used anywhere. Most of them were once needed, but are no longer necessary after previous refactors (but were accidentally retained).

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
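As background for the diff below, here is a minimal standalone sketch of the confusion the message describes; the makeBundleRisky/makeBundle/populateBundle names and the bundlePath argument are hypothetical, not taken from the Podman code. With a named return called err, the cleanup defer has two similarly named error variables in scope and it is easy to test or log the wrong one; giving the function's return error a distinct name such as retErr removes the ambiguity, which is the pattern the diff adopts.

package main

import (
	"fmt"
	"os"
)

// Error-prone: the named return "err" competes with the short-lived "err2"
// inside the deferred cleanup, so it is easy to test the wrong variable.
func makeBundleRisky(bundlePath string) (err error) {
	if err = os.MkdirAll(bundlePath, 0o700); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			// Intended: warn only when RemoveAll itself fails. But the
			// condition tests the outer "err" (always non-nil here), so the
			// warning fires on every cleanup, even a successful one.
			if err2 := os.RemoveAll(bundlePath); err != nil {
				fmt.Printf("cleanup after failure caused another error: %v\n", err2)
			}
		}
	}()
	return populateBundle(bundlePath)
}

// Clearer: the function's return error has a distinct name, so an "err"
// declared inside the defer can only be the local cleanup error.
func makeBundle(bundlePath string) (retErr error) {
	if err := os.MkdirAll(bundlePath, 0o700); err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			if err := os.RemoveAll(bundlePath); err != nil {
				fmt.Printf("cleanup after failure caused another error: %v\n", err)
			}
		}
	}()
	return populateBundle(bundlePath)
}

// populateBundle stands in for the real bundle setup; it fails so the
// cleanup paths above actually run.
func populateBundle(string) error {
	return fmt.Errorf("simulated failure")
}

func main() {
	fmt.Println(makeBundleRisky("/tmp/bundle-risky")) // also prints the spurious warning
	fmt.Println(makeBundle("/tmp/bundle-clear"))      // cleans up silently
}

Note that "return x" assigns to the named result before deferred functions run, so the retErr variant still observes the function's final error; the rename only removes the chance of picking up an unrelated inner err.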
-rw-r--r--  libpod/container_api.go                   | 12
-rw-r--r--  libpod/container_exec.go                  | 20
-rw-r--r--  libpod/container_internal.go              | 44
-rw-r--r--  libpod/container_internal_linux.go        | 21
-rw-r--r--  libpod/container_internal_unsupported.go  |  2
-rw-r--r--  libpod/events/events_linux.go             | 10
-rw-r--r--  libpod/networking_linux.go                | 18
-rw-r--r--  libpod/networking_unsupported.go          |  6
-rw-r--r--  libpod/oci_conmon_linux.go                |  9
-rw-r--r--  libpod/runtime.go                         | 17
-rw-r--r--  libpod/runtime_ctr.go                     | 22
-rw-r--r--  libpod/state_test.go                      |  8
-rw-r--r--  libpod/storage.go                         |  8
13 files changed, 98 insertions, 99 deletions
diff --git a/libpod/container_api.go b/libpod/container_api.go
index b37b05ff2..d19dc651b 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -29,7 +29,7 @@ import (
// Init requires that all dependency containers be started (e.g. pod infra
// containers). The `recursive` parameter will, if set to true, start these
// dependency containers before initializing this container.
-func (c *Container) Init(ctx context.Context, recursive bool) (err error) {
+func (c *Container) Init(ctx context.Context, recursive bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "containerInit")
span.SetTag("struct", "container")
defer span.Finish()
@@ -85,7 +85,7 @@ func (c *Container) Init(ctx context.Context, recursive bool) (err error) {
// Start requites that all dependency containers (e.g. pod infra containers) be
// running before being run. The recursive parameter, if set, will start all
// dependencies before starting this container.
-func (c *Container) Start(ctx context.Context, recursive bool) (err error) {
+func (c *Container) Start(ctx context.Context, recursive bool) error {
span, _ := opentracing.StartSpanFromContext(ctx, "containerStart")
span.SetTag("struct", "container")
defer span.Finish()
@@ -112,7 +112,7 @@ func (c *Container) Start(ctx context.Context, recursive bool) (err error) {
// Attach call occurs before Start).
// In overall functionality, it is identical to the Start call, with the added
// side effect that an attach session will also be started.
-func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, recursive bool) (attachResChan <-chan error, err error) {
+func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, recursive bool) (<-chan error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -150,7 +150,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt
}
// RestartWithTimeout restarts a running container and takes a given timeout in uint
-func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err error) {
+func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -160,7 +160,7 @@ func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err e
}
}
- if err = c.checkDependenciesAndHandleError(); err != nil {
+ if err := c.checkDependenciesAndHandleError(); err != nil {
return err
}
@@ -760,7 +760,7 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO
}
// Restore restores a container
-func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
+func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) error {
logrus.Debugf("Trying to restore container %s", c.ID())
if !c.batched {
c.lock.Lock()
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index 69da6fcfe..bd04ee9b9 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -779,25 +779,25 @@ func (c *Container) execOCILog(sessionID string) string {
}
// create a bundle path and associated files for an exec session
-func (c *Container) createExecBundle(sessionID string) (err error) {
+func (c *Container) createExecBundle(sessionID string) (retErr error) {
bundlePath := c.execBundlePath(sessionID)
- if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil {
- return createErr
+ if err := os.MkdirAll(bundlePath, execDirPermission); err != nil {
+ return err
}
defer func() {
- if err != nil {
- if err2 := os.RemoveAll(bundlePath); err != nil {
- logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2)
+ if retErr != nil {
+ if err := os.RemoveAll(bundlePath); err != nil {
+ logrus.Warnf("error removing exec bundle after creation caused another error: %v", err)
}
}
}()
- if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil {
+ if err := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err != nil {
// The directory is allowed to exist
- if !os.IsExist(err2) {
- err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
+ if !os.IsExist(err) {
+ return errors.Wrapf(err, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
}
}
- return
+ return nil
}
// readExecExitCode reads the exit file for an exec session and returns
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 7a547e565..c44ba5fe6 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -209,7 +209,7 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
// Handle container restart policy.
// This is called when a container has exited, and was not explicitly stopped by
// an API call to stop the container or pod it is in.
-func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, err error) {
+func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr error) {
// If we did not get a restart policy match, exit immediately.
// Do the same if we're not a policy that restarts.
if !c.state.RestartPolicyMatch ||
@@ -240,7 +240,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
// Need to check if dependencies are alive.
- if err = c.checkDependenciesAndHandleError(); err != nil {
+ if err := c.checkDependenciesAndHandleError(); err != nil {
return false, err
}
@@ -262,9 +262,9 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
}
defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ if retErr != nil {
+ if err := c.cleanup(ctx); err != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -767,7 +767,7 @@ func (c *Container) save() error {
// Checks the container is in the right state, then initializes the container in preparation to start the container.
// If recursive is true, each of the containers dependencies will be started.
// Otherwise, this function will return with error if there are dependencies of this container that aren't running.
-func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
+func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr error) {
// Container must be created or stopped to be started
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
@@ -784,9 +784,9 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
}
defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ if retErr != nil {
+ if err := c.cleanup(ctx); err != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -1132,7 +1132,7 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
// Initialize (if necessary) and start a container
// Performs all necessary steps to start a container that is not running
// Does not lock or check validity
-func (c *Container) initAndStart(ctx context.Context) (err error) {
+func (c *Container) initAndStart(ctx context.Context) (retErr error) {
// If we are ContainerStateUnknown, throw an error
if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
@@ -1150,9 +1150,9 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
}
defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ if retErr != nil {
+ if err := c.cleanup(ctx); err != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -1321,7 +1321,7 @@ func (c *Container) unpause() error {
}
// Internal, non-locking function to restart a container
-func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
+func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retErr error) {
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
}
@@ -1358,9 +1358,9 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
}
}
defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ if retErr != nil {
+ if err := c.cleanup(ctx); err != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -1682,7 +1682,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// delete deletes the container and runs any configured poststop
// hooks.
-func (c *Container) delete(ctx context.Context) (err error) {
+func (c *Container) delete(ctx context.Context) error {
span, _ := opentracing.StartSpanFromContext(ctx, "delete")
span.SetTag("struct", "container")
defer span.Finish()
@@ -1701,7 +1701,7 @@ func (c *Container) delete(ctx context.Context) (err error) {
// postDeleteHooks runs the poststop hooks (if any) as specified by
// the OCI Runtime Specification (which requires them to run
// post-delete, despite the stage name).
-func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
+func (c *Container) postDeleteHooks(ctx context.Context) error {
span, _ := opentracing.StartSpanFromContext(ctx, "postDeleteHooks")
span.SetTag("struct", "container")
defer span.Finish()
@@ -1824,7 +1824,7 @@ func (c *Container) saveSpec(spec *spec.Spec) error {
}
// Warning: precreate hooks may alter 'config' in place.
-func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (extensionStageHooks map[string][]spec.Hook, err error) {
+func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[string][]spec.Hook, error) {
allHooks := make(map[string][]spec.Hook)
if c.runtime.config.Engine.HooksDir == nil {
if rootless.IsRootless() {
@@ -1938,7 +1938,7 @@ func (c *Container) checkReadyForRemoval() error {
// writeJSONFile marshalls and writes the given data to a JSON file
// in the bundle path
-func (c *Container) writeJSONFile(v interface{}, file string) (err error) {
+func (c *Container) writeJSONFile(v interface{}, file string) error {
fileJSON, err := json.MarshalIndent(v, "", " ")
if err != nil {
return errors.Wrapf(err, "error writing JSON to %s for container %s", file, c.ID())
@@ -1953,7 +1953,7 @@ func (c *Container) writeJSONFile(v interface{}, file string) (err error) {
// prepareCheckpointExport writes the config and spec to
// JSON files for later export
-func (c *Container) prepareCheckpointExport() (err error) {
+func (c *Container) prepareCheckpointExport() error {
// save live config
if err := c.writeJSONFile(c.Config(), "config.dump"); err != nil {
return err
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 018e2d5a4..cfcf9b823 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -610,7 +610,7 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
return nil
}
-func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) (err error) {
+func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) error {
if (len(c.config.NamedVolumes) > 0) || (len(c.Dependencies()) > 0) {
return errors.Errorf("Cannot export checkpoints of containers with named volumes or dependencies")
}
@@ -721,7 +721,7 @@ func (c *Container) exportCheckpoint(dest string, ignoreRootfs bool) (err error)
return nil
}
-func (c *Container) checkpointRestoreSupported() (err error) {
+func (c *Container) checkpointRestoreSupported() error {
if !criu.CheckForCriu() {
return errors.Errorf("Checkpoint/Restore requires at least CRIU %d", criu.MinCriuVersion)
}
@@ -731,7 +731,7 @@ func (c *Container) checkpointRestoreSupported() (err error) {
return nil
}
-func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) {
+func (c *Container) checkpointRestoreLabelLog(fileName string) error {
// Create the CRIU log file and label it
dumpLog := filepath.Join(c.bundlePath(), fileName)
@@ -748,7 +748,7 @@ func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) {
return nil
}
-func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) (err error) {
+func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
if err := c.checkpointRestoreSupported(); err != nil {
return err
}
@@ -818,7 +818,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return c.save()
}
-func (c *Container) importCheckpoint(input string) (err error) {
+func (c *Container) importCheckpoint(input string) error {
archiveFile, err := os.Open(input)
if err != nil {
return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
@@ -847,8 +847,7 @@ func (c *Container) importCheckpoint(input string) (err error) {
return nil
}
-func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
-
+func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (retErr error) {
if err := c.checkpointRestoreSupported(); err != nil {
return err
}
@@ -858,7 +857,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
if options.TargetFile != "" {
- if err = c.importCheckpoint(options.TargetFile); err != nil {
+ if err := c.importCheckpoint(options.TargetFile); err != nil {
return err
}
}
@@ -944,9 +943,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
}
defer func() {
- if err != nil {
- if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ if retErr != nil {
+ if err := c.cleanup(ctx); err != nil {
+ logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
}
}
}()
diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go
index a42c1d735..e6d94104c 100644
--- a/libpod/container_internal_unsupported.go
+++ b/libpod/container_internal_unsupported.go
@@ -18,7 +18,7 @@ func (c *Container) unmountSHM(mount string) error {
return define.ErrNotImplemented
}
-func (c *Container) prepare() (err error) {
+func (c *Container) prepare() error {
return define.ErrNotImplemented
}
diff --git a/libpod/events/events_linux.go b/libpod/events/events_linux.go
index ffb100be8..482d7d6dd 100644
--- a/libpod/events/events_linux.go
+++ b/libpod/events/events_linux.go
@@ -8,20 +8,20 @@ import (
)
// NewEventer creates an eventer based on the eventer type
-func NewEventer(options EventerOptions) (eventer Eventer, err error) {
+func NewEventer(options EventerOptions) (Eventer, error) {
logrus.Debugf("Initializing event backend %s", options.EventerType)
switch strings.ToUpper(options.EventerType) {
case strings.ToUpper(Journald.String()):
- eventer, err = newEventJournalD(options)
+ eventer, err := newEventJournalD(options)
if err != nil {
return nil, errors.Wrapf(err, "eventer creation")
}
+ return eventer, nil
case strings.ToUpper(LogFile.String()):
- eventer = EventLogFile{options}
+ return EventLogFile{options}, nil
case strings.ToUpper(Null.String()):
- eventer = NewNullEventer()
+ return NewNullEventer(), nil
default:
return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
}
- return eventer, nil
}
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 5cc282f3a..1e79e8732 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -141,18 +141,18 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
}
// Create and configure a new network namespace for a container
-func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, err error) {
+func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q []*cnitypes.Result, retErr error) {
ctrNS, err := netns.NewNS()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
}
defer func() {
- if err != nil {
- if err2 := netns.UnmountNS(ctrNS); err2 != nil {
- logrus.Errorf("Error unmounting partially created network namespace for container %s: %v", ctr.ID(), err2)
+ if retErr != nil {
+ if err := netns.UnmountNS(ctrNS); err != nil {
+ logrus.Errorf("Error unmounting partially created network namespace for container %s: %v", ctr.ID(), err)
}
- if err2 := ctrNS.Close(); err2 != nil {
- logrus.Errorf("Error closing partially created network namespace for container %s: %v", ctr.ID(), err2)
+ if err := ctrNS.Close(); err != nil {
+ logrus.Errorf("Error closing partially created network namespace for container %s: %v", ctr.ID(), err)
}
}
}()
@@ -188,7 +188,7 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) {
}
// Configure the network namespace for a rootless container
-func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
+func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
path := r.config.Engine.NetworkCmdPath
if path == "" {
@@ -342,7 +342,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
return nil
}
-func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) (err error) {
+func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) error {
syncR, syncW, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "failed to open pipe")
@@ -420,7 +420,7 @@ func (r *Runtime) setupRootlessPortMapping(ctr *Container, netnsPath string) (er
}
// Configure the network namespace using the container process
-func (r *Runtime) setupNetNS(ctr *Container) (err error) {
+func (r *Runtime) setupNetNS(ctr *Container) error {
nsProcess := fmt.Sprintf("/proc/%d/ns/net", ctr.state.PID)
b := make([]byte, 16)
diff --git a/libpod/networking_unsupported.go b/libpod/networking_unsupported.go
index d5189709c..69f470ff7 100644
--- a/libpod/networking_unsupported.go
+++ b/libpod/networking_unsupported.go
@@ -4,11 +4,11 @@ package libpod
import "github.com/containers/libpod/v2/libpod/define"
-func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
+func (r *Runtime) setupRootlessNetNS(ctr *Container) error {
return define.ErrNotImplemented
}
-func (r *Runtime) setupNetNS(ctr *Container) (err error) {
+func (r *Runtime) setupNetNS(ctr *Container) error {
return define.ErrNotImplemented
}
@@ -16,7 +16,7 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
return define.ErrNotImplemented
}
-func (r *Runtime) createNetNS(ctr *Container) (err error) {
+func (r *Runtime) createNetNS(ctr *Container) error {
return define.ErrNotImplemented
}
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index e9d5cbaa3..e38646eec 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -190,7 +190,7 @@ func hasCurrentUserMapped(ctr *Container) bool {
}
// CreateContainer creates a container.
-func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
+func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error {
if !hasCurrentUserMapped(ctr) {
for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} {
if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil {
@@ -850,7 +850,7 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
}
// createOCIContainer generates this container's main conmon instance and prepares it for starting
-func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (err error) {
+func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error {
var stderrBuf bytes.Buffer
runtimeDir, err := util.GetRuntimeDir()
@@ -1297,8 +1297,9 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
return nil
}
-// newPipe creates a unix socket pair for communication
-func newPipe() (parent *os.File, child *os.File, err error) {
+// newPipe creates a unix socket pair for communication.
+// Returns two files - first is parent, second is child.
+func newPipe() (*os.File, *os.File, error) {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
if err != nil {
return nil, nil, err
diff --git a/libpod/runtime.go b/libpod/runtime.go
index bf1e203c1..24370d50e 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -126,7 +126,7 @@ func SetXdgDirs() error {
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
-func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) {
+func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
conf, err := config.NewConfig("")
if err != nil {
return nil, err
@@ -140,13 +140,13 @@ func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded
-func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (runtime *Runtime, err error) {
+func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
return newRuntimeFromConfig(ctx, userConfig, options...)
}
-func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (runtime *Runtime, err error) {
- runtime = new(Runtime)
+func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
+ runtime := new(Runtime)
if conf.Engine.OCIRuntime == "" {
conf.Engine.OCIRuntime = "runc"
@@ -236,7 +236,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
-func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
+func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// Find a working conmon binary
cPath, err := runtime.config.FindConmon()
if err != nil {
@@ -316,12 +316,11 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
return err
}
defer func() {
- if err != nil && store != nil {
+ if retErr != nil && store != nil {
// Don't forcibly shut down
// We could be opening a store in use by another libpod
- _, err2 := store.Shutdown(false)
- if err2 != nil {
- logrus.Errorf("Error removing store for partially-created runtime: %s", err2)
+ if _, err := store.Shutdown(false); err != nil {
+ logrus.Errorf("Error removing store for partially-created runtime: %s", err)
}
}
}()
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index c073aabb5..e2bb696b0 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -34,7 +34,7 @@ type CtrCreateOption func(*Container) error
type ContainerFilter func(*Container) bool
// NewContainer creates a new container from a given OCI config.
-func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
+func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (*Container, error) {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
@@ -44,7 +44,7 @@ func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
// RestoreContainer re-creates a container from an imported checkpoint
-func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
+func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
@@ -68,7 +68,7 @@ func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config
return r.setupContainer(ctx, ctr)
}
-func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (c *Container, err error) {
+func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
if rSpec == nil {
return nil, errors.Wrapf(define.ErrInvalidArg, "must provide a valid runtime spec to create container")
}
@@ -122,7 +122,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
return ctr, nil
}
-func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (c *Container, err error) {
+func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (*Container, error) {
span, _ := opentracing.StartSpanFromContext(ctx, "newContainer")
span.SetTag("type", "runtime")
defer span.Finish()
@@ -141,7 +141,7 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
return r.setupContainer(ctx, ctr)
}
-func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Container, err error) {
+func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Container, retErr error) {
// Validate the container
if err := ctr.validate(); err != nil {
return nil, err
@@ -157,9 +157,9 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
defer func() {
- if err != nil {
- if err2 := ctr.lock.Free(); err2 != nil {
- logrus.Errorf("Error freeing lock for container after creation failed: %v", err2)
+ if retErr != nil {
+ if err := ctr.lock.Free(); err != nil {
+ logrus.Errorf("Error freeing lock for container after creation failed: %v", err)
}
}
}()
@@ -272,9 +272,9 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
return nil, err
}
defer func() {
- if err != nil {
- if err2 := ctr.teardownStorage(); err2 != nil {
- logrus.Errorf("Error removing partially-created container root filesystem: %s", err2)
+ if retErr != nil {
+ if err := ctr.teardownStorage(); err != nil {
+ logrus.Errorf("Error removing partially-created container root filesystem: %s", err)
}
}
}()
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 30ddf5e24..ef4f6f2be 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -34,13 +34,13 @@ var (
)
// Get an empty BoltDB state for use in tests
-func getEmptyBoltState() (s State, p string, m lock.Manager, err error) {
+func getEmptyBoltState() (_ State, _ string, _ lock.Manager, retErr error) {
tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
if err != nil {
return nil, "", nil, err
}
defer func() {
- if err != nil {
+ if retErr != nil {
os.RemoveAll(tmpDir)
}
}()
@@ -66,13 +66,13 @@ func getEmptyBoltState() (s State, p string, m lock.Manager, err error) {
}
// Get an empty in-memory state for use in tests
-func getEmptyInMemoryState() (s State, p string, m lock.Manager, err error) {
+func getEmptyInMemoryState() (_ State, _ string, _ lock.Manager, retErr error) {
tmpDir, err := ioutil.TempDir("", tmpDirPrefix)
if err != nil {
return nil, "", nil, err
}
defer func() {
- if err != nil {
+ if retErr != nil {
os.RemoveAll(tmpDir)
}
}()
diff --git a/libpod/storage.go b/libpod/storage.go
index be79b3fc0..e497d0daf 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -66,7 +66,7 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) {
// CreateContainerStorage creates the storage end of things. We already have the container spec created
// TO-DO We should be passing in an Image object in the future.
-func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (cinfo ContainerInfo, err error) {
+func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (_ ContainerInfo, retErr error) {
span, _ := opentracing.StartSpanFromContext(ctx, "createContainerStorage")
span.SetTag("type", "storageService")
defer span.Finish()
@@ -132,9 +132,9 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
// If anything fails after this point, we need to delete the incomplete
// container before returning.
defer func() {
- if err != nil {
- if err2 := r.store.DeleteContainer(container.ID); err2 != nil {
- logrus.Infof("%v deleting partially-created container %q", err2, container.ID)
+ if retErr != nil {
+ if err := r.store.DeleteContainer(container.ID); err != nil {
+ logrus.Infof("%v deleting partially-created container %q", err, container.ID)
return
}