Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go              |  4
-rw-r--r--  libpod/boltdb_state_internal.go     |  5
-rw-r--r--  libpod/container_api.go             |  4
-rw-r--r--  libpod/container_inspect.go         | 13
-rw-r--r--  libpod/container_internal.go        | 10
-rw-r--r--  libpod/container_internal_linux.go  |  6
-rw-r--r--  libpod/define/info.go               |  2
-rw-r--r--  libpod/events.go                    |  6
-rw-r--r--  libpod/events/filters.go            |  2
-rw-r--r--  libpod/image/image.go               |  5
-rw-r--r--  libpod/image/image_test.go          |  8
-rw-r--r--  libpod/image/prune.go               |  6
-rw-r--r--  libpod/image/pull.go                |  2
-rw-r--r--  libpod/image/search.go              | 10
-rw-r--r--  libpod/kube.go                      | 41
-rw-r--r--  libpod/oci_missing.go               |  6
-rw-r--r--  libpod/pod_api.go                   |  5
-rw-r--r--  libpod/pod_internal.go              |  4
-rw-r--r--  libpod/runtime.go                   | 13
-rw-r--r--  libpod/runtime_ctr.go               |  2
-rw-r--r--  libpod/runtime_pod_linux.go         |  5
-rw-r--r--  libpod/runtime_volume_linux.go      |  6
-rw-r--r--  libpod/storage.go                   |  4
-rw-r--r--  libpod/volume_internal.go           |  5
24 files changed, 66 insertions, 108 deletions
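Most of the hunks below apply one of two mechanical cleanups. Functions whose error return was always nil lose that return value (resetState, getMissingRuntime, generateInspectContainerConfig, newPod, newVolume, getStorageService, mergeDBConfig, and friends), and slices previously declared with a bare var are preallocated with an explicit capacity. A minimal, self-contained sketch of the first pattern, using invented names rather than code from this commit:

    package main

    import "fmt"

    // containerState is a stand-in for the state struct being reset; only a
    // few fields are kept for illustration.
    type containerState struct {
        PID        int
        Mountpoint string
        Mounted    bool
    }

    // Before the cleanup this returned an error that was always nil:
    //
    //     func resetState(state *containerState) error { ...; return nil }
    //
    // Dropping the error simplifies both the signature and every caller.
    func resetState(state *containerState) {
        state.PID = 0
        state.Mountpoint = ""
        state.Mounted = false
    }

    func main() {
        s := &containerState{PID: 42, Mountpoint: "/tmp/x", Mounted: true}
        resetState(s)
        fmt.Printf("%+v\n", *s)
    }

Callers shrink accordingly: the surrounding if err != nil blocks in boltdb_state.go, runtime.go, runtime_pod_linux.go, and runtime_volume_linux.go are deleted rather than rewritten.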
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 4b6ff2c1d..be2787670 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -243,9 +243,7 @@ func (s *BoltState) Refresh() error {
return errors.Wrapf(err, "error unmarshalling state for container %s", string(id))
}
- if err := resetState(state); err != nil {
- return errors.Wrapf(err, "error resetting state for container %s", string(id))
- }
+ resetState(state)
newStateBytes, err := json.Marshal(state)
if err != nil {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 21d55bf77..70abbb39c 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -407,10 +407,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
ociRuntime, ok := s.runtime.ociRuntimes[runtimeName]
if !ok {
// Use a MissingRuntime implementation
- ociRuntime, err = getMissingRuntime(runtimeName, s.runtime)
- if err != nil {
- return err
- }
+ ociRuntime = getMissingRuntime(runtimeName, s.runtime)
}
ctr.ociRuntime = ociRuntime
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index d366ffb84..d43cb4829 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -39,7 +39,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
// don't recursively start
- if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err := c.checkDependenciesAndHandleError(); err != nil {
return err
}
@@ -146,7 +146,7 @@ func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err e
}
}
- if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err = c.checkDependenciesAndHandleError(); err != nil {
return err
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index b26dcddf6..c6d9e1a65 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -90,7 +90,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
- inspectMounts, err := c.getInspectMounts(ctrSpec, namedVolumes, mounts)
+ inspectMounts, err := c.getInspectMounts(namedVolumes, mounts)
if err != nil {
return nil, err
}
@@ -164,10 +164,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
data.NetworkSettings = networkConfig
- inspectConfig, err := c.generateInspectContainerConfig(ctrSpec)
- if err != nil {
- return nil, err
- }
+ inspectConfig := c.generateInspectContainerConfig(ctrSpec)
data.Config = inspectConfig
hostConfig, err := c.generateInspectContainerHostConfig(ctrSpec, namedVolumes, mounts)
@@ -195,7 +192,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
// Get inspect-formatted mounts list.
// Only includes user-specified mounts. Only includes bind mounts and named
// volumes, not tmpfs volumes.
-func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
+func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
inspectMounts := []define.InspectMount{}
// No mounts, return early
@@ -278,7 +275,7 @@ func parseMountOptionsForInspect(options []string, mount *define.InspectMount) {
}
// Generate the InspectContainerConfig struct for the Config field of Inspect.
-func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*define.InspectContainerConfig, error) {
+func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.InspectContainerConfig {
ctrConfig := new(define.InspectContainerConfig)
ctrConfig.Hostname = c.Hostname()
@@ -325,7 +322,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*define.Ins
ctrConfig.CreateCommand = c.config.CreateCommand
- return ctrConfig, nil
+ return ctrConfig
}
// Generate the InspectContainerHostConfig struct for the HostConfig field of
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f6fc3c1a4..73e0b2118 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -239,7 +239,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
// Need to check if dependencies are alive.
- if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err = c.checkDependenciesAndHandleError(); err != nil {
return false, err
}
@@ -513,7 +513,7 @@ func (c *Container) teardownStorage() error {
// Reset resets state fields to default values.
// It is performed before a refresh and clears the state after a reboot.
// It does not save the results - assumes the database will do that for us.
-func resetState(state *ContainerState) error {
+func resetState(state *ContainerState) {
state.PID = 0
state.ConmonPID = 0
state.Mountpoint = ""
@@ -527,8 +527,6 @@ func resetState(state *ContainerState) error {
state.StoppedByUser = false
state.RestartPolicyMatch = false
state.RestartCount = 0
-
- return nil
}
// Refresh refreshes the container's state after a restart.
@@ -756,7 +754,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
}
if !recursive {
- if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err := c.checkDependenciesAndHandleError(); err != nil {
return err
}
} else {
@@ -792,7 +790,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
}
// checks dependencies are running and prints a helpful message
-func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
+func (c *Container) checkDependenciesAndHandleError() error {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 9afe11b2b..12c1abf1c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1313,7 +1313,7 @@ func (c *Container) generateResolvConf() (string, error) {
}
}
- var dns []net.IP
+ dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers))
for _, i := range c.runtime.config.Containers.DNSServers {
result := net.ParseIP(i)
if result == nil {
@@ -1393,7 +1393,9 @@ func (c *Container) generateHosts(path string) (string, error) {
// local hosts file. netCtr is the container from which the netNS information is
// taken.
// path is the basis of the hosts file, into which netCtr's netNS information will be appended.
-func (c *Container) appendHosts(path string, netCtr *Container) (string, error) {
+// FIXME. Path should be used by this function,but I am not sure what is correct; remove //lint
+// once this is fixed
+func (c *Container) appendHosts(path string, netCtr *Container) (string, error) { //nolint
return c.appendStringToRundir("hosts", netCtr.getHosts())
}
diff --git a/libpod/define/info.go b/libpod/define/info.go
index f136936f7..47c53d067 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -43,7 +43,7 @@ type RemoteSocket struct {
Exists bool `json:"exists,omitempty"`
}
-// SlirpInfo describes the slirp exectuable that
+// SlirpInfo describes the slirp executable that
// is being being used.
type SlirpInfo struct {
Executable string `json:"executable"`
diff --git a/libpod/events.go b/libpod/events.go
index 20ebecc66..3d07c5d76 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -85,10 +85,7 @@ func (r *Runtime) Events(options events.ReadOptions) error {
// GetEvents reads the event log and returns events based on input filters
func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
- var (
- logEvents []*events.Event
- readErr error
- )
+ var readErr error
eventChannel := make(chan *events.Event)
options := events.ReadOptions{
EventChannel: eventChannel,
@@ -106,6 +103,7 @@ func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
if readErr != nil {
return nil, readErr
}
+ logEvents := make([]*events.Event, 0, len(eventChannel))
for e := range eventChannel {
logEvents = append(logEvents, e)
}
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index b3c5eda6e..6eed1f61d 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -81,7 +81,7 @@ func parseFilter(filter string) (string, string, error) {
}
func generateEventOptions(filters []string, since, until string) ([]EventFilter, error) {
- var options []EventFilter
+ options := make([]EventFilter, 0, len(filters))
for _, filter := range filters {
key, val, err := parseFilter(filter)
if err != nil {
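The filters.go hunk above, and several image, kube, and runtime hunks below, replace a bare var xs []T declaration with xs := make([]T, 0, len(input)). The capacity hint avoids repeated reallocations while the zero length keeps the append-driven logic unchanged. A small sketch under invented names:

    package main

    import "fmt"

    // buildOptions mirrors the preallocation pattern: the result is bounded by
    // len(filters), so capacity is reserved up front while the length stays 0.
    func buildOptions(filters []string) []string {
        // Note: make([]string, len(filters)) would be wrong here, because
        // append would then add entries after len(filters) zero values.
        options := make([]string, 0, len(filters))
        for _, f := range filters {
            if f == "" {
                continue // skipped entries are why length and capacity may differ
            }
            options = append(options, "parsed:"+f)
        }
        return options
    }

    func main() {
        fmt.Println(buildOptions([]string{"label=x", "", "until=1h"}))
    }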
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 60787b826..1101e35dc 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -172,8 +172,6 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
// LoadFromArchiveReference creates a new image object for images pulled from a tar archive and the like (podman load)
// This function is needed because it is possible for a tar archive to have multiple tags for one image
func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*Image, error) {
- var newImages []*Image
-
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
@@ -182,6 +180,7 @@ func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.Im
return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef))
}
+ newImages := make([]*Image, 0, len(imageNames))
for _, name := range imageNames {
newImage, err := ir.NewFromLocal(name)
if err != nil {
@@ -475,11 +474,11 @@ func (ir *Runtime) GetRWImages() ([]*Image, error) {
// getImages retrieves all images present in storage
func (ir *Runtime) getImages(rwOnly bool) ([]*Image, error) {
- var newImages []*Image
images, err := ir.store.Images()
if err != nil {
return nil, err
}
+ newImages := make([]*Image, 0, len(images))
for _, i := range images {
if rwOnly && i.ReadOnly {
continue
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index 3cd368cdc..74067853e 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -44,7 +44,7 @@ func cleanup(workdir string, ir *Runtime) {
}
}
-func makeLocalMatrix(b, bg *Image) ([]localImageTest, error) {
+func makeLocalMatrix(b, bg *Image) []localImageTest {
var l []localImageTest
// busybox
busybox := localImageTest{
@@ -65,7 +65,7 @@ func makeLocalMatrix(b, bg *Image) ([]localImageTest, error) {
busyboxGlibc.names = bbGlibcNames
l = append(l, busybox, busyboxGlibc)
- return l, nil
+ return l
}
@@ -100,9 +100,7 @@ func TestImage_NewFromLocal(t *testing.T) {
bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
assert.NoError(t, err)
- tm, err := makeLocalMatrix(bb, bbglibc)
- assert.NoError(t, err)
-
+ tm := makeLocalMatrix(bb, bbglibc)
for _, image := range tm {
// tag our images
err = image.img.TagImage(image.taggedName)
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index 3b4ea74c4..518795173 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -104,10 +104,7 @@ func (ir *Runtime) GetPruneImages(ctx context.Context, all bool, filterFuncs []I
// PruneImages prunes dangling and optionally all unused images from the local
// image store
func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
- var (
- prunedCids []string
- filterFuncs []ImageFilter
- )
+ filterFuncs := make([]ImageFilter, 0, len(filter))
for _, f := range filter {
filterSplit := strings.SplitN(f, "=", 2)
if len(filterSplit) < 2 {
@@ -125,6 +122,7 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
if err != nil {
return nil, errors.Wrap(err, "unable to get images to prune")
}
+ prunedCids := make([]string, 0, len(pruneImages))
for _, p := range pruneImages {
repotags, err := p.RepoTags()
if err != nil {
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 6b4c40ba2..24909a59a 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -366,7 +366,7 @@ func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(inputName string) (*pullG
if err != nil {
return nil, err
}
- var refPairs []pullRefPair
+ refPairs := make([]pullRefPair, 0, len(searchRegistries))
for _, registry := range searchRegistries {
ref, err := decomposedImage.referenceWithRegistry(registry)
if err != nil {
diff --git a/libpod/image/search.go b/libpod/image/search.go
index fd29dac45..f8d45d576 100644
--- a/libpod/image/search.go
+++ b/libpod/image/search.go
@@ -93,8 +93,8 @@ func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
searchImageInRegistryHelper := func(index int, registry string) {
defer sem.Release(1)
defer wg.Done()
- searchOutput, err := searchImageInRegistry(term, registry, options)
- data[index] = searchOutputData{data: searchOutput, err: err}
+ searchOutput := searchImageInRegistry(term, registry, options)
+ data[index] = searchOutputData{data: searchOutput}
}
ctx := context.Background()
@@ -131,7 +131,7 @@ func getRegistries(registry string) ([]string, error) {
return registries, nil
}
-func searchImageInRegistry(term string, registry string, options SearchOptions) ([]SearchResult, error) {
+func searchImageInRegistry(term string, registry string, options SearchOptions) []SearchResult {
// Max number of queries by default is 25
limit := maxQueries
if options.Limit > 0 {
@@ -147,7 +147,7 @@ func searchImageInRegistry(term string, registry string, options SearchOptions)
results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit)
if err != nil {
logrus.Errorf("error searching registry %q: %v", registry, err)
- return []SearchResult{}, nil
+ return []SearchResult{}
}
index := registry
arr := strings.Split(registry, ".")
@@ -201,7 +201,7 @@ func searchImageInRegistry(term string, registry string, options SearchOptions)
}
paramsArr = append(paramsArr, params)
}
- return paramsArr, nil
+ return paramsArr
}
// ParseSearchFilter turns the filter into a SearchFilter that can be used for
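The search.go hunks above are a slightly different case: searchImageInRegistry already handled registry failures internally by logging them and returning an empty result, so its error return carried no information and is removed together with the error plumbing in the goroutine helper. A reduced sketch of that shape, with invented names:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    // queryRegistry stands in for a remote search call; one registry fails so
    // the degraded path is exercised.
    func queryRegistry(registry, term string) ([]string, error) {
        if registry == "broken.example.com" {
            return nil, errors.New("connection refused")
        }
        return []string{registry + "/" + term}, nil
    }

    // searchInRegistry logs failures and degrades to an empty slice, so there
    // is no error left for it to return.
    func searchInRegistry(registry, term string) []string {
        results, err := queryRegistry(registry, term)
        if err != nil {
            log.Printf("error searching registry %q: %v", registry, err)
            return []string{}
        }
        return results
    }

    func main() {
        registries := []string{"docker.example.com", "broken.example.com"}
        all := make([]string, 0, len(registries))
        for _, r := range registries {
            all = append(all, searchInRegistry(r, "busybox")...)
        }
        fmt.Println(all)
    }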
diff --git a/libpod/kube.go b/libpod/kube.go
index a3c5e912f..90acd2541 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -31,8 +31,8 @@ func (c *Container) GenerateForKube() (*v1.Pod, error) {
func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
// Generate the v1.Pod yaml description
var (
- servicePorts []v1.ServicePort
- ports []v1.ContainerPort
+ ports []v1.ContainerPort //nolint
+ servicePorts []v1.ServicePort //nolint
)
allContainers, err := p.allContainers()
@@ -99,7 +99,7 @@ func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1
// containerPortsToServicePorts takes a slice of containerports and generates a
// slice of service ports
func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.ServicePort {
- var sps []v1.ServicePort
+ sps := make([]v1.ServicePort, 0, len(containerPorts))
for _, cp := range containerPorts {
nodePort := 30000 + rand.Intn(32767-30000+1)
servicePort := v1.ServicePort{
@@ -116,11 +116,11 @@ func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.Servic
// containersToServicePorts takes a slice of v1.Containers and generates an
// inclusive list of serviceports to expose
func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
- var sps []v1.ServicePort
// Without the call to rand.Seed, a program will produce the same sequence of pseudo-random numbers
// for each execution. Legal nodeport range is 30000-32767
rand.Seed(time.Now().UnixNano())
+ sps := make([]v1.ServicePort, 0, len(containers))
for _, ctr := range containers {
sps = append(sps, containerPortsToServicePorts(ctr.Ports)...)
}
@@ -128,11 +128,9 @@ func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
}
func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort) (*v1.Pod, error) {
- var (
- podContainers []v1.Container
- )
deDupPodVolumes := make(map[string]*v1.Volume)
first := true
+ podContainers := make([]v1.Container, 0, len(containers))
for _, ctr := range containers {
if !ctr.IsInfra() {
ctr, volumes, err := containerToV1Container(ctr)
@@ -201,13 +199,11 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.
// simplePodWithV1Container is a function used by inspect when kube yaml needs to be generated
// for a single container. we "insert" that container description in a pod.
func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
- var containers []v1.Container
kubeCtr, kubeVols, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
- containers = append(containers, kubeCtr)
- return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil
+ return addContainersAndVolumesToPodObject([]v1.Container{kubeCtr}, kubeVols, ctr.Name()), nil
}
@@ -223,11 +219,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
if len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
- devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
- if err != nil {
- return kubeContainer, kubeVolumes, err
- }
- kubeContainer.VolumeDevices = devices
+ kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices")
}
@@ -283,7 +275,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
// it to a v1.ContainerPort format for kube output
func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.ContainerPort, error) {
- var containerPorts []v1.ContainerPort
+ containerPorts := make([]v1.ContainerPort, 0, len(portMappings))
for _, p := range portMappings {
var protocol v1.Protocol
switch strings.ToUpper(p.Protocol) {
@@ -308,7 +300,7 @@ func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.C
// libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar
func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
- var envVars []v1.EnvVar
+ envVars := make([]v1.EnvVar, 0, len(envs))
for _, e := range envs {
split := strings.SplitN(e, "=", 2)
if len(split) != 2 {
@@ -325,11 +317,10 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
- var vms []v1.VolumeMount
- var vos []v1.Volume
-
// TODO when named volumes are supported in play kube, also parse named volumes here
_, mounts := c.sortUserVolumes(c.config.Spec)
+ vms := make([]v1.VolumeMount, 0, len(mounts))
+ vos := make([]v1.Volume, 0, len(mounts))
for _, m := range mounts {
vm, vo, err := generateKubeVolumeMount(m)
if err != nil {
@@ -404,8 +395,8 @@ func convertVolumePathToName(hostSourcePath string) (string, error) {
func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
var (
- drop []v1.Capability
- add []v1.Capability
+ drop = []v1.Capability{}
+ add = []v1.Capability{}
)
dedupDrop := make(map[string]bool)
dedupAdd := make(map[string]bool)
@@ -518,8 +509,8 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
}
// generateKubeVolumeDeviceFromLinuxDevice takes a list of devices and makes a VolumeDevice struct for kube
-func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.VolumeDevice, error) {
- var volumeDevices []v1.VolumeDevice
+func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) []v1.VolumeDevice {
+ volumeDevices := make([]v1.VolumeDevice, 0, len(devices))
for _, d := range devices {
vd := v1.VolumeDevice{
// TBD How are we going to sync up these names
@@ -528,7 +519,7 @@ func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.
}
volumeDevices = append(volumeDevices, vd)
}
- return volumeDevices, nil
+ return volumeDevices
}
func removeUnderscores(s string) string {
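One kube.go change above has an observable effect beyond satisfying the linter: replacing var drop []v1.Capability with drop = []v1.Capability{} swaps a nil slice for an empty one, and encoding/json marshals those differently (null versus []). A standalone demonstration with stand-in types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type caps struct {
        Drop []string `json:"drop"`
        Add  []string `json:"add"`
    }

    func main() {
        var nilSlices caps // Drop and Add are nil
        emptySlices := caps{Drop: []string{}, Add: []string{}}

        a, _ := json.Marshal(nilSlices)
        b, _ := json.Marshal(emptySlices)
        fmt.Println(string(a)) // {"drop":null,"add":null}
        fmt.Println(string(b)) // {"drop":[],"add":[]}
    }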
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 90e90cc6c..8caf00e6e 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -32,7 +32,7 @@ type MissingRuntime struct {
// Get a new MissingRuntime for the given name.
// Requires a libpod Runtime so we can make a sane path for the exits dir.
-func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) {
+func getMissingRuntime(name string, r *Runtime) OCIRuntime {
missingRuntimesLock.Lock()
defer missingRuntimesLock.Unlock()
@@ -42,7 +42,7 @@ func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) {
runtime, ok := missingRuntimes[name]
if ok {
- return runtime, nil
+ return runtime
}
// Once for each missing runtime, we want to error.
@@ -54,7 +54,7 @@ func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) {
missingRuntimes[name] = newRuntime
- return newRuntime, nil
+ return newRuntime
}
// Name is the name of the missing runtime
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index c8605eb69..98f4cad73 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -432,10 +432,6 @@ func containerStatusFromContainers(allCtrs []*Container) (map[string]define.Cont
// Inspect returns a PodInspect struct to describe the pod
func (p *Pod) Inspect() (*define.InspectPodData, error) {
- var (
- ctrs []define.InspectPodContainerInfo
- )
-
p.lock.Lock()
defer p.lock.Unlock()
if err := p.updatePod(); err != nil {
@@ -446,6 +442,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
if err != nil {
return nil, err
}
+ ctrs := make([]define.InspectPodContainerInfo, 0, len(containers))
ctrStatuses := make(map[string]define.ContainerStatus, len(containers))
for _, c := range containers {
containerStatus := "unknown"
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 851f52a4e..9e60d3c07 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -13,7 +13,7 @@ import (
)
// Creates a new, empty pod
-func newPod(runtime *Runtime) (*Pod, error) {
+func newPod(runtime *Runtime) *Pod {
pod := new(Pod)
pod.config = new(PodConfig)
pod.config.ID = stringid.GenerateNonCryptoID()
@@ -23,7 +23,7 @@ func newPod(runtime *Runtime) (*Pod, error) {
pod.state = new(podState)
pod.runtime = runtime
- return pod, nil
+ return pod
}
// Update pod state from database
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4744de1a2..b1e48b3b3 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -286,9 +286,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
- if err := runtime.mergeDBConfig(dbConfig); err != nil {
- return errors.Wrapf(err, "error merging database config into runtime config")
- }
+ runtime.mergeDBConfig(dbConfig)
logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
@@ -696,11 +694,7 @@ func (r *Runtime) configureStore() error {
// Set up a storage service for creating container root filesystems from
// images
- storageService, err := getStorageService(r.store)
- if err != nil {
- return err
- }
- r.storageService = storageService
+ r.storageService = getStorageService(r.store)
ir := image.NewImageRuntimeFromStore(r.store)
ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath
@@ -751,7 +745,7 @@ type DBConfig struct {
}
// mergeDBConfig merges the configuration from the database.
-func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error {
+func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
c := &r.config.Engine
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
@@ -802,7 +796,6 @@ func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error {
}
c.VolumePath = dbConfig.VolumePath
}
- return nil
}
func (r *Runtime) EnableLabeling() bool {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index aa91dff03..f0beb0941 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -813,7 +813,7 @@ func (r *Runtime) GetRunningContainers() ([]*Container, error) {
// GetContainersByList is a helper function for GetContainers
// which takes a []string of container IDs or names
func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error) {
- var ctrs []*Container
+ ctrs := make([]*Container, 0, len(containers))
for _, inputContainer := range containers {
ctr, err := r.LookupContainer(inputContainer)
if err != nil {
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 73b6c5d9b..25584c5ad 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -28,10 +28,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po
return nil, define.ErrRuntimeStopped
}
- pod, err := newPod(r)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating pod")
- }
+ pod := newPod(r)
// Set default namespace to runtime's namespace
// Do so before options run so they can override it
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index d4b46cc94..f6ecae4ab 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -29,11 +29,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
// newVolume creates a new empty volume
func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
- volume, err := newVolume(r)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating volume")
- }
-
+ volume := newVolume(r)
for _, option := range options {
if err := option(volume); err != nil {
return nil, errors.Wrapf(err, "error running volume create option")
diff --git a/libpod/storage.go b/libpod/storage.go
index 34e40f699..c90020833 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -21,8 +21,8 @@ type storageService struct {
// getStorageService returns a storageService which can create container root
// filesystems from images
-func getStorageService(store storage.Store) (*storageService, error) {
- return &storageService{store: store}, nil
+func getStorageService(store storage.Store) *storageService {
+ return &storageService{store: store}
}
// ContainerInfo wraps a subset of information about a container: the locations
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 781ff77ca..d7d5a2494 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -9,7 +9,7 @@ import (
)
// Creates a new volume
-func newVolume(runtime *Runtime) (*Volume, error) {
+func newVolume(runtime *Runtime) *Volume {
volume := new(Volume)
volume.config = new(VolumeConfig)
volume.state = new(VolumeState)
@@ -17,8 +17,7 @@ func newVolume(runtime *Runtime) (*Volume, error) {
volume.config.Labels = make(map[string]string)
volume.config.Options = make(map[string]string)
volume.state.NeedsCopyUp = true
-
- return volume, nil
+ return volume
}
// teardownStorage deletes the volume from volumePath