-rw-r--r--  .golangci.yml | 5
-rw-r--r--  cmd/podman/common/specgen.go | 27
-rw-r--r--  cmd/podman/containers/mount.go | 4
-rw-r--r--  cmd/podman/containers/ps.go | 9
-rw-r--r--  cmd/podman/containers/stats.go | 4
-rw-r--r--  cmd/podman/generate/systemd.go | 2
-rw-r--r--  cmd/podman/images/history.go | 2
-rw-r--r--  cmd/podman/inspect/inspect.go | 4
-rw-r--r--  cmd/podman/networks/list.go | 5
-rw-r--r--  cmd/podman/pods/ps.go | 8
-rw-r--r--  cmd/podman/system/df.go | 8
-rw-r--r--  contrib/varlink/io.podman.service | 2
-rw-r--r--  go.mod | 2
-rw-r--r--  go.sum | 4
-rw-r--r--  libpod/boltdb_state.go | 4
-rw-r--r--  libpod/boltdb_state_internal.go | 5
-rw-r--r--  libpod/container_api.go | 4
-rw-r--r--  libpod/container_inspect.go | 13
-rw-r--r--  libpod/container_internal.go | 10
-rw-r--r--  libpod/container_internal_linux.go | 6
-rw-r--r--  libpod/define/info.go | 2
-rw-r--r--  libpod/events.go | 6
-rw-r--r--  libpod/events/filters.go | 2
-rw-r--r--  libpod/image/image.go | 5
-rw-r--r--  libpod/image/image_test.go | 8
-rw-r--r--  libpod/image/prune.go | 6
-rw-r--r--  libpod/image/pull.go | 2
-rw-r--r--  libpod/image/search.go | 10
-rw-r--r--  libpod/kube.go | 41
-rw-r--r--  libpod/oci_missing.go | 6
-rw-r--r--  libpod/pod_api.go | 5
-rw-r--r--  libpod/pod_internal.go | 4
-rw-r--r--  libpod/runtime.go | 13
-rw-r--r--  libpod/runtime_ctr.go | 2
-rw-r--r--  libpod/runtime_pod_linux.go | 5
-rw-r--r--  libpod/runtime_volume_linux.go | 6
-rw-r--r--  libpod/storage.go | 4
-rw-r--r--  libpod/volume_internal.go | 5
-rw-r--r--  pkg/api/handlers/compat/containers_create.go | 8
-rw-r--r--  pkg/api/handlers/compat/images_history.go | 2
-rw-r--r--  pkg/api/handlers/compat/info.go | 2
-rw-r--r--  pkg/api/handlers/compat/networks.go | 6
-rw-r--r--  pkg/api/handlers/libpod/pods.go | 22
-rw-r--r--  pkg/api/handlers/libpod/system.go | 2
-rw-r--r--  pkg/api/handlers/libpod/volumes.go | 12
-rw-r--r--  pkg/api/handlers/utils/images.go | 7
-rw-r--r--  pkg/api/handlers/utils/pods.go | 6
-rw-r--r--  pkg/bindings/connection.go | 15
-rw-r--r--  pkg/bindings/containers/checkpoint.go | 2
-rw-r--r--  pkg/bindings/containers/types.go | 2
-rw-r--r--  pkg/bindings/images/images.go | 2
-rw-r--r--  pkg/bindings/system/system.go | 1
-rw-r--r--  pkg/cgroups/cgroups.go | 4
-rw-r--r--  pkg/cgroups/cpu.go | 3
-rw-r--r--  pkg/domain/entities/generate.go | 2
-rw-r--r--  pkg/domain/infra/abi/containers.go | 91
-rw-r--r--  pkg/domain/infra/abi/containers_runlabel.go | 12
-rw-r--r--  pkg/domain/infra/abi/cp.go | 6
-rw-r--r--  pkg/domain/infra/abi/images.go | 7
-rw-r--r--  pkg/domain/infra/abi/images_list.go | 9
-rw-r--r--  pkg/domain/infra/abi/manifest.go | 4
-rw-r--r--  pkg/domain/infra/abi/network.go | 7
-rw-r--r--  pkg/domain/infra/abi/play.go | 13
-rw-r--r--  pkg/domain/infra/abi/pods.go | 41
-rw-r--r--  pkg/domain/infra/abi/system.go | 12
-rw-r--r--  pkg/domain/infra/abi/volumes.go | 17
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 50
-rw-r--r--  pkg/domain/infra/tunnel/images.go | 2
-rw-r--r--  pkg/domain/infra/tunnel/network.go | 4
-rw-r--r--  pkg/domain/infra/tunnel/pods.go | 25
-rw-r--r--  pkg/domain/infra/tunnel/volumes.go | 9
-rw-r--r--  pkg/hooks/exec/runtimeconfigfilter_test.go | 54
-rw-r--r--  pkg/lookup/lookup.go | 2
-rw-r--r--  pkg/network/files.go | 11
-rw-r--r--  pkg/network/network.go | 4
-rw-r--r--  pkg/ps/ps.go | 2
-rw-r--r--  pkg/signal/signal_linux.go | 2
-rw-r--r--  pkg/util/mountOpts.go | 3
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/.travis.yml | 8
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/CHANGELOG.md | 8
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/README.md | 42
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/RELEASE.md | 1
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config.go | 44
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config_env.go | 14
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/constants.go | 4
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_remote.go | 14
-rw-r--r--  vendor/modules.txt | 2
87 files changed, 383 insertions, 499 deletions
diff --git a/.golangci.yml b/.golangci.yml
index 33a8b4f59..4d5ff549c 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -10,6 +10,7 @@ run:
- pkg/spec
- pkg/varlink
- pkg/varlinkapi
+ - docs/varlink
skip-files:
- iopodman.go
- swagger.go
@@ -25,10 +26,6 @@ linters:
- gosec
- lll
- maligned
- - misspell
- - prealloc
- - unparam
- - nakedret
linters-settings:
errcheck:
check-blank: false
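
[Editor's note] The .golangci.yml hunk above removes misspell, prealloc, unparam, and nakedret from the disabled-linters list, so the rest of this commit is fixes for their findings. The most common pattern is the prealloc one: a slice grown with append from a nil declaration is replaced by make() with a known capacity. A minimal sketch of that pattern, assuming made-up names (not code from this repository):

```go
package main

import "fmt"

func main() {
	// Hypothetical input; the names here are illustrative only.
	reports := []string{"ctr1", "ctr2", "ctr3"}

	// Before: prealloc flags growing a nil slice with append when the
	// final length is already known from len(reports).
	var before []string
	for _, r := range reports {
		before = append(before, r)
	}

	// After: pre-size the slice, as done throughout this commit.
	after := make([]string, 0, len(reports))
	for _, r := range reports {
		after = append(after, r)
	}

	fmt.Println(before, after)
}
```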
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index 2a5ebea3c..0b44ef544 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -23,7 +23,7 @@ import (
"github.com/pkg/errors"
)
-func getCPULimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (*specs.LinuxCPU, error) {
+func getCPULimits(c *ContainerCLIOpts) *specs.LinuxCPU {
cpu := &specs.LinuxCPU{}
hasLimits := false
@@ -67,12 +67,12 @@ func getCPULimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string)
}
if !hasLimits {
- return nil, nil
+ return nil
}
- return cpu, nil
+ return cpu
}
-func getIOLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (*specs.LinuxBlockIO, error) {
+func getIOLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts) (*specs.LinuxBlockIO, error) {
var err error
io := &specs.LinuxBlockIO{}
hasLimits := false
@@ -87,7 +87,7 @@ func getIOLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (
}
if len(c.BlkIOWeightDevice) > 0 {
- if err := parseWeightDevices(c.BlkIOWeightDevice, s); err != nil {
+ if err := parseWeightDevices(s, c.BlkIOWeightDevice); err != nil {
return nil, err
}
hasLimits = true
@@ -127,7 +127,7 @@ func getIOLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (
return io, nil
}
-func getPidsLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) *specs.LinuxPids {
+func getPidsLimits(c *ContainerCLIOpts) *specs.LinuxPids {
pids := &specs.LinuxPids{}
if c.CGroupsMode == "disabled" && c.PIDsLimit != 0 {
return nil
@@ -146,7 +146,7 @@ func getPidsLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string)
return nil
}
-func getMemoryLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string) (*specs.LinuxMemory, error) {
+func getMemoryLimits(s *specgen.SpecGenerator, c *ContainerCLIOpts) (*specs.LinuxMemory, error) {
var err error
memory := &specs.LinuxMemory{}
hasLimits := false
@@ -446,19 +446,16 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
if s.ResourceLimits == nil {
s.ResourceLimits = &specs.LinuxResources{}
}
- s.ResourceLimits.Memory, err = getMemoryLimits(s, c, args)
- if err != nil {
- return err
- }
- s.ResourceLimits.BlockIO, err = getIOLimits(s, c, args)
+ s.ResourceLimits.Memory, err = getMemoryLimits(s, c)
if err != nil {
return err
}
- s.ResourceLimits.Pids = getPidsLimits(s, c, args)
- s.ResourceLimits.CPU, err = getCPULimits(s, c, args)
+ s.ResourceLimits.BlockIO, err = getIOLimits(s, c)
if err != nil {
return err
}
+ s.ResourceLimits.Pids = getPidsLimits(c)
+ s.ResourceLimits.CPU = getCPULimits(c)
if s.ResourceLimits.CPU == nil && s.ResourceLimits.Pids == nil && s.ResourceLimits.BlockIO == nil && s.ResourceLimits.Memory == nil {
s.ResourceLimits = nil
}
@@ -700,7 +697,7 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
return &hc, nil
}
-func parseWeightDevices(weightDevs []string, s *specgen.SpecGenerator) error {
+func parseWeightDevices(s *specgen.SpecGenerator, weightDevs []string) error {
for _, val := range weightDevs {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
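
[Editor's note] The specgen.go hunk above shows the typical unparam fixes in this commit: parameters no caller uses (such as args) and error results that are always nil are dropped, and the call sites in FillOutSpecGen are updated to match. A hedged before/after sketch with made-up names, not the repository's API:

```go
package main

import "fmt"

// opts is a stand-in type; the real code passes *ContainerCLIOpts.
type opts struct{ limit int }

// Before (sketch): unparam would report that args is never used and
// that the error result is always nil.
//
//	func getLimit(c *opts, args []string) (int, error) { return c.limit, nil }

// After: drop the unused parameter and the always-nil error.
func getLimit(c *opts) int {
	return c.limit
}

func main() {
	fmt.Println(getLimit(&opts{limit: 2048}))
}
```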
diff --git a/cmd/podman/containers/mount.go b/cmd/podman/containers/mount.go
index af4d52caa..7f15616de 100644
--- a/cmd/podman/containers/mount.go
+++ b/cmd/podman/containers/mount.go
@@ -71,7 +71,6 @@ func init() {
func mount(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
- mrs []mountReporter
)
reports, err := registry.ContainerEngine().ContainerMount(registry.GetContext(), args, mountOpts)
if err != nil {
@@ -90,6 +89,7 @@ func mount(cmd *cobra.Command, args []string) error {
if mountOpts.Format == "json" {
return printJSON(reports)
}
+ mrs := make([]mountReporter, 0, len(reports))
for _, r := range reports {
mrs = append(mrs, mountReporter{r})
}
@@ -110,7 +110,7 @@ func printJSON(reports []*entities.ContainerMountReport) error {
Names []string
Mountpoint string `json:"mountpoint"`
}
- var jreports []jreport
+ jreports := make([]jreport, 0, len(reports))
for _, r := range reports {
jreports = append(jreports, jreport{
diff --git a/cmd/podman/containers/ps.go b/cmd/podman/containers/ps.go
index a29b4da3d..ffd2054a6 100644
--- a/cmd/podman/containers/ps.go
+++ b/cmd/podman/containers/ps.go
@@ -74,7 +74,7 @@ func listFlagSet(flags *pflag.FlagSet) {
_ = flags.MarkHidden("latest")
}
}
-func checkFlags(c *cobra.Command, args []string) error {
+func checkFlags(c *cobra.Command) error {
// latest, and last are mutually exclusive.
if listOpts.Last >= 0 && listOpts.Latest {
return errors.Errorf("last and latest are mutually exclusive")
@@ -144,8 +144,7 @@ func getResponses() ([]entities.ListContainer, error) {
}
func ps(cmd *cobra.Command, args []string) error {
- var responses []psReporter
- if err := checkFlags(cmd, args); err != nil {
+ if err := checkFlags(cmd); err != nil {
return err
}
for _, f := range filters {
@@ -172,6 +171,7 @@ func ps(cmd *cobra.Command, args []string) error {
return quietOut(listContainers)
}
+ responses := make([]psReporter, 0, len(listContainers))
for _, r := range listContainers {
responses = append(responses, psReporter{r})
}
@@ -351,7 +351,8 @@ func portsToString(ports []ocicni.PortMapping) string {
first int32
last int32
}
- var portDisplay []string
+ portDisplay := []string{}
+
if len(ports) == 0 {
return ""
}
diff --git a/cmd/podman/containers/stats.go b/cmd/podman/containers/stats.go
index 11aa3a4d2..260cbd25d 100644
--- a/cmd/podman/containers/stats.go
+++ b/cmd/podman/containers/stats.go
@@ -134,7 +134,7 @@ func outputStats(reports []*define.ContainerStats) error {
tm.MoveCursor(1, 1)
tm.Flush()
}
- var stats []*containerStats
+ stats := make([]*containerStats, 0, len(reports))
for _, r := range reports {
stats = append(stats, &containerStats{r})
}
@@ -228,7 +228,7 @@ func outputJSON(stats []*containerStats) error {
BlockIO string `json:"block_io"`
Pids string `json:"pids"`
}
- var jstats []jstat
+ jstats := make([]jstat, 0, len(stats))
for _, j := range stats {
jstats = append(jstats, jstat{
Id: j.ID(),
diff --git a/cmd/podman/generate/systemd.go b/cmd/podman/generate/systemd.go
index 75031e070..e4fdd8690 100644
--- a/cmd/podman/generate/systemd.go
+++ b/cmd/podman/generate/systemd.go
@@ -41,7 +41,7 @@ func init() {
flags.BoolVarP(&systemdOptions.New, "new", "", false, "Create a new container instead of starting an existing one")
flags.StringVar(&systemdOptions.ContainerPrefix, "container-prefix", "container", "Systemd unit name prefix for containers")
flags.StringVar(&systemdOptions.PodPrefix, "pod-prefix", "pod", "Systemd unit name prefix for pods")
- flags.StringVar(&systemdOptions.Separator, "separator", "-", "Systemd unit name seperator between name/id and prefix")
+ flags.StringVar(&systemdOptions.Separator, "separator", "-", "Systemd unit name separator between name/id and prefix")
flags.SetNormalizeFunc(utils.AliasFlags)
}
diff --git a/cmd/podman/images/history.go b/cmd/podman/images/history.go
index 17a80557e..ea4b9983f 100644
--- a/cmd/podman/images/history.go
+++ b/cmd/podman/images/history.go
@@ -100,7 +100,7 @@ func history(cmd *cobra.Command, args []string) error {
}
return err
}
- var hr []historyreporter
+ hr := make([]historyreporter, 0, len(results.Layers))
for _, l := range results.Layers {
hr = append(hr, historyreporter{l})
}
diff --git a/cmd/podman/inspect/inspect.go b/cmd/podman/inspect/inspect.go
index 223ce00f0..1ed033ec3 100644
--- a/cmd/podman/inspect/inspect.go
+++ b/cmd/podman/inspect/inspect.go
@@ -77,7 +77,7 @@ func newInspector(options entities.InspectOptions) (*inspector, error) {
// inspect inspects the specified container/image names or IDs.
func (i *inspector) inspect(namesOrIDs []string) error {
// data - dumping place for inspection results.
- var data []interface{}
+ var data []interface{} //nolint
ctx := context.Background()
if len(namesOrIDs) == 0 {
@@ -132,7 +132,7 @@ func (i *inspector) inspect(namesOrIDs []string) error {
}
func (i *inspector) inspectAll(ctx context.Context, namesOrIDs []string) ([]interface{}, error) {
- var data []interface{}
+ var data []interface{} //nolint
for _, name := range namesOrIDs {
imgData, err := i.imageEngine.Inspect(ctx, []string{name}, i.options)
if err == nil {
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index 498a4dc18..747fef26d 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -56,10 +56,6 @@ func init() {
}
func networkList(cmd *cobra.Command, args []string) error {
- var (
- nlprs []ListPrintReports
- )
-
// validate the filter pattern.
if len(networkListOptions.Filter) > 0 {
tokens := strings.Split(networkListOptions.Filter, "=")
@@ -82,6 +78,7 @@ func networkList(cmd *cobra.Command, args []string) error {
return jsonOut(responses)
}
+ nlprs := make([]ListPrintReports, 0, len(responses))
for _, r := range responses {
nlprs = append(nlprs, ListPrintReports{r})
}
diff --git a/cmd/podman/pods/ps.go b/cmd/podman/pods/ps.go
index bcd1db84c..0171bb243 100644
--- a/cmd/podman/pods/ps.go
+++ b/cmd/podman/pods/ps.go
@@ -68,7 +68,6 @@ func pods(cmd *cobra.Command, args []string) error {
var (
w io.Writer = os.Stdout
row string
- lpr []ListPodReporter
)
if psInput.Quiet && len(psInput.Format) > 0 {
@@ -102,6 +101,7 @@ func pods(cmd *cobra.Command, args []string) error {
return nil
}
+ lpr := make([]ListPodReporter, 0, len(responses))
for _, r := range responses {
lpr = append(lpr, ListPodReporter{r})
}
@@ -220,7 +220,7 @@ func (l ListPodReporter) InfraId() string { //nolint
}
func (l ListPodReporter) ContainerIds() string {
- var ctrids []string
+ ctrids := make([]string, 0, len(l.Containers))
for _, c := range l.Containers {
id := c.Id
if !noTrunc {
@@ -232,7 +232,7 @@ func (l ListPodReporter) ContainerIds() string {
}
func (l ListPodReporter) ContainerNames() string {
- var ctrNames []string
+ ctrNames := make([]string, 0, len(l.Containers))
for _, c := range l.Containers {
ctrNames = append(ctrNames, c.Names)
}
@@ -240,7 +240,7 @@ func (l ListPodReporter) ContainerNames() string {
}
func (l ListPodReporter) ContainerStatuses() string {
- var statuses []string
+ statuses := make([]string, 0, len(l.Containers))
for _, c := range l.Containers {
statuses = append(statuses, c.Status)
}
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index 9318bba12..c56990cb5 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -147,15 +147,13 @@ func printSummary(reports *entities.SystemDfReport, userFormat string) error {
func printVerbose(reports *entities.SystemDfReport) error {
var (
- dfImages []*dfImage
- dfContainers []*dfContainer
- dfVolumes []*dfVolume
- w io.Writer = os.Stdout
+ w io.Writer = os.Stdout
)
// Images
fmt.Print("\nImages space usage:\n\n")
// convert to dfImage for output
+ dfImages := make([]*dfImage, 0, len(reports.Images))
for _, d := range reports.Images {
dfImages = append(dfImages, &dfImage{SystemDfImageReport: d})
}
@@ -170,6 +168,7 @@ func printVerbose(reports *entities.SystemDfReport) error {
fmt.Print("\nContainers space usage:\n\n")
// convert to dfContainers for output
+ dfContainers := make([]*dfContainer, 0, len(reports.Containers))
for _, d := range reports.Containers {
dfContainers = append(dfContainers, &dfContainer{SystemDfContainerReport: d})
}
@@ -183,6 +182,7 @@ func printVerbose(reports *entities.SystemDfReport) error {
// Volumes
fmt.Print("\nLocal Volumes space usage:\n\n")
+ dfVolumes := make([]*dfVolume, 0, len(reports.Volumes))
// convert to dfVolume for output
for _, d := range reports.Volumes {
dfVolumes = append(dfVolumes, &dfVolume{SystemDfVolumeReport: d})
diff --git a/contrib/varlink/io.podman.service b/contrib/varlink/io.podman.service
index 5be5329f4..99d28560d 100644
--- a/contrib/varlink/io.podman.service
+++ b/contrib/varlink/io.podman.service
@@ -6,7 +6,7 @@ Documentation=man:podman-varlink(1)
[Service]
Type=simple
-ExecStart=/usr/bin/podman varlink unix:%t/podman/io.podman --timeout=60000
+ExecStart=/usr/bin/podman system service --varlink --timeout=60000 unix:%t/podman/io.podman
TimeoutStopSec=30
KillMode=process
diff --git a/go.mod b/go.mod
index 4135f2579..81c906e60 100644
--- a/go.mod
+++ b/go.mod
@@ -53,7 +53,7 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.6.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
- github.com/uber/jaeger-client-go v2.23.1+incompatible
+ github.com/uber/jaeger-client-go v2.24.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
github.com/vishvananda/netlink v1.1.0
diff --git a/go.sum b/go.sum
index 79deb1885..c559ccdd4 100644
--- a/go.sum
+++ b/go.sum
@@ -455,8 +455,8 @@ github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmD
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/u-root/u-root v6.0.0+incompatible/go.mod h1:RYkpo8pTHrNjW08opNd/U6p/RJE7K0D8fXO0d47+3YY=
-github.com/uber/jaeger-client-go v2.23.1+incompatible h1:uArBYHQR0HqLFFAypI7RsWTzPSj/bDpmZZuQjMLSg1A=
-github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngByMrXoGHh3n8oCqAKx0uMo=
+github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 4b6ff2c1d..be2787670 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -243,9 +243,7 @@ func (s *BoltState) Refresh() error {
return errors.Wrapf(err, "error unmarshalling state for container %s", string(id))
}
- if err := resetState(state); err != nil {
- return errors.Wrapf(err, "error resetting state for container %s", string(id))
- }
+ resetState(state)
newStateBytes, err := json.Marshal(state)
if err != nil {
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index 21d55bf77..70abbb39c 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -407,10 +407,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
ociRuntime, ok := s.runtime.ociRuntimes[runtimeName]
if !ok {
// Use a MissingRuntime implementation
- ociRuntime, err = getMissingRuntime(runtimeName, s.runtime)
- if err != nil {
- return err
- }
+ ociRuntime = getMissingRuntime(runtimeName, s.runtime)
}
ctr.ociRuntime = ociRuntime
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index d366ffb84..d43cb4829 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -39,7 +39,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
// don't recursively start
- if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err := c.checkDependenciesAndHandleError(); err != nil {
return err
}
@@ -146,7 +146,7 @@ func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err e
}
}
- if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err = c.checkDependenciesAndHandleError(); err != nil {
return err
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index b26dcddf6..c6d9e1a65 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -90,7 +90,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
- inspectMounts, err := c.getInspectMounts(ctrSpec, namedVolumes, mounts)
+ inspectMounts, err := c.getInspectMounts(namedVolumes, mounts)
if err != nil {
return nil, err
}
@@ -164,10 +164,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
}
data.NetworkSettings = networkConfig
- inspectConfig, err := c.generateInspectContainerConfig(ctrSpec)
- if err != nil {
- return nil, err
- }
+ inspectConfig := c.generateInspectContainerConfig(ctrSpec)
data.Config = inspectConfig
hostConfig, err := c.generateInspectContainerHostConfig(ctrSpec, namedVolumes, mounts)
@@ -195,7 +192,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
// Get inspect-formatted mounts list.
// Only includes user-specified mounts. Only includes bind mounts and named
// volumes, not tmpfs volumes.
-func (c *Container) getInspectMounts(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
+func (c *Container) getInspectMounts(namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) ([]define.InspectMount, error) {
inspectMounts := []define.InspectMount{}
// No mounts, return early
@@ -278,7 +275,7 @@ func parseMountOptionsForInspect(options []string, mount *define.InspectMount) {
}
// Generate the InspectContainerConfig struct for the Config field of Inspect.
-func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*define.InspectContainerConfig, error) {
+func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.InspectContainerConfig {
ctrConfig := new(define.InspectContainerConfig)
ctrConfig.Hostname = c.Hostname()
@@ -325,7 +322,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*define.Ins
ctrConfig.CreateCommand = c.config.CreateCommand
- return ctrConfig, nil
+ return ctrConfig
}
// Generate the InspectContainerHostConfig struct for the HostConfig field of
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f6fc3c1a4..73e0b2118 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -239,7 +239,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
// Need to check if dependencies are alive.
- if err = c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err = c.checkDependenciesAndHandleError(); err != nil {
return false, err
}
@@ -513,7 +513,7 @@ func (c *Container) teardownStorage() error {
// Reset resets state fields to default values.
// It is performed before a refresh and clears the state after a reboot.
// It does not save the results - assumes the database will do that for us.
-func resetState(state *ContainerState) error {
+func resetState(state *ContainerState) {
state.PID = 0
state.ConmonPID = 0
state.Mountpoint = ""
@@ -527,8 +527,6 @@ func resetState(state *ContainerState) error {
state.StoppedByUser = false
state.RestartPolicyMatch = false
state.RestartCount = 0
-
- return nil
}
// Refresh refreshes the container's state after a restart.
@@ -756,7 +754,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
}
if !recursive {
- if err := c.checkDependenciesAndHandleError(ctx); err != nil {
+ if err := c.checkDependenciesAndHandleError(); err != nil {
return err
}
} else {
@@ -792,7 +790,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
}
// checks dependencies are running and prints a helpful message
-func (c *Container) checkDependenciesAndHandleError(ctx context.Context) error {
+func (c *Container) checkDependenciesAndHandleError() error {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 9afe11b2b..12c1abf1c 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1313,7 +1313,7 @@ func (c *Container) generateResolvConf() (string, error) {
}
}
- var dns []net.IP
+ dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers))
for _, i := range c.runtime.config.Containers.DNSServers {
result := net.ParseIP(i)
if result == nil {
@@ -1393,7 +1393,9 @@ func (c *Container) generateHosts(path string) (string, error) {
// local hosts file. netCtr is the container from which the netNS information is
// taken.
// path is the basis of the hosts file, into which netCtr's netNS information will be appended.
-func (c *Container) appendHosts(path string, netCtr *Container) (string, error) {
+// FIXME. Path should be used by this function, but I am not sure what is correct; remove //nolint
+// once this is fixed
+func (c *Container) appendHosts(path string, netCtr *Container) (string, error) { //nolint
return c.appendStringToRundir("hosts", netCtr.getHosts())
}
diff --git a/libpod/define/info.go b/libpod/define/info.go
index f136936f7..47c53d067 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -43,7 +43,7 @@ type RemoteSocket struct {
Exists bool `json:"exists,omitempty"`
}
-// SlirpInfo describes the slirp exectuable that
+// SlirpInfo describes the slirp executable that
// is being being used.
type SlirpInfo struct {
Executable string `json:"executable"`
diff --git a/libpod/events.go b/libpod/events.go
index 20ebecc66..3d07c5d76 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -85,10 +85,7 @@ func (r *Runtime) Events(options events.ReadOptions) error {
// GetEvents reads the event log and returns events based on input filters
func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
- var (
- logEvents []*events.Event
- readErr error
- )
+ var readErr error
eventChannel := make(chan *events.Event)
options := events.ReadOptions{
EventChannel: eventChannel,
@@ -106,6 +103,7 @@ func (r *Runtime) GetEvents(filters []string) ([]*events.Event, error) {
if readErr != nil {
return nil, readErr
}
+ logEvents := make([]*events.Event, 0, len(eventChannel))
for e := range eventChannel {
logEvents = append(logEvents, e)
}
diff --git a/libpod/events/filters.go b/libpod/events/filters.go
index b3c5eda6e..6eed1f61d 100644
--- a/libpod/events/filters.go
+++ b/libpod/events/filters.go
@@ -81,7 +81,7 @@ func parseFilter(filter string) (string, string, error) {
}
func generateEventOptions(filters []string, since, until string) ([]EventFilter, error) {
- var options []EventFilter
+ options := make([]EventFilter, 0, len(filters))
for _, filter := range filters {
key, val, err := parseFilter(filter)
if err != nil {
diff --git a/libpod/image/image.go b/libpod/image/image.go
index 60787b826..1101e35dc 100644
--- a/libpod/image/image.go
+++ b/libpod/image/image.go
@@ -172,8 +172,6 @@ func (ir *Runtime) New(ctx context.Context, name, signaturePolicyPath, authfile
// LoadFromArchiveReference creates a new image object for images pulled from a tar archive and the like (podman load)
// This function is needed because it is possible for a tar archive to have multiple tags for one image
func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.ImageReference, signaturePolicyPath string, writer io.Writer) ([]*Image, error) {
- var newImages []*Image
-
if signaturePolicyPath == "" {
signaturePolicyPath = ir.SignaturePolicyPath
}
@@ -182,6 +180,7 @@ func (ir *Runtime) LoadFromArchiveReference(ctx context.Context, srcRef types.Im
return nil, errors.Wrapf(err, "unable to pull %s", transports.ImageName(srcRef))
}
+ newImages := make([]*Image, 0, len(imageNames))
for _, name := range imageNames {
newImage, err := ir.NewFromLocal(name)
if err != nil {
@@ -475,11 +474,11 @@ func (ir *Runtime) GetRWImages() ([]*Image, error) {
// getImages retrieves all images present in storage
func (ir *Runtime) getImages(rwOnly bool) ([]*Image, error) {
- var newImages []*Image
images, err := ir.store.Images()
if err != nil {
return nil, err
}
+ newImages := make([]*Image, 0, len(images))
for _, i := range images {
if rwOnly && i.ReadOnly {
continue
diff --git a/libpod/image/image_test.go b/libpod/image/image_test.go
index 3cd368cdc..74067853e 100644
--- a/libpod/image/image_test.go
+++ b/libpod/image/image_test.go
@@ -44,7 +44,7 @@ func cleanup(workdir string, ir *Runtime) {
}
}
-func makeLocalMatrix(b, bg *Image) ([]localImageTest, error) {
+func makeLocalMatrix(b, bg *Image) []localImageTest {
var l []localImageTest
// busybox
busybox := localImageTest{
@@ -65,7 +65,7 @@ func makeLocalMatrix(b, bg *Image) ([]localImageTest, error) {
busyboxGlibc.names = bbGlibcNames
l = append(l, busybox, busyboxGlibc)
- return l, nil
+ return l
}
@@ -100,9 +100,7 @@ func TestImage_NewFromLocal(t *testing.T) {
bbglibc, err := ir.New(context.Background(), "docker.io/library/busybox:glibc", "", "", writer, nil, SigningOptions{}, nil, util.PullImageMissing)
assert.NoError(t, err)
- tm, err := makeLocalMatrix(bb, bbglibc)
- assert.NoError(t, err)
-
+ tm := makeLocalMatrix(bb, bbglibc)
for _, image := range tm {
// tag our images
err = image.img.TagImage(image.taggedName)
diff --git a/libpod/image/prune.go b/libpod/image/prune.go
index 3b4ea74c4..518795173 100644
--- a/libpod/image/prune.go
+++ b/libpod/image/prune.go
@@ -104,10 +104,7 @@ func (ir *Runtime) GetPruneImages(ctx context.Context, all bool, filterFuncs []I
// PruneImages prunes dangling and optionally all unused images from the local
// image store
func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) ([]string, error) {
- var (
- prunedCids []string
- filterFuncs []ImageFilter
- )
+ filterFuncs := make([]ImageFilter, 0, len(filter))
for _, f := range filter {
filterSplit := strings.SplitN(f, "=", 2)
if len(filterSplit) < 2 {
@@ -125,6 +122,7 @@ func (ir *Runtime) PruneImages(ctx context.Context, all bool, filter []string) (
if err != nil {
return nil, errors.Wrap(err, "unable to get images to prune")
}
+ prunedCids := make([]string, 0, len(pruneImages))
for _, p := range pruneImages {
repotags, err := p.RepoTags()
if err != nil {
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 6b4c40ba2..24909a59a 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -366,7 +366,7 @@ func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(inputName string) (*pullG
if err != nil {
return nil, err
}
- var refPairs []pullRefPair
+ refPairs := make([]pullRefPair, 0, len(searchRegistries))
for _, registry := range searchRegistries {
ref, err := decomposedImage.referenceWithRegistry(registry)
if err != nil {
diff --git a/libpod/image/search.go b/libpod/image/search.go
index fd29dac45..f8d45d576 100644
--- a/libpod/image/search.go
+++ b/libpod/image/search.go
@@ -93,8 +93,8 @@ func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
searchImageInRegistryHelper := func(index int, registry string) {
defer sem.Release(1)
defer wg.Done()
- searchOutput, err := searchImageInRegistry(term, registry, options)
- data[index] = searchOutputData{data: searchOutput, err: err}
+ searchOutput := searchImageInRegistry(term, registry, options)
+ data[index] = searchOutputData{data: searchOutput}
}
ctx := context.Background()
@@ -131,7 +131,7 @@ func getRegistries(registry string) ([]string, error) {
return registries, nil
}
-func searchImageInRegistry(term string, registry string, options SearchOptions) ([]SearchResult, error) {
+func searchImageInRegistry(term string, registry string, options SearchOptions) []SearchResult {
// Max number of queries by default is 25
limit := maxQueries
if options.Limit > 0 {
@@ -147,7 +147,7 @@ func searchImageInRegistry(term string, registry string, options SearchOptions)
results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit)
if err != nil {
logrus.Errorf("error searching registry %q: %v", registry, err)
- return []SearchResult{}, nil
+ return []SearchResult{}
}
index := registry
arr := strings.Split(registry, ".")
@@ -201,7 +201,7 @@ func searchImageInRegistry(term string, registry string, options SearchOptions)
}
paramsArr = append(paramsArr, params)
}
- return paramsArr, nil
+ return paramsArr
}
// ParseSearchFilter turns the filter into a SearchFilter that can be used for
diff --git a/libpod/kube.go b/libpod/kube.go
index a3c5e912f..90acd2541 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -31,8 +31,8 @@ func (c *Container) GenerateForKube() (*v1.Pod, error) {
func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
// Generate the v1.Pod yaml description
var (
- servicePorts []v1.ServicePort
- ports []v1.ContainerPort
+ ports []v1.ContainerPort //nolint
+ servicePorts []v1.ServicePort //nolint
)
allContainers, err := p.allContainers()
@@ -99,7 +99,7 @@ func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1
// containerPortsToServicePorts takes a slice of containerports and generates a
// slice of service ports
func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.ServicePort {
- var sps []v1.ServicePort
+ sps := make([]v1.ServicePort, 0, len(containerPorts))
for _, cp := range containerPorts {
nodePort := 30000 + rand.Intn(32767-30000+1)
servicePort := v1.ServicePort{
@@ -116,11 +116,11 @@ func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.Servic
// containersToServicePorts takes a slice of v1.Containers and generates an
// inclusive list of serviceports to expose
func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
- var sps []v1.ServicePort
// Without the call to rand.Seed, a program will produce the same sequence of pseudo-random numbers
// for each execution. Legal nodeport range is 30000-32767
rand.Seed(time.Now().UnixNano())
+ sps := make([]v1.ServicePort, 0, len(containers))
for _, ctr := range containers {
sps = append(sps, containerPortsToServicePorts(ctr.Ports)...)
}
@@ -128,11 +128,9 @@ func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
}
func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort) (*v1.Pod, error) {
- var (
- podContainers []v1.Container
- )
deDupPodVolumes := make(map[string]*v1.Volume)
first := true
+ podContainers := make([]v1.Container, 0, len(containers))
for _, ctr := range containers {
if !ctr.IsInfra() {
ctr, volumes, err := containerToV1Container(ctr)
@@ -201,13 +199,11 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.
// simplePodWithV1Container is a function used by inspect when kube yaml needs to be generated
// for a single container. we "insert" that container description in a pod.
func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
- var containers []v1.Container
kubeCtr, kubeVols, err := containerToV1Container(ctr)
if err != nil {
return nil, err
}
- containers = append(containers, kubeCtr)
- return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil
+ return addContainersAndVolumesToPodObject([]v1.Container{kubeCtr}, kubeVols, ctr.Name()), nil
}
@@ -223,11 +219,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
if len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
- devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
- if err != nil {
- return kubeContainer, kubeVolumes, err
- }
- kubeContainer.VolumeDevices = devices
+ kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices")
}
@@ -283,7 +275,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
// it to a v1.ContainerPort format for kube output
func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.ContainerPort, error) {
- var containerPorts []v1.ContainerPort
+ containerPorts := make([]v1.ContainerPort, 0, len(portMappings))
for _, p := range portMappings {
var protocol v1.Protocol
switch strings.ToUpper(p.Protocol) {
@@ -308,7 +300,7 @@ func ocicniPortMappingToContainerPort(portMappings []ocicni.PortMapping) ([]v1.C
// libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar
func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
- var envVars []v1.EnvVar
+ envVars := make([]v1.EnvVar, 0, len(envs))
for _, e := range envs {
split := strings.SplitN(e, "=", 2)
if len(split) != 2 {
@@ -325,11 +317,10 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
// libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
- var vms []v1.VolumeMount
- var vos []v1.Volume
-
// TODO when named volumes are supported in play kube, also parse named volumes here
_, mounts := c.sortUserVolumes(c.config.Spec)
+ vms := make([]v1.VolumeMount, 0, len(mounts))
+ vos := make([]v1.Volume, 0, len(mounts))
for _, m := range mounts {
vm, vo, err := generateKubeVolumeMount(m)
if err != nil {
@@ -404,8 +395,8 @@ func convertVolumePathToName(hostSourcePath string) (string, error) {
func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
var (
- drop []v1.Capability
- add []v1.Capability
+ drop = []v1.Capability{}
+ add = []v1.Capability{}
)
dedupDrop := make(map[string]bool)
dedupAdd := make(map[string]bool)
@@ -518,8 +509,8 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
}
// generateKubeVolumeDeviceFromLinuxDevice takes a list of devices and makes a VolumeDevice struct for kube
-func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.VolumeDevice, error) {
- var volumeDevices []v1.VolumeDevice
+func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) []v1.VolumeDevice {
+ volumeDevices := make([]v1.VolumeDevice, 0, len(devices))
for _, d := range devices {
vd := v1.VolumeDevice{
// TBD How are we going to sync up these names
@@ -528,7 +519,7 @@ func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) ([]v1.
}
volumeDevices = append(volumeDevices, vd)
}
- return volumeDevices, nil
+ return volumeDevices
}
func removeUnderscores(s string) string {
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 90e90cc6c..8caf00e6e 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -32,7 +32,7 @@ type MissingRuntime struct {
// Get a new MissingRuntime for the given name.
// Requires a libpod Runtime so we can make a sane path for the exits dir.
-func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) {
+func getMissingRuntime(name string, r *Runtime) OCIRuntime {
missingRuntimesLock.Lock()
defer missingRuntimesLock.Unlock()
@@ -42,7 +42,7 @@ func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) {
runtime, ok := missingRuntimes[name]
if ok {
- return runtime, nil
+ return runtime
}
// Once for each missing runtime, we want to error.
@@ -54,7 +54,7 @@ func getMissingRuntime(name string, r *Runtime) (OCIRuntime, error) {
missingRuntimes[name] = newRuntime
- return newRuntime, nil
+ return newRuntime
}
// Name is the name of the missing runtime
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index c8605eb69..98f4cad73 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -432,10 +432,6 @@ func containerStatusFromContainers(allCtrs []*Container) (map[string]define.Cont
// Inspect returns a PodInspect struct to describe the pod
func (p *Pod) Inspect() (*define.InspectPodData, error) {
- var (
- ctrs []define.InspectPodContainerInfo
- )
-
p.lock.Lock()
defer p.lock.Unlock()
if err := p.updatePod(); err != nil {
@@ -446,6 +442,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
if err != nil {
return nil, err
}
+ ctrs := make([]define.InspectPodContainerInfo, 0, len(containers))
ctrStatuses := make(map[string]define.ContainerStatus, len(containers))
for _, c := range containers {
containerStatus := "unknown"
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 851f52a4e..9e60d3c07 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -13,7 +13,7 @@ import (
)
// Creates a new, empty pod
-func newPod(runtime *Runtime) (*Pod, error) {
+func newPod(runtime *Runtime) *Pod {
pod := new(Pod)
pod.config = new(PodConfig)
pod.config.ID = stringid.GenerateNonCryptoID()
@@ -23,7 +23,7 @@ func newPod(runtime *Runtime) (*Pod, error) {
pod.state = new(podState)
pod.runtime = runtime
- return pod, nil
+ return pod
}
// Update pod state from database
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 4744de1a2..b1e48b3b3 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -286,9 +286,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
- if err := runtime.mergeDBConfig(dbConfig); err != nil {
- return errors.Wrapf(err, "error merging database config into runtime config")
- }
+ runtime.mergeDBConfig(dbConfig)
logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
@@ -696,11 +694,7 @@ func (r *Runtime) configureStore() error {
// Set up a storage service for creating container root filesystems from
// images
- storageService, err := getStorageService(r.store)
- if err != nil {
- return err
- }
- r.storageService = storageService
+ r.storageService = getStorageService(r.store)
ir := image.NewImageRuntimeFromStore(r.store)
ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath
@@ -751,7 +745,7 @@ type DBConfig struct {
}
// mergeDBConfig merges the configuration from the database.
-func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error {
+func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
c := &r.config.Engine
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
@@ -802,7 +796,6 @@ func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error {
}
c.VolumePath = dbConfig.VolumePath
}
- return nil
}
func (r *Runtime) EnableLabeling() bool {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index aa91dff03..f0beb0941 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -813,7 +813,7 @@ func (r *Runtime) GetRunningContainers() ([]*Container, error) {
// GetContainersByList is a helper function for GetContainers
// which takes a []string of container IDs or names
func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error) {
- var ctrs []*Container
+ ctrs := make([]*Container, 0, len(containers))
for _, inputContainer := range containers {
ctr, err := r.LookupContainer(inputContainer)
if err != nil {
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 73b6c5d9b..25584c5ad 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -28,10 +28,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po
return nil, define.ErrRuntimeStopped
}
- pod, err := newPod(r)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating pod")
- }
+ pod := newPod(r)
// Set default namespace to runtime's namespace
// Do so before options run so they can override it
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index d4b46cc94..f6ecae4ab 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -29,11 +29,7 @@ func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption)
// newVolume creates a new empty volume
func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, deferredErr error) {
- volume, err := newVolume(r)
- if err != nil {
- return nil, errors.Wrapf(err, "error creating volume")
- }
-
+ volume := newVolume(r)
for _, option := range options {
if err := option(volume); err != nil {
return nil, errors.Wrapf(err, "error running volume create option")
diff --git a/libpod/storage.go b/libpod/storage.go
index 34e40f699..c90020833 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -21,8 +21,8 @@ type storageService struct {
// getStorageService returns a storageService which can create container root
// filesystems from images
-func getStorageService(store storage.Store) (*storageService, error) {
- return &storageService{store: store}, nil
+func getStorageService(store storage.Store) *storageService {
+ return &storageService{store: store}
}
// ContainerInfo wraps a subset of information about a container: the locations
diff --git a/libpod/volume_internal.go b/libpod/volume_internal.go
index 781ff77ca..d7d5a2494 100644
--- a/libpod/volume_internal.go
+++ b/libpod/volume_internal.go
@@ -9,7 +9,7 @@ import (
)
// Creates a new volume
-func newVolume(runtime *Runtime) (*Volume, error) {
+func newVolume(runtime *Runtime) *Volume {
volume := new(Volume)
volume.config = new(VolumeConfig)
volume.state = new(VolumeState)
@@ -17,8 +17,7 @@ func newVolume(runtime *Runtime) (*Volume, error) {
volume.config.Labels = make(map[string]string)
volume.config.Options = make(map[string]string)
volume.state.NeedsCopyUp = true
-
- return volume, nil
+ return volume
}
// teardownStorage deletes the volume from volumePath
diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go
index 3d4bd4fb5..3ae9d9ab3 100644
--- a/pkg/api/handlers/compat/containers_create.go
+++ b/pkg/api/handlers/compat/containers_create.go
@@ -62,10 +62,8 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
func makeCreateConfig(containerConfig *config.Config, input handlers.CreateContainerConfig, newImage *image2.Image) (createconfig.CreateConfig, error) {
var (
- err error
- init bool
- tmpfs []string
- volumes []string
+ err error
+ init bool
)
env := make(map[string]string)
stopSignal := unix.SIGTERM
@@ -137,6 +135,7 @@ func makeCreateConfig(containerConfig *config.Config, input handlers.CreateConta
User: input.User,
}
pidConfig := createconfig.PidConfig{PidMode: namespaces.PidMode(input.HostConfig.PidMode)}
+ volumes := make([]string, 0, len(input.Volumes))
for k := range input.Volumes {
volumes = append(volumes, k)
}
@@ -158,6 +157,7 @@ func makeCreateConfig(containerConfig *config.Config, input handlers.CreateConta
}
// format the tmpfs mounts into a []string from map
+ tmpfs := make([]string, 0, len(input.HostConfig.Tmpfs))
for k, v := range input.HostConfig.Tmpfs {
tmpfs = append(tmpfs, fmt.Sprintf("%s:%s", k, v))
}
diff --git a/pkg/api/handlers/compat/images_history.go b/pkg/api/handlers/compat/images_history.go
index afadf4c48..7c0bbf828 100644
--- a/pkg/api/handlers/compat/images_history.go
+++ b/pkg/api/handlers/compat/images_history.go
@@ -12,7 +12,6 @@ import (
func HistoryImage(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
- var allHistory []handlers.HistoryResponse
newImage, err := runtime.ImageRuntime().NewFromLocal(name)
if err != nil {
@@ -25,6 +24,7 @@ func HistoryImage(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
+ allHistory := make([]handlers.HistoryResponse, 0, len(history))
for _, h := range history {
l := handlers.HistoryResponse{
ID: h.ID,
diff --git a/pkg/api/handlers/compat/info.go b/pkg/api/handlers/compat/info.go
index d4a933c54..5c3f610a4 100644
--- a/pkg/api/handlers/compat/info.go
+++ b/pkg/api/handlers/compat/info.go
@@ -129,7 +129,7 @@ func GetInfo(w http.ResponseWriter, r *http.Request) {
}
func getGraphStatus(storeInfo map[string]string) [][2]string {
- var graphStatus [][2]string
+ graphStatus := make([][2]string, 0, len(storeInfo))
for k, v := range storeInfo {
graphStatus = append(graphStatus, [2]string{k, v})
}
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index 8734ba405..0f1eca5e5 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -162,9 +162,6 @@ func findPluginByName(plugins []*libcni.NetworkConfig, pluginType string) ([]byt
}
func ListNetworks(w http.ResponseWriter, r *http.Request) {
- var (
- reports []*types.NetworkResource
- )
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
@@ -191,6 +188,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
+ reports := make([]*types.NetworkResource, 0, len(netNames))
for _, name := range netNames {
report, err := getNetworkResourceByName(name, runtime)
if err != nil {
@@ -215,7 +213,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
if len(networkCreate.Name) > 0 {
name = networkCreate.Name
}
- // At present I think we should just suport the bridge driver
+ // At present I think we should just support the bridge driver
// and allow demand to make us consider more
if networkCreate.Driver != network.DefaultNetworkDriver {
utils.InternalServerError(w, errors.New("network create only supports the bridge driver"))
diff --git a/pkg/api/handlers/libpod/pods.go b/pkg/api/handlers/libpod/pods.go
index 7d4d03144..4b57ef26a 100644
--- a/pkg/api/handlers/libpod/pods.go
+++ b/pkg/api/handlers/libpod/pods.go
@@ -89,7 +89,6 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
runtime = r.Context().Value("runtime").(*libpod.Runtime)
decoder = r.Context().Value("decoder").(*schema.Decoder)
responses map[string]error
- errs []error
)
query := struct {
Timeout int `schema:"t"`
@@ -128,6 +127,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong", http.StatusInternalServerError, err)
return
}
+ var errs []error //nolint
for _, err := range responses {
errs = append(errs, err)
}
@@ -139,9 +139,7 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
}
func PodStart(w http.ResponseWriter, r *http.Request) {
- var (
- errs []error
- )
+ var errs []error //nolint
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
pod, err := runtime.LookupPod(name)
@@ -206,9 +204,7 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
}
func PodRestart(w http.ResponseWriter, r *http.Request) {
- var (
- errs []error
- )
+ var errs []error //nolint
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
pod, err := runtime.LookupPod(name)
@@ -243,12 +239,12 @@ func PodPrune(w http.ResponseWriter, r *http.Request) {
func PodPruneHelper(w http.ResponseWriter, r *http.Request) ([]*entities.PodPruneReport, error) {
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
- reports []*entities.PodPruneReport
)
responses, err := runtime.PrunePods(r.Context())
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodPruneReport, 0, len(responses))
for k, v := range responses {
reports = append(reports, &entities.PodPruneReport{
Err: v,
@@ -259,9 +255,7 @@ func PodPruneHelper(w http.ResponseWriter, r *http.Request) ([]*entities.PodPrun
}
func PodPause(w http.ResponseWriter, r *http.Request) {
- var (
- errs []error
- )
+ var errs []error //nolint
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
pod, err := runtime.LookupPod(name)
@@ -285,9 +279,7 @@ func PodPause(w http.ResponseWriter, r *http.Request) {
}
func PodUnpause(w http.ResponseWriter, r *http.Request) {
- var (
- errs []error
- )
+ var errs []error //nolint
runtime := r.Context().Value("runtime").(*libpod.Runtime)
name := utils.GetName(r)
pod, err := runtime.LookupPod(name)
@@ -357,7 +349,7 @@ func PodKill(w http.ResponseWriter, r *http.Request) {
runtime = r.Context().Value("runtime").(*libpod.Runtime)
decoder = r.Context().Value("decoder").(*schema.Decoder)
signal = "SIGKILL"
- errs []error
+ errs []error //nolint
)
query := struct {
Signal string `schema:"signal"`
diff --git a/pkg/api/handlers/libpod/system.go b/pkg/api/handlers/libpod/system.go
index f575546c9..52d3b91ab 100644
--- a/pkg/api/handlers/libpod/system.go
+++ b/pkg/api/handlers/libpod/system.go
@@ -61,7 +61,7 @@ func SystemPrune(w http.ResponseWriter, r *http.Request) {
systemPruneReport.ImagePruneReport = &report
if query.Volumes {
- volumePruneReport, err := pruneVolumesHelper(w, r)
+ volumePruneReport, err := pruneVolumesHelper(r)
if err != nil {
utils.InternalServerError(w, err)
return
diff --git a/pkg/api/handlers/libpod/volumes.go b/pkg/api/handlers/libpod/volumes.go
index b5574b87b..ea035fc4d 100644
--- a/pkg/api/handlers/libpod/volumes.go
+++ b/pkg/api/handlers/libpod/volumes.go
@@ -102,9 +102,8 @@ func InspectVolume(w http.ResponseWriter, r *http.Request) {
func ListVolumes(w http.ResponseWriter, r *http.Request) {
var (
- decoder = r.Context().Value("decoder").(*schema.Decoder)
- runtime = r.Context().Value("runtime").(*libpod.Runtime)
- volumeConfigs []*entities.VolumeListReport
+ decoder = r.Context().Value("decoder").(*schema.Decoder)
+ runtime = r.Context().Value("runtime").(*libpod.Runtime)
)
query := struct {
Filters map[string][]string `schema:"filters"`
@@ -129,6 +128,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
+ volumeConfigs := make([]*entities.VolumeListReport, 0, len(vols))
for _, v := range vols {
config := entities.VolumeConfigResponse{
Name: v.Name(),
@@ -147,7 +147,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
}
func PruneVolumes(w http.ResponseWriter, r *http.Request) {
- reports, err := pruneVolumesHelper(w, r)
+ reports, err := pruneVolumesHelper(r)
if err != nil {
utils.InternalServerError(w, err)
return
@@ -155,15 +155,15 @@ func PruneVolumes(w http.ResponseWriter, r *http.Request) {
utils.WriteResponse(w, http.StatusOK, reports)
}
-func pruneVolumesHelper(w http.ResponseWriter, r *http.Request) ([]*entities.VolumePruneReport, error) {
+func pruneVolumesHelper(r *http.Request) ([]*entities.VolumePruneReport, error) {
var (
runtime = r.Context().Value("runtime").(*libpod.Runtime)
- reports []*entities.VolumePruneReport
)
pruned, err := runtime.PruneVolumes(r.Context())
if err != nil {
return nil, err
}
+ reports := make([]*entities.VolumePruneReport, 0, len(pruned))
for k, v := range pruned {
reports = append(reports, &entities.VolumePruneReport{
Err: v,
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 7fb31a177..521f727be 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -3,6 +3,7 @@ package utils
import (
"fmt"
"net/http"
+ "strings"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/storage"
@@ -77,9 +78,7 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
if len(query.Filters) > 0 {
for k, v := range query.Filters {
- for _, val := range v {
- filters = append(filters, fmt.Sprintf("%s=%s", k, val))
- }
+ filters = append(filters, fmt.Sprintf("%s=%s", k, strings.Join(v, "=")))
}
images, err = runtime.ImageRuntime().GetImagesWithFilters(filters)
if err != nil {
@@ -94,7 +93,7 @@ func GetImages(w http.ResponseWriter, r *http.Request) ([]*image.Image, error) {
if query.All {
return images, nil
}
- var returnImages []*image.Image
+ returnImages := []*image.Image{}
for _, img := range images {
if len(img.Names()) == 0 {
parent, err := img.IsParent(r.Context())
diff --git a/pkg/api/handlers/utils/pods.go b/pkg/api/handlers/utils/pods.go
index 4a5cbd05c..0bb818c1c 100644
--- a/pkg/api/handlers/utils/pods.go
+++ b/pkg/api/handlers/utils/pods.go
@@ -11,7 +11,6 @@ import (
func GetPods(w http.ResponseWriter, r *http.Request) ([]*entities.ListPodsReport, error) {
var (
- lps []*entities.ListPodsReport
pods []*libpod.Pod
filters []libpod.PodFilter
)
@@ -45,6 +44,11 @@ func GetPods(w http.ResponseWriter, r *http.Request) ([]*entities.ListPodsReport
return nil, err
}
+ if len(pods) == 0 {
+ return nil, nil
+ }
+
+ lps := make([]*entities.ListPodsReport, 0, len(pods))
for _, pod := range pods {
status, err := pod.GetPodStatus()
if err != nil {
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index aa7f3707c..a9c61e5ae 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -115,12 +115,12 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, passPhrase strin
_url.Path = JoinURL(_url.Host, _url.Path)
_url.Host = ""
}
- connection, err = unixClient(_url)
+ connection = unixClient(_url)
case "tcp":
if !strings.HasPrefix(uri, "tcp://") {
return nil, errors.New("tcp URIs should begin with tcp://")
}
- connection, err = tcpClient(_url)
+ connection = tcpClient(_url)
default:
return nil, errors.Errorf("'%s' is not a supported schema", _url.Scheme)
}
@@ -135,7 +135,7 @@ func NewConnectionWithIdentity(ctx context.Context, uri string, passPhrase strin
return ctx, nil
}
-func tcpClient(_url *url.URL) (Connection, error) {
+func tcpClient(_url *url.URL) Connection {
connection := Connection{
URI: _url,
}
@@ -147,7 +147,7 @@ func tcpClient(_url *url.URL) (Connection, error) {
DisableCompression: true,
},
}
- return connection, nil
+ return connection
}
// pingNewConnection pings to make sure the RESTFUL service is up
@@ -186,8 +186,7 @@ func pingNewConnection(ctx context.Context) error {
}
func sshClient(_url *url.URL, secure bool, passPhrase string, identities ...string) (Connection, error) {
- var authMethods []ssh.AuthMethod
-
+ authMethods := []ssh.AuthMethod{}
for _, i := range identities {
auth, err := publicKey(i, []byte(passPhrase))
if err != nil {
@@ -256,7 +255,7 @@ func sshClient(_url *url.URL, secure bool, passPhrase string, identities ...stri
return connection, nil
}
-func unixClient(_url *url.URL) (Connection, error) {
+func unixClient(_url *url.URL) Connection {
connection := Connection{URI: _url}
connection.Client = &http.Client{
Transport: &http.Transport{
@@ -266,7 +265,7 @@ func unixClient(_url *url.URL) (Connection, error) {
DisableCompression: true,
},
}
- return connection, nil
+ return connection
}
// DoRequest assembles the http request and returns the response
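The connection.go hunk drops the error return from unixClient and tcpClient: constructing an http.Client around a custom transport cannot fail, so there is nothing to propagate. A self-contained sketch of that shape, using an illustrative unix-socket dialer rather than the patch's actual Connection type, assuming a socket path taken from the URL:

    package main

    import (
        "context"
        "fmt"
        "net"
        "net/http"
        "net/url"
    )

    // unixClient builds a client whose transport dials a unix socket.
    // Nothing in here can fail, so no error return is needed.
    func unixClient(_url *url.URL) *http.Client {
        return &http.Client{
            Transport: &http.Transport{
                DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                    var d net.Dialer
                    return d.DialContext(ctx, "unix", _url.Path)
                },
                DisableCompression: true,
            },
        }
    }

    func main() {
        u, _ := url.Parse("unix:///run/podman/podman.sock")
        c := unixClient(u)
        fmt.Printf("%T ready for %s\n", c.Transport, u.Path)
    }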
diff --git a/pkg/bindings/containers/checkpoint.go b/pkg/bindings/containers/checkpoint.go
index 916ec8071..8a3932e80 100644
--- a/pkg/bindings/containers/checkpoint.go
+++ b/pkg/bindings/containers/checkpoint.go
@@ -42,7 +42,7 @@ func Checkpoint(ctx context.Context, nameOrID string, keep, leaveRunning, tcpEst
}
// Restore restores a checkpointed container to running. The container is identified by the nameOrID option. All
-// additional options are optional and allow finer control of the restore processs.
+// additional options are optional and allow finer control of the restore process.
func Restore(ctx context.Context, nameOrID string, keep, tcpEstablished, ignoreRootFS, ignoreStaticIP, ignoreStaticMAC *bool, name, importArchive *string) (*entities.RestoreReport, error) {
var report entities.RestoreReport
conn, err := bindings.GetClient(ctx)
diff --git a/pkg/bindings/containers/types.go b/pkg/bindings/containers/types.go
index 31daaf565..f288c2944 100644
--- a/pkg/bindings/containers/types.go
+++ b/pkg/bindings/containers/types.go
@@ -12,7 +12,7 @@ type LogOptions struct {
Until *string
}
-// CommitOptions describe details about the resulting commited
+// CommitOptions describe details about the resulting committed
// image as defined by repo and tag. None of these options
// are required.
type CommitOptions struct {
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index a82a9080b..9cb6a0ac5 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -394,7 +394,7 @@ func Pull(ctx context.Context, rawImage string, options entities.ImagePullOption
}
// Push is the binding for libpod's v2 endpoints for push images. Note that
-// `source` must be a refering to an image in the remote's container storage.
+// `source` must be a referring to an image in the remote's container storage.
// The destination must be a reference to a registry (i.e., of docker transport
// or be normalized to one). Other transports are rejected as they do not make
// sense in a remote context.
diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go
index 010762bef..b2ee3951b 100644
--- a/pkg/bindings/system/system.go
+++ b/pkg/bindings/system/system.go
@@ -125,6 +125,7 @@ func Version(ctx context.Context) (*entities.SystemVersionReport, error) {
Version: component.Version.Version,
GoVersion: component.GoVersion,
GitCommit: component.GitCommit,
+ BuiltTime: time.Unix(b.Unix(), 0).Format(time.ANSIC),
Built: b.Unix(),
OsArch: fmt.Sprintf("%s/%s", component.Os, component.Arch),
}
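The system.go hunk adds a BuiltTime field by round-tripping the build timestamp through Unix seconds and rendering it in ANSIC form. A tiny sketch of just that formatting step, with an invented timestamp value:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Hypothetical build time in Unix seconds; the real code takes it from the
        // version endpoint's component data.
        built := int64(1590000000)

        // Render in time.ANSIC layout, e.g. "Wed May 20 18:40:00 2020" (local zone).
        fmt.Println(time.Unix(built, 0).Format(time.ANSIC))
    }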
diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go
index 3b56f944f..399072108 100644
--- a/pkg/cgroups/cgroups.go
+++ b/pkg/cgroups/cgroups.go
@@ -133,7 +133,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool)
if err != nil {
return nil, errors.Wrapf(err, "read directory %s", cgroupRoot)
}
- var controllers []controller
+ controllers := []controller{}
for _, i := range infos {
name := i.Name()
if _, found := exclude[name]; found {
@@ -505,7 +505,7 @@ func (c *CgroupControl) AddPid(pid int) error {
return nil
}
- var names []string
+ names := make([]string, 0, len(handlers))
for n := range handlers {
names = append(names, n)
}
diff --git a/pkg/cgroups/cpu.go b/pkg/cgroups/cpu.go
index 5f0a18031..3745c6e50 100644
--- a/pkg/cgroups/cpu.go
+++ b/pkg/cgroups/cpu.go
@@ -29,13 +29,12 @@ func readAcct(ctr *CgroupControl, name string) (uint64, error) {
}
func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) {
- var r []uint64
-
p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name)
data, err := ioutil.ReadFile(p)
if err != nil {
return nil, errors.Wrapf(err, "reading %s", p)
}
+ r := []uint64{}
for _, s := range strings.Split(string(data), " ") {
s = cleanString(s)
if s == "" {
diff --git a/pkg/domain/entities/generate.go b/pkg/domain/entities/generate.go
index 68a42d897..a8ad13705 100644
--- a/pkg/domain/entities/generate.go
+++ b/pkg/domain/entities/generate.go
@@ -18,7 +18,7 @@ type GenerateSystemdOptions struct {
ContainerPrefix string
// PodPrefix - systemd unit name prefix for pods
PodPrefix string
- // Separator - systemd unit name seperator between name/id and prefix
+ // Separator - systemd unit name separator between name/id and prefix
Separator string
}
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 4d6d0d59a..d2c8aefdc 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -84,13 +84,11 @@ func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string)
}
func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) {
- var (
- responses []entities.WaitReport
- )
ctrs, err := getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
+ responses := make([]entities.WaitReport, 0, len(ctrs))
for _, c := range ctrs {
response := entities.WaitReport{Id: c.ID()}
exitCode, err := c.WaitForConditionWithInterval(options.Interval, options.Condition)
@@ -106,10 +104,9 @@ func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []strin
func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
var (
- ctrs []*libpod.Container
- err error
- report []*entities.PauseUnpauseReport
+ err error
)
+ ctrs := []*libpod.Container{} //nolint
if options.All {
ctrs, err = ic.Libpod.GetAllContainers()
} else {
@@ -118,6 +115,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
if err != nil {
return nil, err
}
+ report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := c.Pause()
report = append(report, &entities.PauseUnpauseReport{Id: c.ID(), Err: err})
@@ -127,10 +125,9 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
var (
- ctrs []*libpod.Container
- err error
- report []*entities.PauseUnpauseReport
+ err error
)
+ ctrs := []*libpod.Container{} //nolint
if options.All {
ctrs, err = ic.Libpod.GetAllContainers()
} else {
@@ -139,6 +136,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
if err != nil {
return nil, err
}
+ report := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := c.Unpause()
report = append(report, &entities.PauseUnpauseReport{Id: c.ID(), Err: err})
@@ -146,9 +144,6 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
return report, nil
}
func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
- var (
- reports []*entities.StopReport
- )
names := namesOrIds
for _, cidFile := range options.CIDFiles {
content, err := ioutil.ReadFile(cidFile)
@@ -184,6 +179,7 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
if err != nil {
return nil, err
}
+ reports := make([]*entities.StopReport, 0, len(errMap))
for ctr, err := range errMap {
report := new(entities.StopReport)
report.Id = ctr.ID()
@@ -204,10 +200,10 @@ func (ic *ContainerEngine) ContainerPrune(ctx context.Context, options entities.
filterFuncs = append(filterFuncs, generatedFunc)
}
}
- return ic.pruneContainersHelper(ctx, filterFuncs)
+ return ic.pruneContainersHelper(filterFuncs)
}
-func (ic *ContainerEngine) pruneContainersHelper(ctx context.Context, filterFuncs []libpod.ContainerFilter) (*entities.ContainerPruneReport, error) {
+func (ic *ContainerEngine) pruneContainersHelper(filterFuncs []libpod.ContainerFilter) (*entities.ContainerPruneReport, error) {
prunedContainers, pruneErrors, err := ic.Libpod.PruneContainers(filterFuncs)
if err != nil {
return nil, err
@@ -220,9 +216,6 @@ func (ic *ContainerEngine) pruneContainersHelper(ctx context.Context, filterFunc
}
func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) {
- var (
- reports []*entities.KillReport
- )
sig, err := signal.ParseSignalNameOrNumber(options.Signal)
if err != nil {
return nil, err
@@ -231,6 +224,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
if err != nil {
return nil, err
}
+ reports := make([]*entities.KillReport, 0, len(ctrs))
for _, con := range ctrs {
reports = append(reports, &entities.KillReport{
Id: con.ID(),
@@ -241,9 +235,8 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
}
func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
var (
- ctrs []*libpod.Container
- err error
- reports []*entities.RestartReport
+ ctrs []*libpod.Container
+ err error
)
if options.Running {
@@ -258,6 +251,7 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
}
}
+ reports := make([]*entities.RestartReport, 0, len(ctrs))
for _, con := range ctrs {
timeout := con.StopTimeout()
if options.Timeout != nil {
@@ -272,9 +266,7 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
}
func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
- var (
- reports []*entities.RmReport
- )
+ reports := []*entities.RmReport{}
if options.Storage {
for _, ctr := range namesOrIds {
report := entities.RmReport{Id: ctr}
@@ -347,11 +339,11 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
}
func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, error) {
- var reports []*entities.ContainerInspectReport
ctrs, err := getContainersByContext(false, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
+ reports := make([]*entities.ContainerInspectReport, 0, len(ctrs))
for _, c := range ctrs {
data, err := c.Inspect(options.Size)
if err != nil {
@@ -439,9 +431,8 @@ func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrID string,
func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, options entities.CheckpointOptions) ([]*entities.CheckpointReport, error) {
var (
- err error
- cons []*libpod.Container
- reports []*entities.CheckpointReport
+ err error
+ cons []*libpod.Container
)
checkOpts := libpod.ContainerCheckpointOptions{
Keep: options.Keep,
@@ -463,6 +454,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
if err != nil {
return nil, err
}
+ reports := make([]*entities.CheckpointReport, 0, len(cons))
for _, con := range cons {
err = con.Checkpoint(ctx, checkOpts)
reports = append(reports, &entities.CheckpointReport{
@@ -475,10 +467,8 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []string, options entities.RestoreOptions) ([]*entities.RestoreReport, error) {
var (
- cons []*libpod.Container
- err error
- filterFuncs []libpod.ContainerFilter
- reports []*entities.RestoreReport
+ cons []*libpod.Container
+ err error
)
restoreOptions := libpod.ContainerCheckpointOptions{
@@ -491,10 +481,12 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
IgnoreStaticMAC: options.IgnoreStaticMAC,
}
- filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
- state, _ := c.State()
- return state == define.ContainerStateExited
- })
+ filterFuncs := []libpod.ContainerFilter{
+ func(c *libpod.Container) bool {
+ state, _ := c.State()
+ return state == define.ContainerStateExited
+ },
+ }
switch {
case options.Import != "":
@@ -507,6 +499,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
if err != nil {
return nil, err
}
+ reports := make([]*entities.RestoreReport, 0, len(cons))
for _, con := range cons {
err := con.Restore(ctx, restoreOptions)
reports = append(reports, &entities.RestoreReport{
@@ -565,34 +558,34 @@ func makeExecConfig(options entities.ExecOptions) *libpod.ExecConfig {
return execConfig
}
-func checkExecPreserveFDs(options entities.ExecOptions) (int, error) {
- ec := define.ExecErrorCodeGeneric
+func checkExecPreserveFDs(options entities.ExecOptions) error {
if options.PreserveFDs > 0 {
entries, err := ioutil.ReadDir("/proc/self/fd")
if err != nil {
- return ec, errors.Wrapf(err, "unable to read /proc/self/fd")
+ return errors.Wrapf(err, "unable to read /proc/self/fd")
}
m := make(map[int]bool)
for _, e := range entries {
i, err := strconv.Atoi(e.Name())
if err != nil {
- return ec, errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
+ return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
}
m[i] = true
}
for i := 3; i < 3+int(options.PreserveFDs); i++ {
if _, found := m[i]; !found {
- return ec, errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
+ return errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
}
}
}
- return ec, nil
+ return nil
}
func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrID string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
- ec, err := checkExecPreserveFDs(options)
+ ec := define.ExecErrorCodeGeneric
+ err := checkExecPreserveFDs(options)
if err != nil {
return ec, err
}
@@ -609,7 +602,7 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrID string, o
}
func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID string, options entities.ExecOptions) (string, error) {
- _, err := checkExecPreserveFDs(options)
+ err := checkExecPreserveFDs(options)
if err != nil {
return "", err
}
@@ -648,7 +641,7 @@ func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID s
}
func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) {
- var reports []*entities.ContainerStartReport
+ reports := []*entities.ContainerStartReport{}
var exitCode = define.ExecErrorCodeGeneric
ctrs, rawInputs, err := getContainersAndInputByContext(false, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
@@ -907,7 +900,7 @@ func (ic *ContainerEngine) ContainerLogs(ctx context.Context, containers []strin
}
func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []string, options entities.ContainerCleanupOptions) ([]*entities.ContainerCleanupReport, error) {
- var reports []*entities.ContainerCleanupReport
+ reports := []*entities.ContainerCleanupReport{}
ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
@@ -958,11 +951,11 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
}
func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []string, options entities.ContainerInitOptions) ([]*entities.ContainerInitReport, error) {
- var reports []*entities.ContainerInitReport
ctrs, err := getContainersByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
}
+ reports := make([]*entities.ContainerInitReport, 0, len(ctrs))
for _, ctr := range ctrs {
report := entities.ContainerInitReport{Id: ctr.ID()}
err := ctr.Init(ctx)
@@ -993,11 +986,11 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []strin
os.Exit(ret)
}
}
- var reports []*entities.ContainerMountReport
ctrs, err := getContainersByContext(options.All, options.Latest, nameOrIDs, ic.Libpod)
if err != nil {
return nil, err
}
+ reports := make([]*entities.ContainerMountReport, 0, len(ctrs))
for _, ctr := range ctrs {
report := entities.ContainerMountReport{Id: ctr.ID()}
report.Path, report.Err = ctr.Mount()
@@ -1030,11 +1023,11 @@ func (ic *ContainerEngine) ContainerMount(ctx context.Context, nameOrIDs []strin
}
func (ic *ContainerEngine) ContainerUnmount(ctx context.Context, nameOrIDs []string, options entities.ContainerUnmountOptions) ([]*entities.ContainerUnmountReport, error) {
- var reports []*entities.ContainerUnmountReport
ctrs, err := getContainersByContext(options.All, options.Latest, nameOrIDs, ic.Libpod)
if err != nil {
return nil, err
}
+ reports := []*entities.ContainerUnmountReport{}
for _, ctr := range ctrs {
state, err := ctr.State()
if err != nil {
@@ -1065,11 +1058,11 @@ func (ic *ContainerEngine) Config(_ context.Context) (*config.Config, error) {
}
func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
- var reports []*entities.ContainerPortReport
ctrs, err := getContainersByContext(options.All, options.Latest, []string{nameOrID}, ic.Libpod)
if err != nil {
return nil, err
}
+ reports := []*entities.ContainerPortReport{}
for _, con := range ctrs {
state, err := con.State()
if err != nil {
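The dominant pattern in this file (and across the commit) replaces a bare `var reports []*T` declaration with `make([]*T, 0, len(src))` once the source slice is known, which is what the prealloc linter asks for: the backing array is sized up front instead of growing through repeated appends. A minimal sketch of the pattern, using an invented Report type rather than the entities types from the patch:

    package main

    import "fmt"

    type Report struct {
        ID  string
        Err error
    }

    func buildReports(ids []string) []*Report {
        // Preallocate capacity for one report per input so append never reallocates.
        reports := make([]*Report, 0, len(ids))
        for _, id := range ids {
            reports = append(reports, &Report{ID: id})
        }
        return reports
    }

    func main() {
        fmt.Println(len(buildReports([]string{"a", "b", "c"})))
    }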
diff --git a/pkg/domain/infra/abi/containers_runlabel.go b/pkg/domain/infra/abi/containers_runlabel.go
index 41f4444d5..37422aac5 100644
--- a/pkg/domain/infra/abi/containers_runlabel.go
+++ b/pkg/domain/infra/abi/containers_runlabel.go
@@ -116,7 +116,7 @@ func generateRunlabelCommand(runlabel string, img *image.Image, args []string, o
err error
name, imageName string
globalOpts string
- cmd, env []string
+ cmd []string
)
// TODO: How do we get global opts as done in v1?
@@ -149,7 +149,7 @@ func generateRunlabelCommand(runlabel string, img *image.Image, args []string, o
return nil, nil, err
}
- env = generateRunEnvironment(name, imageName, options)
+ env := generateRunEnvironment(options)
env = append(env, "PODMAN_RUNLABEL_NESTED=1")
envmap, err := envLib.ParseSlice(env)
if err != nil {
@@ -185,9 +185,6 @@ func generateRunlabelCommand(runlabel string, img *image.Image, args []string, o
// generateCommand takes a label (string) and converts it to an executable command
func generateCommand(command, imageName, name, globalOpts string) ([]string, error) {
- var (
- newCommand []string
- )
if name == "" {
name = imageName
}
@@ -201,8 +198,7 @@ func generateCommand(command, imageName, name, globalOpts string) ([]string, err
if err != nil {
return nil, err
}
- newCommand = append(newCommand, prog)
-
+ newCommand := []string{prog}
for _, arg := range cmd[1:] {
var newArg string
switch arg {
@@ -234,7 +230,7 @@ func generateCommand(command, imageName, name, globalOpts string) ([]string, err
// GenerateRunEnvironment merges the current environment variables with optional
// environment variables provided by the user
-func generateRunEnvironment(name, imageName string, options entities.ContainerRunlabelOptions) []string {
+func generateRunEnvironment(options entities.ContainerRunlabelOptions) []string {
newEnv := os.Environ()
if options.Optional1 != "" {
newEnv = append(newEnv, fmt.Sprintf("OPT1=%s", options.Optional1))
diff --git a/pkg/domain/infra/abi/cp.go b/pkg/domain/infra/abi/cp.go
index 9fc1e3bee..7567d5a70 100644
--- a/pkg/domain/infra/abi/cp.go
+++ b/pkg/domain/infra/abi/cp.go
@@ -92,7 +92,7 @@ func (ic *ContainerEngine) ContainerCp(ctx context.Context, source, dest string,
if isFromHostToCtr {
if isVol, volDestName, volName := isVolumeDestName(destPath, ctr); isVol { //nolint(gocritic)
- path, err := pathWithVolumeMount(ctr, ic.Libpod, volDestName, volName, destPath)
+ path, err := pathWithVolumeMount(ic.Libpod, volDestName, volName, destPath)
if err != nil {
return nil, errors.Wrapf(err, "error getting destination path from volume %s", volDestName)
}
@@ -126,7 +126,7 @@ func (ic *ContainerEngine) ContainerCp(ctx context.Context, source, dest string,
} else {
destOwner = idtools.IDPair{UID: os.Getuid(), GID: os.Getgid()}
if isVol, volDestName, volName := isVolumeDestName(srcPath, ctr); isVol { //nolint(gocritic)
- path, err := pathWithVolumeMount(ctr, ic.Libpod, volDestName, volName, srcPath)
+ path, err := pathWithVolumeMount(ic.Libpod, volDestName, volName, srcPath)
if err != nil {
return nil, errors.Wrapf(err, "error getting source path from volume %s", volDestName)
}
@@ -384,7 +384,7 @@ func isVolumeDestName(path string, ctr *libpod.Container) (bool, string, string)
}
// if SRCPATH or DESTPATH is from volume mount's destination -v or --mount type=volume, generates the path with volume mount point
-func pathWithVolumeMount(ctr *libpod.Container, runtime *libpod.Runtime, volDestName, volName, path string) (string, error) {
+func pathWithVolumeMount(runtime *libpod.Runtime, volDestName, volName, path string) (string, error) {
destVolume, err := runtime.GetVolume(volName)
if err != nil {
return "", errors.Wrapf(err, "error getting volume destination %s", volName)
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 67f331aac..e630d9bc8 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -167,7 +167,7 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
return nil, errors.Wrapf(err, "error getting repository tags")
}
- var foundIDs []string
+ foundIDs := []string{}
for _, tag := range tags {
name := rawImage + ":" + tag
newImage, err := ir.Libpod.ImageRuntime().New(ctx, name, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, util.PullImageAlways)
@@ -443,7 +443,7 @@ func removeErrorsToExitCode(rmErrors []error) int {
// container.
inUseErrors bool
// otherErrors indicates that at least one error other than the two
- // above occured.
+ // above occurred.
otherErrors bool
)
@@ -549,8 +549,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie
rmErrors = append(rmErrors, err)
}
}
-
- return
+ return //nolint
}
// Shutdown Libpod engine
diff --git a/pkg/domain/infra/abi/images_list.go b/pkg/domain/infra/abi/images_list.go
index 3034e36ec..98c041c15 100644
--- a/pkg/domain/infra/abi/images_list.go
+++ b/pkg/domain/infra/abi/images_list.go
@@ -8,17 +8,12 @@ import (
)
func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) {
- var (
- images []*libpodImage.Image
- err error
- )
-
- images, err = ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter)
+ images, err := ir.Libpod.ImageRuntime().GetImagesWithFilters(opts.Filter)
if err != nil {
return nil, err
}
- var summaries []*entities.ImageSummary
+ summaries := []*entities.ImageSummary{}
for _, img := range images {
var repoTags []string
if opts.All {
diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go
index a2b5fc0fc..a6f5bab6b 100644
--- a/pkg/domain/infra/abi/manifest.go
+++ b/pkg/domain/infra/abi/manifest.go
@@ -153,7 +153,7 @@ func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (stri
}
listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(names[0])
if err != nil {
- return "", errors.Wrapf(err, "error retriving local image from image name %s", names[0])
+ return "", errors.Wrapf(err, "error retrieving local image from image name %s", names[0])
}
updatedListID, err := listImage.RemoveManifest(instanceDigest)
if err == nil {
@@ -166,7 +166,7 @@ func (ir *ImageEngine) ManifestRemove(ctx context.Context, names []string) (stri
func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts entities.ManifestPushOptions) error {
listImage, err := ir.Libpod.ImageRuntime().NewFromLocal(names[0])
if err != nil {
- return errors.Wrapf(err, "error retriving local image from image name %s", names[0])
+ return errors.Wrapf(err, "error retrieving local image from image name %s", names[0])
}
dest, err := alltransports.ParseImageName(names[1])
if err != nil {
diff --git a/pkg/domain/infra/abi/network.go b/pkg/domain/infra/abi/network.go
index 8e3515824..eba1af362 100644
--- a/pkg/domain/infra/abi/network.go
+++ b/pkg/domain/infra/abi/network.go
@@ -48,15 +48,12 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.Net
}
func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) {
- var (
- rawCNINetworks []entities.NetworkInspectReport
- )
-
config, err := ic.Libpod.GetConfig()
if err != nil {
return nil, err
}
+ rawCNINetworks := make([]entities.NetworkInspectReport, 0, len(namesOrIds))
for _, name := range namesOrIds {
rawList, err := network.InspectNetwork(config, name)
if err != nil {
@@ -68,7 +65,7 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
}
func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) {
- var reports []*entities.NetworkRmReport
+ reports := []*entities.NetworkRmReport{}
config, err := ic.Libpod.GetConfig()
if err != nil {
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index f5b93c51b..7053cec9e 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -109,9 +109,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
var (
- containers []*libpod.Container
pod *libpod.Pod
- podOptions []libpod.PodCreateOption
registryCreds *types.DockerAuthConfig
writer io.Writer
playKubePod entities.PlayKubePod
@@ -130,8 +128,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
}
- podOptions = append(podOptions, libpod.WithInfraContainer())
- podOptions = append(podOptions, libpod.WithPodName(podName))
+ podOptions := []libpod.PodCreateOption{
+ libpod.WithInfraContainer(),
+ libpod.WithPodName(podName),
+ }
// TODO for now we just used the default kernel namespaces; we need to add/subtract this from yaml
hostname := podYAML.Spec.Hostname
@@ -271,6 +271,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
return nil, err
}
+ containers := make([]*libpod.Container, 0, len(podYAML.Spec.Containers))
for _, container := range podYAML.Spec.Containers {
pullPolicy := util.PullImageMissing
if len(container.ImagePullPolicy) > 0 {
@@ -293,7 +294,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
- conf, err := kubeContainerToCreateConfig(ctx, container, ic.Libpod, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
+ conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, seccompPaths)
if err != nil {
return nil, err
}
@@ -407,7 +408,7 @@ func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfi
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
-func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
+func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, seccompPaths *kubeSeccompPaths) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
diff --git a/pkg/domain/infra/abi/pods.go b/pkg/domain/infra/abi/pods.go
index 054b59b06..4a122f54d 100644
--- a/pkg/domain/infra/abi/pods.go
+++ b/pkg/domain/infra/abi/pods.go
@@ -54,9 +54,7 @@ func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*ent
}
func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, options entities.PodKillOptions) ([]*entities.PodKillReport, error) {
- var (
- reports []*entities.PodKillReport
- )
+ reports := []*entities.PodKillReport{}
sig, err := signal.ParseSignalNameOrNumber(options.Signal)
if err != nil {
return nil, err
@@ -87,9 +85,7 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
}
func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) {
- var (
- reports []*entities.PodPauseReport
- )
+ reports := []*entities.PodPauseReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
@@ -114,9 +110,7 @@ func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, op
}
func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) {
- var (
- reports []*entities.PodUnpauseReport
- )
+ reports := []*entities.PodUnpauseReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
@@ -141,10 +135,7 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
}
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
- var (
- reports []*entities.PodStopReport
- )
-
+ reports := []*entities.PodStopReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
@@ -169,9 +160,7 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
}
func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) {
- var (
- reports []*entities.PodRestartReport
- )
+ reports := []*entities.PodRestartReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
@@ -197,10 +186,7 @@ func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string,
}
func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) {
- var (
- reports []*entities.PodStartReport
- )
-
+ reports := []*entities.PodStartReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil {
return nil, err
@@ -227,14 +213,11 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
}
func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
- var (
- reports []*entities.PodRmReport
- )
-
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
}
+ reports := make([]*entities.PodRmReport, 0, len(pods))
for _, p := range pods {
report := entities.PodRmReport{Id: p.ID()}
err := ic.Libpod.RemovePod(ctx, p, true, options.Force)
@@ -251,13 +234,11 @@ func (ic *ContainerEngine) PodPrune(ctx context.Context, options entities.PodPru
}
func (ic *ContainerEngine) prunePodHelper(ctx context.Context) ([]*entities.PodPruneReport, error) {
- var (
- reports []*entities.PodPruneReport
- )
response, err := ic.Libpod.PrunePods(ctx)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodPruneReport, 0, len(response))
for k, v := range response {
reports = append(reports, &entities.PodPruneReport{
Err: v,
@@ -302,9 +283,8 @@ func (ic *ContainerEngine) PodTop(ctx context.Context, options entities.PodTopOp
func (ic *ContainerEngine) PodPs(ctx context.Context, options entities.PodPSOptions) ([]*entities.ListPodsReport, error) {
var (
err error
- filters []libpod.PodFilter
- pds []*libpod.Pod
- reports []*entities.ListPodsReport
+ filters = []libpod.PodFilter{}
+ pds = []*libpod.Pod{}
)
for k, v := range options.Filters {
@@ -330,6 +310,7 @@ func (ic *ContainerEngine) PodPs(ctx context.Context, options entities.PodPSOpti
}
}
+ reports := make([]*entities.ListPodsReport, 0, len(pds))
for _, p := range pds {
var lpcs []*entities.ListPodContainer
status, err := p.GetPodStatus()
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 33ba58558..90002326e 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -172,7 +172,7 @@ func checkInput() error { // nolint:deadcode,unused
return nil
}
-// SystemPrune removes unsed data from the system. Pruning pods, containers, volumes and images.
+// SystemPrune removes unused data from the system. Pruning pods, containers, volumes and images.
func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) {
var systemPruneReport = new(entities.SystemPruneReport)
podPruneReport, err := ic.prunePodHelper(ctx)
@@ -181,7 +181,7 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
}
systemPruneReport.PodPruneReport = podPruneReport
- containerPruneReport, err := ic.pruneContainersHelper(ctx, nil)
+ containerPruneReport, err := ic.pruneContainersHelper(nil)
if err != nil {
return nil, err
}
@@ -212,10 +212,7 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.Sys
func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.SystemDfOptions) (*entities.SystemDfReport, error) {
var (
- dfImages []*entities.SystemDfImageReport
- dfContainers []*entities.SystemDfContainerReport
- dfVolumes []*entities.SystemDfVolumeReport
- runningContainers []string
+ dfImages = []*entities.SystemDfImageReport{}
)
// Get Images and iterate them
@@ -282,6 +279,7 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
if err != nil {
return nil, err
}
+ dfContainers := make([]*entities.SystemDfContainerReport, 0, len(cons))
for _, c := range cons {
iid, _ := c.Image()
conSize, err := c.RootFsSize()
@@ -320,10 +318,12 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
if err != nil {
return nil, err
}
+ runningContainers := make([]string, 0, len(running))
for _, c := range running {
runningContainers = append(runningContainers, c.ID())
}
+ dfVolumes := make([]*entities.SystemDfVolumeReport, 0, len(vols))
for _, v := range vols {
var consInUse int
volSize, err := sizeOfPath(v.MountPoint())
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index a311e0c4e..702e11003 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -40,9 +40,10 @@ func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.Volum
func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
var (
err error
- reports []*entities.VolumeRmReport
vols []*libpod.Volume
+ reports = []*entities.VolumeRmReport{}
)
+
if opts.All {
vols, err = ic.Libpod.Volumes()
if err != nil {
@@ -72,9 +73,8 @@ func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, op
func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []string, opts entities.VolumeInspectOptions) ([]*entities.VolumeInspectReport, error) {
var (
- err error
- reports []*entities.VolumeInspectReport
- vols []*libpod.Volume
+ err error
+ vols []*libpod.Volume
)
// Note: as with previous implementation, a single failure here
@@ -93,6 +93,7 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
vols = append(vols, vol)
}
}
+ reports := make([]*entities.VolumeInspectReport, 0, len(vols))
for _, v := range vols {
config := entities.VolumeConfigResponse{
Name: v.Name(),
@@ -115,13 +116,11 @@ func (ic *ContainerEngine) VolumePrune(ctx context.Context, opts entities.Volume
}
func (ic *ContainerEngine) pruneVolumesHelper(ctx context.Context) ([]*entities.VolumePruneReport, error) {
- var (
- reports []*entities.VolumePruneReport
- )
pruned, err := ic.Libpod.PruneVolumes(ctx)
if err != nil {
return nil, err
}
+ reports := make([]*entities.VolumePruneReport, 0, len(pruned))
for k, v := range pruned {
reports = append(reports, &entities.VolumePruneReport{
Err: v,
@@ -132,9 +131,6 @@ func (ic *ContainerEngine) pruneVolumesHelper(ctx context.Context) ([]*entities.
}
func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeListOptions) ([]*entities.VolumeListReport, error) {
- var (
- reports []*entities.VolumeListReport
- )
volumeFilters, err := filters.GenerateVolumeFilters(opts.Filter)
if err != nil {
return nil, err
@@ -143,6 +139,7 @@ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeL
if err != nil {
return nil, err
}
+ reports := make([]*entities.VolumeListReport, 0, len(vols))
for _, v := range vols {
config := entities.VolumeConfigResponse{
Name: v.Name(),
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 68a8b0329..4bd813847 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -32,13 +32,11 @@ func (ic *ContainerEngine) ContainerExists(ctx context.Context, nameOrID string)
}
func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []string, options entities.WaitOptions) ([]entities.WaitReport, error) {
- var (
- responses []entities.WaitReport
- )
cons, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
if err != nil {
return nil, err
}
+ responses := make([]entities.WaitReport, 0, len(cons))
for _, c := range cons {
response := entities.WaitReport{Id: c.ID}
exitCode, err := containers.Wait(ic.ClientCxt, c.ID, &options.Condition)
@@ -53,13 +51,11 @@ func (ic *ContainerEngine) ContainerWait(ctx context.Context, namesOrIds []strin
}
func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
- var (
- reports []*entities.PauseUnpauseReport
- )
ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := containers.Pause(ic.ClientCxt, c.ID)
reports = append(reports, &entities.PauseUnpauseReport{Id: c.ID, Err: err})
@@ -68,13 +64,11 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
}
func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []string, options entities.PauseUnPauseOptions) ([]*entities.PauseUnpauseReport, error) {
- var (
- reports []*entities.PauseUnpauseReport
- )
ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := containers.Unpause(ic.ClientCxt, c.ID)
reports = append(reports, &entities.PauseUnpauseReport{Id: c.ID, Err: err})
@@ -83,9 +77,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
}
func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []string, options entities.StopOptions) ([]*entities.StopReport, error) {
- var (
- reports []*entities.StopReport
- )
+ reports := []*entities.StopReport{}
for _, cidFile := range options.CIDFiles {
content, err := ioutil.ReadFile(cidFile)
if err != nil {
@@ -125,13 +117,11 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []string, options entities.KillOptions) ([]*entities.KillReport, error) {
- var (
- reports []*entities.KillReport
- )
ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.KillReport, 0, len(ctrs))
for _, c := range ctrs {
reports = append(reports, &entities.KillReport{
Id: c.ID,
@@ -143,7 +133,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []string, options entities.RestartOptions) ([]*entities.RestartReport, error) {
var (
- reports []*entities.RestartReport
+ reports = []*entities.RestartReport{}
timeout *int
)
if options.Timeout != nil {
@@ -168,9 +158,6 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
}
func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*entities.RmReport, error) {
- var (
- reports []*entities.RmReport
- )
for _, cidFile := range options.CIDFiles {
content, err := ioutil.ReadFile(cidFile)
if err != nil {
@@ -184,6 +171,7 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
return nil, err
}
// TODO there is no endpoint for container eviction. Need to discuss
+ reports := make([]*entities.RmReport, 0, len(ctrs))
for _, c := range ctrs {
reports = append(reports, &entities.RmReport{
Id: c.ID,
@@ -198,13 +186,11 @@ func (ic *ContainerEngine) ContainerPrune(ctx context.Context, options entities.
}
func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]*entities.ContainerInspectReport, error) {
- var (
- reports []*entities.ContainerInspectReport
- )
ctrs, err := getContainersByContext(ic.ClientCxt, false, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.ContainerInspectReport, 0, len(ctrs))
for _, con := range ctrs {
data, err := containers.Inspect(ic.ClientCxt, con.ID, &options.Size)
if err != nil {
@@ -282,9 +268,8 @@ func (ic *ContainerEngine) ContainerExport(ctx context.Context, nameOrID string,
func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds []string, options entities.CheckpointOptions) ([]*entities.CheckpointReport, error) {
var (
- reports []*entities.CheckpointReport
- err error
- ctrs []entities.ListContainer
+ err error
+ ctrs = []entities.ListContainer{}
)
if options.All {
@@ -305,6 +290,7 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
return nil, err
}
}
+ reports := make([]*entities.CheckpointReport, 0, len(ctrs))
for _, c := range ctrs {
report, err := containers.Checkpoint(ic.ClientCxt, c.ID, &options.Keep, &options.LeaveRunning, &options.TCPEstablished, &options.IgnoreRootFS, &options.Export)
if err != nil {
@@ -317,9 +303,8 @@ func (ic *ContainerEngine) ContainerCheckpoint(ctx context.Context, namesOrIds [
func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []string, options entities.RestoreOptions) ([]*entities.RestoreReport, error) {
var (
- reports []*entities.RestoreReport
- err error
- ctrs []entities.ListContainer
+ err error
+ ctrs = []entities.ListContainer{}
)
if options.All {
allCtrs, err := getContainersByContext(ic.ClientCxt, true, []string{})
@@ -339,6 +324,7 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
return nil, err
}
}
+ reports := make([]*entities.RestoreReport, 0, len(ctrs))
for _, c := range ctrs {
report, err := containers.Restore(ic.ClientCxt, c.ID, &options.Keep, &options.TCPEstablished, &options.IgnoreRootFS, &options.IgnoreStaticIP, &options.IgnoreStaticMAC, &options.Name, &options.Import)
if err != nil {
@@ -467,7 +453,7 @@ func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input,
}
func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) {
- var reports []*entities.ContainerStartReport
+ reports := []*entities.ContainerStartReport{}
for _, name := range namesOrIds {
report := entities.ContainerStartReport{
Id: name,
@@ -535,11 +521,11 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
}
func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []string, options entities.ContainerInitOptions) ([]*entities.ContainerInitReport, error) {
- var reports []*entities.ContainerInitReport
ctrs, err := getContainersByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.ContainerInitReport, 0, len(ctrs))
for _, ctr := range ctrs {
err := containers.ContainerInit(ic.ClientCxt, ctr.ID)
// When using all, it is NOT considered an error if a container
@@ -569,8 +555,8 @@ func (ic *ContainerEngine) Config(_ context.Context) (*config.Config, error) {
func (ic *ContainerEngine) ContainerPort(ctx context.Context, nameOrID string, options entities.ContainerPortOptions) ([]*entities.ContainerPortReport, error) {
var (
- reports []*entities.ContainerPortReport
- namesOrIds []string
+ reports = []*entities.ContainerPortReport{}
+ namesOrIds = []string{}
)
if len(nameOrID) > 0 {
namesOrIds = append(namesOrIds, nameOrID)
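Several hunks here use an explicit empty literal (`[]*T{}`) instead of a sized make. For slices that end up serialized back to API clients the distinction from a nil slice is visible: nil marshals to JSON null, an empty slice marshals to [], which is one reason an empty literal is often preferred on return paths. A tiny illustration of the difference, with throwaway types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        var nilSlice []string
        emptySlice := []string{}

        a, _ := json.Marshal(nilSlice)   // -> null
        b, _ := json.Marshal(emptySlice) // -> []
        fmt.Println(string(a), string(b))
    }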
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index fc7ac0aa8..ec2c53c4f 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -39,7 +39,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions)
return nil, err
}
- is := make([]*entities.ImageSummary, len(images))
+ is := make([]*entities.ImageSummary, 0, len(images))
for i, img := range images {
hold := entities.ImageSummary{}
if err := utils.DeepCopy(&hold, img); err != nil {
diff --git a/pkg/domain/infra/tunnel/network.go b/pkg/domain/infra/tunnel/network.go
index 7725d8257..e7cc5fb26 100644
--- a/pkg/domain/infra/tunnel/network.go
+++ b/pkg/domain/infra/tunnel/network.go
@@ -12,7 +12,7 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.Net
}
func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.NetworkInspectOptions) ([]entities.NetworkInspectReport, error) {
- var reports []entities.NetworkInspectReport
+ reports := make([]entities.NetworkInspectReport, 0, len(namesOrIds))
for _, name := range namesOrIds {
report, err := network.Inspect(ic.ClientCxt, name)
if err != nil {
@@ -24,7 +24,7 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
}
func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, options entities.NetworkRmOptions) ([]*entities.NetworkRmReport, error) {
- var reports []*entities.NetworkRmReport
+ reports := make([]*entities.NetworkRmReport, 0, len(namesOrIds))
for _, name := range namesOrIds {
report, err := network.Remove(ic.ClientCxt, name, &options.Force)
if err != nil {
diff --git a/pkg/domain/infra/tunnel/pods.go b/pkg/domain/infra/tunnel/pods.go
index 5ca4a6a80..d18e9937c 100644
--- a/pkg/domain/infra/tunnel/pods.go
+++ b/pkg/domain/infra/tunnel/pods.go
@@ -17,10 +17,6 @@ func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*ent
}
func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, options entities.PodKillOptions) ([]*entities.PodKillReport, error) {
- var (
- reports []*entities.PodKillReport
- )
-
_, err := util.ParseSignal(options.Signal)
if err != nil {
return nil, err
@@ -30,6 +26,7 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodKillReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Kill(ic.ClientCxt, p.Id, &options.Signal)
if err != nil {
@@ -46,13 +43,11 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
}
func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, options entities.PodPauseOptions) ([]*entities.PodPauseReport, error) {
- var (
- reports []*entities.PodPauseReport
- )
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodPauseReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Pause(ic.ClientCxt, p.Id)
if err != nil {
@@ -69,13 +64,11 @@ func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, op
}
func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string, options entities.PodunpauseOptions) ([]*entities.PodUnpauseReport, error) {
- var (
- reports []*entities.PodUnpauseReport
- )
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodUnpauseReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Unpause(ic.ClientCxt, p.Id)
if err != nil {
@@ -92,10 +85,7 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
}
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
- var (
- reports []*entities.PodStopReport
- timeout = -1
- )
+ timeout := -1
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
@@ -103,6 +93,7 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
if options.Timeout != -1 {
timeout = options.Timeout
}
+ reports := make([]*entities.PodStopReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Stop(ic.ClientCxt, p.Id, &timeout)
if err != nil {
@@ -119,11 +110,11 @@ func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opt
}
func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string, options entities.PodRestartOptions) ([]*entities.PodRestartReport, error) {
- var reports []*entities.PodRestartReport
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodRestartReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Restart(ic.ClientCxt, p.Id)
if err != nil {
@@ -140,11 +131,11 @@ func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string,
}
func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, options entities.PodStartOptions) ([]*entities.PodStartReport, error) {
- var reports []*entities.PodStartReport
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil {
return nil, err
}
+ reports := make([]*entities.PodStartReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Start(ic.ClientCxt, p.Id)
if err != nil {
@@ -161,11 +152,11 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
}
func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
- var reports []*entities.PodRmReport
foundPods, err := getPodsByContext(ic.ClientCxt, options.All, namesOrIds)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
return nil, err
}
+ reports := make([]*entities.PodRmReport, 0, len(foundPods))
for _, p := range foundPods {
response, err := pods.Remove(ic.ClientCxt, p.Id, &options.Force)
if err != nil {
diff --git a/pkg/domain/infra/tunnel/volumes.go b/pkg/domain/infra/tunnel/volumes.go
index 5b65c66ea..af7273ac4 100644
--- a/pkg/domain/infra/tunnel/volumes.go
+++ b/pkg/domain/infra/tunnel/volumes.go
@@ -16,10 +16,6 @@ func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.Volum
}
func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, opts entities.VolumeRmOptions) ([]*entities.VolumeRmReport, error) {
- var (
- reports []*entities.VolumeRmReport
- )
-
if opts.All {
vols, err := volumes.List(ic.ClientCxt, nil)
if err != nil {
@@ -29,6 +25,7 @@ func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, op
namesOrIds = append(namesOrIds, v.Name)
}
}
+ reports := make([]*entities.VolumeRmReport, 0, len(namesOrIds))
for _, id := range namesOrIds {
reports = append(reports, &entities.VolumeRmReport{
Err: volumes.Remove(ic.ClientCxt, id, &opts.Force),
@@ -39,9 +36,6 @@ func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, op
}
func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []string, opts entities.VolumeInspectOptions) ([]*entities.VolumeInspectReport, error) {
- var (
- reports []*entities.VolumeInspectReport
- )
if opts.All {
vols, err := volumes.List(ic.ClientCxt, nil)
if err != nil {
@@ -51,6 +45,7 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
namesOrIds = append(namesOrIds, v.Name)
}
}
+ reports := make([]*entities.VolumeInspectReport, 0, len(namesOrIds))
for _, id := range namesOrIds {
data, err := volumes.Inspect(ic.ClientCxt, id)
if err != nil {
diff --git a/pkg/hooks/exec/runtimeconfigfilter_test.go b/pkg/hooks/exec/runtimeconfigfilter_test.go
index 48dd2f998..f4b6cf86a 100644
--- a/pkg/hooks/exec/runtimeconfigfilter_test.go
+++ b/pkg/hooks/exec/runtimeconfigfilter_test.go
@@ -12,21 +12,11 @@ import (
"github.com/stretchr/testify/assert"
)
-func pointerInt(value int) *int {
- return &value
-}
-
-func pointerUInt32(value uint32) *uint32 {
- return &value
-}
-
-func pointerFileMode(value os.FileMode) *os.FileMode {
- return &value
-}
-
func TestRuntimeConfigFilter(t *testing.T) {
unexpectedEndOfJSONInput := json.Unmarshal([]byte("{\n"), nil) //nolint
-
+ fileMode := os.FileMode(0600)
+ rootUint32 := uint32(0)
+ binUser := int(1)
for _, tt := range []struct {
name string
contextTimeout time.Duration
@@ -77,9 +67,9 @@ func TestRuntimeConfigFilter(t *testing.T) {
Type: "c",
Major: 10,
Minor: 229,
- FileMode: pointerFileMode(0600),
- UID: pointerUInt32(0),
- GID: pointerUInt32(0),
+ FileMode: &fileMode,
+ UID: &rootUint32,
+ GID: &rootUint32,
},
},
},
@@ -96,18 +86,18 @@ func TestRuntimeConfigFilter(t *testing.T) {
Type: "c",
Major: 10,
Minor: 229,
- FileMode: pointerFileMode(0600),
- UID: pointerUInt32(0),
- GID: pointerUInt32(0),
+ FileMode: &fileMode,
+ UID: &rootUint32,
+ GID: &rootUint32,
},
{
Path: "/dev/sda",
Type: "b",
Major: 8,
Minor: 0,
- FileMode: pointerFileMode(0600),
- UID: pointerUInt32(0),
- GID: pointerUInt32(0),
+ FileMode: &fileMode,
+ UID: &rootUint32,
+ GID: &rootUint32,
},
},
},
@@ -137,9 +127,9 @@ func TestRuntimeConfigFilter(t *testing.T) {
Type: "c",
Major: 10,
Minor: 229,
- FileMode: pointerFileMode(0600),
- UID: pointerUInt32(0),
- GID: pointerUInt32(0),
+ FileMode: &fileMode,
+ UID: &rootUint32,
+ GID: &rootUint32,
},
},
},
@@ -156,18 +146,18 @@ func TestRuntimeConfigFilter(t *testing.T) {
Type: "c",
Major: 10,
Minor: 229,
- FileMode: pointerFileMode(0600),
- UID: pointerUInt32(0),
- GID: pointerUInt32(0),
+ FileMode: &fileMode,
+ UID: &rootUint32,
+ GID: &rootUint32,
},
{
Path: "/dev/sdb",
Type: "b",
Major: 8,
Minor: 0,
- FileMode: pointerFileMode(0600),
- UID: pointerUInt32(0),
- GID: pointerUInt32(0),
+ FileMode: &fileMode,
+ UID: &rootUint32,
+ GID: &rootUint32,
},
},
},
@@ -203,7 +193,7 @@ func TestRuntimeConfigFilter(t *testing.T) {
{
Path: path,
Args: []string{"sh", "-c", "sleep 2"},
- Timeout: pointerInt(1),
+ Timeout: &binUser,
},
},
input: &spec.Spec{
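
The test rewrite above drops the one-line pointer helpers in favor of locals whose addresses are reused for every device entry; that is safe here because the cases only read through the pointers. A minimal standalone sketch, assuming the runtime-spec types the test imports and an illustrative device path:

    package main

    import (
        "fmt"
        "os"

        spec "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
        // One local per distinct value; its address is shared by every field
        // that needs a pointer, instead of calling pointerFileMode/pointerUInt32.
        fileMode := os.FileMode(0600)
        root := uint32(0)
        dev := spec.LinuxDevice{
            Path:     "/dev/null", // hypothetical device, for illustration only
            Type:     "c",
            Major:    1,
            Minor:    3,
            FileMode: &fileMode,
            UID:      &root,
            GID:      &root,
        }
        fmt.Println(dev.Path, *dev.FileMode)
    }
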
diff --git a/pkg/lookup/lookup.go b/pkg/lookup/lookup.go
index dff25f74f..8f241edf2 100644
--- a/pkg/lookup/lookup.go
+++ b/pkg/lookup/lookup.go
@@ -79,7 +79,6 @@ func GetContainerGroups(groups []string, containerMount string, override *Overri
var (
groupDest string
err error
- uintgids []uint32
)
groupPath := etcgroup
@@ -96,6 +95,7 @@ func GetContainerGroups(groups []string, containerMount string, override *Overri
if err != nil {
return nil, err
}
+ uintgids := make([]uint32, 0, len(gids))
// For libpod, we want []uint32s
for _, gid := range gids {
uintgids = append(uintgids, uint32(gid))
diff --git a/pkg/network/files.go b/pkg/network/files.go
index 81c0e1a28..beb3289f3 100644
--- a/pkg/network/files.go
+++ b/pkg/network/files.go
@@ -22,13 +22,13 @@ func GetCNIConfDir(config *config.Config) string {
// LoadCNIConfsFromDir loads all the CNI configurations from a dir
func LoadCNIConfsFromDir(dir string) ([]*libcni.NetworkConfigList, error) {
- var configs []*libcni.NetworkConfigList
files, err := libcni.ConfFiles(dir, []string{".conflist"})
if err != nil {
return nil, err
}
sort.Strings(files)
+ configs := make([]*libcni.NetworkConfigList, 0, len(files))
for _, confFile := range files {
conf, err := libcni.ConfListFromFile(confFile)
if err != nil {
@@ -72,7 +72,7 @@ func ReadRawCNIConfByName(config *config.Config, name string) ([]byte, error) {
// GetCNIPlugins returns a list of plugins that a given network
// has in the form of a string
func GetCNIPlugins(list *libcni.NetworkConfigList) string {
- var plugins []string
+ plugins := make([]string, 0, len(list.Plugins))
for _, plug := range list.Plugins {
plugins = append(plugins, plug.Network.Type)
}
@@ -106,12 +106,11 @@ func GetNetworksFromFilesystem(config *config.Config) ([]*allocator.Net, error)
// GetNetworkNamesFromFileSystem gets all the names from the cni network
// configuration files
func GetNetworkNamesFromFileSystem(config *config.Config) ([]string, error) {
- var networkNames []string
-
networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config))
if err != nil {
return nil, err
}
+ networkNames := []string{}
for _, n := range networks {
networkNames = append(networkNames, n.Name)
}
@@ -144,12 +143,12 @@ func GetInterfaceNameFromConfig(path string) (string, error) {
// GetBridgeNamesFromFileSystem is a convenience function to get all the bridge
// names from the configured networks
func GetBridgeNamesFromFileSystem(config *config.Config) ([]string, error) {
- var bridgeNames []string
-
networks, err := LoadCNIConfsFromDir(GetCNIConfDir(config))
if err != nil {
return nil, err
}
+
+ bridgeNames := []string{}
for _, n := range networks {
var name string
// iterate network conflists
diff --git a/pkg/network/network.go b/pkg/network/network.go
index 3ff664316..997aaf8a2 100644
--- a/pkg/network/network.go
+++ b/pkg/network/network.go
@@ -30,11 +30,11 @@ func IsSupportedDriver(driver string) error {
// GetLiveNetworks returns a slice of networks representing what the system
// has defined as network interfaces
func GetLiveNetworks() ([]*net.IPNet, error) {
- var nets []*net.IPNet
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil, err
}
+ nets := make([]*net.IPNet, 0, len(addrs))
for _, address := range addrs {
_, n, err := net.ParseCIDR(address.String())
if err != nil {
@@ -47,11 +47,11 @@ func GetLiveNetworks() ([]*net.IPNet, error) {
// GetLiveNetworkNames returns a list of network interfaces on the system
func GetLiveNetworkNames() ([]string, error) {
- var interfaceNames []string
liveInterfaces, err := net.Interfaces()
if err != nil {
return nil, err
}
+ interfaceNames := make([]string, 0, len(liveInterfaces))
for _, i := range liveInterfaces {
interfaceNames = append(interfaceNames, i.Name)
}
diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go
index ec96367cb..b07eb7f9a 100644
--- a/pkg/ps/ps.go
+++ b/pkg/ps/ps.go
@@ -21,7 +21,7 @@ import (
func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOptions) ([]entities.ListContainer, error) {
var (
filterFuncs []libpod.ContainerFilter
- pss []entities.ListContainer
+ pss = []entities.ListContainer{}
)
all := options.All || options.Last > 0
if len(options.Filters) > 0 {
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
index 6eebf7e5a..72ab1b97b 100644
--- a/pkg/signal/signal_linux.go
+++ b/pkg/signal/signal_linux.go
@@ -93,7 +93,7 @@ var signalMap = map[string]syscall.Signal{
// CatchAll catches all signals and relays them to the specified channel.
func CatchAll(sigc chan os.Signal) {
- var handledSigs []os.Signal
+ handledSigs := make([]os.Signal, 0, len(signalMap))
for _, s := range signalMap {
handledSigs = append(handledSigs, s)
}
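
CatchAll above sizes `handledSigs` to the full signal map before relaying. The same shape in a self-contained sketch (abbreviated, illustrative signal map; `os/signal` is standard-library API):

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        // Collect every signal of interest into a slice sized up front, then
        // register the whole set with signal.Notify so sigc receives them all.
        sigMap := map[string]syscall.Signal{
            "INT":  syscall.SIGINT,
            "TERM": syscall.SIGTERM,
        }
        handled := make([]os.Signal, 0, len(sigMap))
        for _, s := range sigMap {
            handled = append(handled, s)
        }
        sigc := make(chan os.Signal, 1)
        signal.Notify(sigc, handled...)
        fmt.Printf("relaying %d signals to sigc\n", len(handled))
    }
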
diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go
index 929223244..416e60728 100644
--- a/pkg/util/mountOpts.go
+++ b/pkg/util/mountOpts.go
@@ -28,8 +28,7 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string
foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool
)
- var newOptions []string
-
+ newOptions := make([]string, 0, len(options))
for _, opt := range options {
// Some options have parameters - size, mode
splitOpt := strings.SplitN(opt, "=", 2)
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
index e81cc8805..f9c9a7776 100644
--- a/vendor/github.com/uber/jaeger-client-go/.travis.yml
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -7,22 +7,22 @@ dist: trusty
matrix:
include:
- - go: 1.13.x
+ - go: 1.14.x
env:
- TESTS=true
- USE_DEP=true
- COVERAGE=true
- - go: 1.13.x
+ - go: 1.14.x
env:
- USE_DEP=true
- CROSSDOCK=true
- - go: 1.13.x
+ - go: 1.14.x
env:
- TESTS=true
- USE_DEP=false
- USE_GLIDE=true
# test with previous version of Go
- - go: 1.12.x
+ - go: 1.13.x
env:
- TESTS=true
- USE_DEP=true
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 944feb2c8..6a7e3c5ca 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,6 +1,14 @@
Changes by Version
==================
+2.24.0 (2020-06-14)
+-------------------
+- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
+- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
+- Override reporter config only when agent host/port is set in env (#513) -- ilylia
+- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
+
+
2.23.1 (2020-04-28)
-------------------
- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 7c348e73a..e7b13b1c2 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -44,28 +44,32 @@ and [config/example_test.go](./config/example_test.go).
### Environment variables
-The tracer can be initialized with values coming from environment variables. None of the env vars are required
-and all of them can be overridden via direct setting of the property on the configuration object.
+The tracer can be initialized with values coming from environment variables, if it is
+[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
+that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
+None of the env vars are required and all of them can be overridden via direct setting
+of the property on the configuration object.
Property| Description
--- | ---
-JAEGER_SERVICE_NAME | The service name
-JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP
-JAEGER_AGENT_PORT | The port for communicating with agent via UDP
-JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces
-JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint
-JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint
-JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans
-JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size
-JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits])
-JAEGER_SAMPLER_TYPE | The sampler type
-JAEGER_SAMPLER_PARAM | The sampler parameter (number)
-JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, i.e. http://jaeger-agent:5778/sampling
-JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of
-JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits])
-JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found
-JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used.
-JAEGER_RPC_METRICS | Whether to store RPC metrics
+JAEGER_SERVICE_NAME | The service name.
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
+JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
+JAEGER_SAMPLER_PARAM | The sampler parameter (number).
+JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
+JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
+JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
+JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
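
The rewritten table only applies when the configuration is built through FromEnv(). A minimal sketch of that flow, assuming the standard config package entry points:

    package main

    import (
        "log"

        jaegercfg "github.com/uber/jaeger-client-go/config"
    )

    func main() {
        // FromEnv reads the JAEGER_* variables above (including the new
        // JAEGER_SAMPLING_ENDPOINT); unset variables leave fields at their
        // defaults and can still be overridden on cfg before NewTracer.
        cfg, err := jaegercfg.FromEnv()
        if err != nil {
            log.Fatalf("reading env config: %v", err)
        }
        tracer, closer, err := cfg.NewTracer()
        if err != nil {
            log.Fatalf("creating tracer: %v", err)
        }
        defer closer.Close()
        _ = tracer // typically passed to opentracing.SetGlobalTracer
    }
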
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
index 115e49ab8..12438d841 100644
--- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md
+++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
@@ -2,6 +2,7 @@
1. Create a PR "Preparing for release X.Y.Z" against master branch
* Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
+ * Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries
* Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
2. Create a release "Release X.Y.Z" on Github
* Create Tag `vX.Y.Z`
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index 44e93533c..e6ffb987d 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -36,16 +36,16 @@ const defaultSamplingProbability = 0.001
// Configuration configures and creates Jaeger Tracer
type Configuration struct {
// ServiceName specifies the service name to use on the tracer.
- // Can be provided via environment variable named JAEGER_SERVICE_NAME
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
ServiceName string `yaml:"serviceName"`
- // Disabled can be provided via environment variable named JAEGER_DISABLED
+ // Disabled can be provided by FromEnv() via the environment variable named JAEGER_DISABLED
Disabled bool `yaml:"disabled"`
- // RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS
+ // RPCMetrics can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
RPCMetrics bool `yaml:"rpc_metrics"`
- // Tags can be provided via environment variable named JAEGER_TAGS
+ // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
Tags []opentracing.Tag `yaml:"tags"`
Sampler *SamplerConfig `yaml:"sampler"`
@@ -57,8 +57,8 @@ type Configuration struct {
// SamplerConfig allows initializing a non-default sampler. All fields are optional.
type SamplerConfig struct {
- // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
- // Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE
+ // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
Type string `yaml:"type"`
// Param is a value passed to the sampler.
@@ -69,22 +69,23 @@ type SamplerConfig struct {
// - for "remote" sampler, param is the same as for "probabilistic"
// and indicates the initial sampling rate before the actual one
// is received from the mothership.
- // Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
Param float64 `yaml:"param"`
- // SamplingServerURL is the address of jaeger-agent's HTTP sampling server
- // Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
+ // SamplingServerURL is the URL of sampling manager that can provide
+ // sampling strategy to this service.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
SamplingServerURL string `yaml:"samplingServerURL"`
// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
- // jaeger-agent for the appropriate sampling strategy.
- // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
+ // sampling manager for the appropriate sampling strategy.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
// MaxOperations is the maximum number of operations that the PerOperationSampler
// will keep track of. If an operation is not tracked, a default probabilistic
// sampler will be used rather than the per operation specific sampler.
- // Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
MaxOperations int `yaml:"maxOperations"`
// Opt-in feature for applications that require late binding of span name via explicit
@@ -105,34 +106,35 @@ type ReporterConfig struct {
// QueueSize controls how many spans the reporter can keep in memory before it starts dropping
// new spans. The queue is continuously drained by a background go-routine, as fast as spans
// can be sent out of process.
- // Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
QueueSize int `yaml:"queueSize"`
// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
// It is generally not useful, as it only matters for very low traffic services.
- // Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
BufferFlushInterval time.Duration
// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
// and logs all submitted spans. Main Configuration.Logger must be initialized in the code
// for this option to have any effect.
- // Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
LogSpans bool `yaml:"logSpans"`
- // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address
- // Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+ // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
LocalAgentHostPort string `yaml:"localAgentHostPort"`
- // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL
- // Can be set by exporting an environment variable named JAEGER_ENDPOINT
+ // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
CollectorEndpoint string `yaml:"collectorEndpoint"`
// User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
- // Can be set by exporting an environment variable named JAEGER_USER
+ // Can be provided by FromEnv() via the environment variable named JAEGER_USER
User string `yaml:"user"`
// Password instructs reporter to include a password for basic http authentication when sending spans to
- // jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD
+ // jaeger-collector.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
Password string `yaml:"password"`
// HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
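
The fields whose comments are rewritten above can also be populated directly in code before calling NewTracer. A sketch of programmatic construction, with hypothetical service settings:

    package main

    import (
        "log"
        "time"

        jaegercfg "github.com/uber/jaeger-client-go/config"
    )

    func main() {
        cfg := jaegercfg.Configuration{
            ServiceName: "example-service", // hypothetical name
            Sampler: &jaegercfg.SamplerConfig{
                Type:                    "remote",
                Param:                   0.001,
                SamplingServerURL:       "http://127.0.0.1:5778/sampling",
                SamplingRefreshInterval: time.Minute,
            },
            Reporter: &jaegercfg.ReporterConfig{
                QueueSize:           100,
                BufferFlushInterval: time.Second,
                LogSpans:            false,
                LocalAgentHostPort:  "localhost:6831",
            },
        }
        _, closer, err := cfg.NewTracer()
        if err != nil {
            log.Fatalf("creating tracer: %v", err)
        }
        defer closer.Close()
    }
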
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
index a729bd8fe..f38eb9d93 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -36,7 +36,8 @@ const (
envTags = "JAEGER_TAGS"
envSamplerType = "JAEGER_SAMPLER_TYPE"
envSamplerParam = "JAEGER_SAMPLER_PARAM"
- envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT"
+ envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
+ envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
@@ -118,7 +119,9 @@ func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
}
}
- if e := os.Getenv(envSamplerManagerHostPort); e != "" {
+ if e := os.Getenv(envSamplingEndpoint); e != "" {
+ sc.SamplingServerURL = e
+ } else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
sc.SamplingServerURL = e
} else if e := os.Getenv(envAgentHost); e != "" {
// Fallback if we know the agent host - try the sampling endpoint there
@@ -184,20 +187,25 @@ func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
rc.User = user
rc.Password = pswd
} else {
+ useEnv := false
host := jaeger.DefaultUDPSpanServerHost
if e := os.Getenv(envAgentHost); e != "" {
host = e
+ useEnv = true
}
port := jaeger.DefaultUDPSpanServerPort
if e := os.Getenv(envAgentPort); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
port = int(value)
+ useEnv = true
} else {
return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
}
}
- rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+ if useEnv || rc.LocalAgentHostPort == "" {
+ rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+ }
}
return rc, nil
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index 1f8578fbd..feaf344ad 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@ import (
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.23.1"
+ JaegerClientVersion = "Go-2.24.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
@@ -102,5 +102,5 @@ const (
var (
// DefaultSamplingServerURL is the default url to fetch sampling config from, via http
- DefaultSamplingServerURL = fmt.Sprintf("http://localhost:%d/sampling", DefaultSamplingServerPort)
+ DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
)
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
index 112e3e1cb..f2edd5ca9 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -64,7 +64,7 @@ type RemotelyControlledSampler struct {
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
closed int64 // 0 - not closed, 1 - closed
- sync.RWMutex
+ sync.RWMutex // used to serialize access to samplerOptions.sampler
samplerOptions
serviceName string
@@ -95,22 +95,22 @@ func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (boo
// OnCreateSpan implements OnCreateSpan of SamplerV2.
func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
- return s.sampler.OnCreateSpan(span)
+ return s.Sampler().OnCreateSpan(span)
}
// OnSetOperationName implements OnSetOperationName of SamplerV2.
func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
- return s.sampler.OnSetOperationName(span, operationName)
+ return s.Sampler().OnSetOperationName(span, operationName)
}
// OnSetTag implements OnSetTag of SamplerV2.
func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
- return s.sampler.OnSetTag(span, key, value)
+ return s.Sampler().OnSetTag(span, key, value)
}
// OnFinishSpan implements OnFinishSpan of SamplerV2.
func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
- return s.sampler.OnFinishSpan(span)
+ return s.Sampler().OnFinishSpan(span)
}
// Close implements Close() of Sampler.
@@ -153,8 +153,8 @@ func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker
// Sampler returns the currently active sampler.
func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
- s.Lock()
- defer s.Unlock()
+ s.RLock()
+ defer s.RUnlock()
return s.sampler
}
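
The sampler_remote.go hunks route every read of the embedded sampler through Sampler(), which now takes only the read lock, while the update path keeps the exclusive lock. The same convention in a self-contained sketch with a stand-in type:

    package main

    import (
        "fmt"
        "sync"
    )

    type holder struct {
        mu      sync.RWMutex
        sampler string // stand-in for the real SamplerV2 value
    }

    // Sampler is the read path: many goroutines may hold the shared lock at once.
    func (h *holder) Sampler() string {
        h.mu.RLock()
        defer h.mu.RUnlock()
        return h.sampler
    }

    // setSampler is the write path: the exclusive lock serializes the swap.
    func (h *holder) setSampler(s string) {
        h.mu.Lock()
        defer h.mu.Unlock()
        h.sampler = s
    }

    func main() {
        h := &holder{sampler: "probabilistic"}
        h.setSampler("remote")
        fmt.Println(h.Sampler())
    }
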
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c7c4584a9..03a6e19de 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -498,7 +498,7 @@ github.com/stretchr/testify/require
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
-# github.com/uber/jaeger-client-go v2.23.1+incompatible
+# github.com/uber/jaeger-client-go v2.24.0+incompatible
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
github.com/uber/jaeger-client-go/internal/baggage