-rw-r--r--  .golangci.yml | 15
-rw-r--r--  Makefile | 7
-rw-r--r--  cmd/podman/common/create.go | 4
-rw-r--r--  cmd/podman/containers/clone.go | 3
-rw-r--r--  cmd/podman/containers/cp.go | 2
-rw-r--r--  cmd/podman/containers/start.go | 2
-rw-r--r--  cmd/podman/images/build.go | 4
-rw-r--r--  cmd/podman/images/rm.go | 1
-rw-r--r--  cmd/podman/images/scp.go | 10
-rw-r--r--  cmd/podman/images/search.go | 2
-rw-r--r--  cmd/podman/machine/init.go | 2
-rw-r--r--  cmd/podman/networks/create.go | 2
-rw-r--r--  cmd/podman/parse/json.go | 2
-rw-r--r--  cmd/podman/root.go | 2
-rw-r--r--  cmd/podman/system/connection/add.go | 2
-rw-r--r--  cmd/podman/system/connection/default.go | 2
-rw-r--r--  cmd/podman/system/connection/remove.go | 2
-rw-r--r--  cmd/podman/system/df.go | 11
-rw-r--r--  cmd/podman/system/service.go | 2
-rw-r--r--  cmd/podman/validate/args.go | 2
-rw-r--r--  cmd/rootlessport/main.go | 4
-rwxr-xr-x  contrib/cirrus/logformatter | 2
-rwxr-xr-x  contrib/cirrus/logformatter.t | 5
-rw-r--r--  docs/source/markdown/podman-build.1.md | 4
-rw-r--r--  docs/source/markdown/podman-rmi.1.md | 14
-rw-r--r--  docs/source/markdown/podman-system-service.1.md | 2
-rw-r--r--  docs/source/markdown/podman-unshare.1.md | 4
-rw-r--r--  go.mod | 2
-rw-r--r--  go.sum | 9
-rwxr-xr-x  hack/make-and-check-size | 131
-rw-r--r--  libpod/boltdb_state.go | 2
-rw-r--r--  libpod/common/common.go | 6
-rw-r--r--  libpod/container_api.go | 6
-rw-r--r--  libpod/container_internal.go | 12
-rw-r--r--  libpod/container_internal_linux.go | 4
-rw-r--r--  libpod/events/logfile.go | 5
-rw-r--r--  libpod/networking_linux.go | 2
-rw-r--r--  libpod/oci_conmon_linux.go | 9
-rw-r--r--  libpod/runtime_pod_linux.go | 3
-rw-r--r--  pkg/annotations/annotations.go | 76
-rw-r--r--  pkg/api/handlers/compat/images_prune.go | 2
-rw-r--r--  pkg/api/handlers/compat/networks.go | 2
-rw-r--r--  pkg/api/handlers/libpod/images.go | 3
-rw-r--r--  pkg/api/handlers/utils/images.go | 2
-rw-r--r--  pkg/api/server/register_exec.go | 2
-rw-r--r--  pkg/api/server/register_images.go | 4
-rw-r--r--  pkg/api/server/register_networks.go | 11
-rw-r--r--  pkg/api/server/swagger.go | 9
-rw-r--r--  pkg/bindings/containers/attach.go | 2
-rw-r--r--  pkg/bindings/images/types.go | 2
-rw-r--r--  pkg/bindings/images/types_remove_options.go | 15
-rw-r--r--  pkg/bindings/test/attach_test.go | 3
-rw-r--r--  pkg/bindings/test/auth_test.go | 3
-rw-r--r--  pkg/bindings/test/common_test.go | 2
-rw-r--r--  pkg/bindings/test/containers_test.go | 3
-rw-r--r--  pkg/checkpoint/crutils/checkpoint_restore_utils.go | 9
-rw-r--r--  pkg/domain/entities/images.go | 2
-rw-r--r--  pkg/domain/entities/types.go | 4
-rw-r--r--  pkg/domain/filters/containers.go | 6
-rw-r--r--  pkg/domain/filters/pods.go | 6
-rw-r--r--  pkg/domain/infra/abi/images.go | 12
-rw-r--r--  pkg/domain/infra/abi/play.go | 4
-rw-r--r--  pkg/domain/infra/abi/terminal/sigproxy_linux.go | 2
-rw-r--r--  pkg/domain/infra/abi/terminal/terminal_linux.go | 2
-rw-r--r--  pkg/domain/infra/abi/volumes.go | 3
-rw-r--r--  pkg/domain/infra/tunnel/containers.go | 6
-rw-r--r--  pkg/domain/infra/tunnel/events.go | 2
-rw-r--r--  pkg/domain/infra/tunnel/images.go | 2
-rw-r--r--  pkg/env/env.go | 2
-rw-r--r--  pkg/errorhandling/errorhandling.go | 2
-rw-r--r--  pkg/inspect/inspect.go | 6
-rw-r--r--  pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go | 6
-rw-r--r--  pkg/lookup/lookup.go | 2
-rw-r--r--  pkg/machine/config.go | 6
-rw-r--r--  pkg/machine/fedora.go | 5
-rw-r--r--  pkg/machine/qemu/config.go | 2
-rw-r--r--  pkg/machine/qemu/machine.go | 23
-rw-r--r--  pkg/rootless/rootless.go | 14
-rw-r--r--  pkg/signal/signal_common.go | 2
-rw-r--r--  pkg/specgen/generate/config_linux.go | 4
-rw-r--r--  pkg/specgen/generate/ports_bench_test.go | 2
-rw-r--r--  pkg/specgenutil/specgen.go | 3
-rw-r--r--  pkg/systemd/dbus.go | 1
-rw-r--r--  pkg/systemd/generate/containers.go | 4
-rw-r--r--  pkg/systemd/generate/pods.go | 4
-rw-r--r--  pkg/terminal/console_unix.go | 2
-rw-r--r--  pkg/util/camelcase/camelcase.go | 4
-rw-r--r--  pkg/util/utils.go | 2
-rw-r--r--  test/compose/slirp4netns_opts/tests.sh | 9
-rw-r--r--  test/e2e/attach_test.go | 8
-rw-r--r--  test/e2e/checkpoint_test.go | 8
-rw-r--r--  test/e2e/commit_test.go | 7
-rw-r--r--  test/e2e/common_test.go | 3
-rw-r--r--  test/e2e/create_test.go | 7
-rw-r--r--  test/e2e/healthcheck_run_test.go | 10
-rw-r--r--  test/e2e/inspect_test.go | 8
-rw-r--r--  test/e2e/system_df_test.go | 17
-rw-r--r--  test/system/010-images.bats | 9
-rw-r--r--  test/system/200-pod.bats | 9
-rw-r--r--  test/utils/common_function_test.go | 11
-rw-r--r--  test/utils/matchers.go | 8
-rw-r--r--  test/utils/utils.go | 5
-rw-r--r--  troubleshooting.md | 15
-rw-r--r--  utils/ports.go | 2
-rw-r--r--  utils/utils.go | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 46
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 6
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 12
-rw-r--r--  vendor/github.com/containers/storage/types/options.go | 26
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 17
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/autogen.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/bitreader.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 183
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s | 488
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in | 197
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | 181
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | 506
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in | 195
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_generic.go | 193
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 72
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 18
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 13
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 13
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fuzz.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fuzz_none.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go | 20
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zip.go | 18
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zstd.go | 7
-rw-r--r--  vendor/modules.txt | 6
131 files changed, 2449 insertions, 532 deletions
diff --git a/.golangci.yml b/.golangci.yml
index f3338b9ae..956e528ef 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -13,7 +13,8 @@ linters:
enable-all: true
disable:
# All these break for one reason or another
- - nolintlint
+ - nolintlint # some linter must be disabled (see `nolint` in the code)
+ - tagliatelle # too many JSON keys cannot be changed due to compat
- gocognit
- testpackage
- goerr113
@@ -38,7 +39,6 @@ linters:
- gofumpt
- gci
- godot
- - makezero
- dupl
- funlen
- gochecknoglobals
@@ -51,6 +51,17 @@ linters:
- gocritic
- gosec
- maligned
+ - gomoddirectives
+ - revive
+ - containedctx
+ - contextcheck
+ - cyclop
+ - errname
+ - forcetypeassert
+ - ireturn
+ - varnamelen
+ - maintidx
+ - nilnil
linters-settings:
errcheck:
check-blank: false
diff --git a/Makefile b/Makefile
index 375586370..bef6b4b91 100644
--- a/Makefile
+++ b/Makefile
@@ -336,7 +336,10 @@ $(SRCBINDIR)/podman$(BINSFX): $(SRCBINDIR) .gopathok $(SOURCES) go.mod go.sum
-o $@ ./cmd/podman
$(SRCBINDIR)/podman-remote-static: $(SRCBINDIR) .gopathok $(SOURCES) go.mod go.sum
- $(GOCMD) build \
+ CGO_ENABLED=0 \
+ GOOS=$(GOOS) \
+ GOARCH=$(GOARCH) \
+ $(GO) build \
$(BUILDFLAGS) \
$(GO_LDFLAGS) '$(LDFLAGS_PODMAN_STATIC)' \
-tags "${REMOTETAGS}" \
@@ -882,7 +885,7 @@ install.tools: .install.goimports .install.gitvalidation .install.md2man .instal
.PHONY: .install.golangci-lint
.install.golangci-lint: .gopathok
- VERSION=1.36.0 GOBIN=$(GOBIN) ./hack/install_golangci.sh
+ VERSION=1.45.0 GOBIN=$(GOBIN) ./hack/install_golangci.sh
.PHONY: .install.bats
.install.bats: .gopathok
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index f3e2e4d6d..8d9a255ec 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -721,7 +721,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
"Optional parent cgroup for the container",
)
_ = cmd.RegisterFlagCompletionFunc(cgroupParentFlagName, completion.AutocompleteDefault)
- conmonPidfileFlagName := ""
+ var conmonPidfileFlagName string
if !isInfra {
conmonPidfileFlagName = "conmon-pidfile"
} else {
@@ -734,7 +734,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
)
_ = cmd.RegisterFlagCompletionFunc(conmonPidfileFlagName, completion.AutocompleteDefault)
- entrypointFlagName := ""
+ var entrypointFlagName string
if !isInfra {
entrypointFlagName = "entrypoint"
} else {
diff --git a/cmd/podman/containers/clone.go b/cmd/podman/containers/clone.go
index d095d24ba..8a1473608 100644
--- a/cmd/podman/containers/clone.go
+++ b/cmd/podman/containers/clone.go
@@ -58,9 +58,8 @@ func clone(cmd *cobra.Command, args []string) error {
case 3:
ctrClone.CreateOpts.Name = args[1]
ctrClone.Image = args[2]
- rawImageName := ""
if !cliVals.RootFS {
- rawImageName = args[0]
+ rawImageName := args[0]
name, err := PullImage(ctrClone.Image, ctrClone.CreateOpts)
if err != nil {
return err
diff --git a/cmd/podman/containers/cp.go b/cmd/podman/containers/cp.go
index 7ac28b799..eb18dfce4 100644
--- a/cmd/podman/containers/cp.go
+++ b/cmd/podman/containers/cp.go
@@ -449,7 +449,7 @@ func resolvePathOnDestinationContainer(container string, containerPath string, i
containerInfo, err = registry.ContainerEngine().ContainerStat(registry.GetContext(), container, containerPath)
if err == nil {
baseName = filepath.Base(containerInfo.LinkTarget)
- return
+ return // nolint: nilerr
}
if strings.HasSuffix(containerPath, "/") {
diff --git a/cmd/podman/containers/start.go b/cmd/podman/containers/start.go
index a7731a0a1..b70e975b7 100644
--- a/cmd/podman/containers/start.go
+++ b/cmd/podman/containers/start.go
@@ -122,7 +122,7 @@ func start(cmd *cobra.Command, args []string) error {
startOptions.Stdout = os.Stdout
}
- var containers []string = args
+ containers := args
if len(filters) > 0 {
for _, f := range filters {
split := strings.SplitN(f, "=", 2)
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index 729951a31..1f9e7ea9e 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -375,7 +375,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
}
}
- cleanTmpFile := false
+ var cleanTmpFile bool
flags.Authfile, cleanTmpFile = buildahUtil.MirrorToTempFileIfPathIsDescriptor(flags.Authfile)
if cleanTmpFile {
defer os.Remove(flags.Authfile)
@@ -474,7 +474,7 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
return nil, err
}
- format := ""
+ var format string
flags.Format = strings.ToLower(flags.Format)
switch {
case strings.HasPrefix(flags.Format, buildahDefine.OCI):
diff --git a/cmd/podman/images/rm.go b/cmd/podman/images/rm.go
index dd138d410..13dab62d4 100644
--- a/cmd/podman/images/rm.go
+++ b/cmd/podman/images/rm.go
@@ -56,6 +56,7 @@ func init() {
func imageRemoveFlagSet(flags *pflag.FlagSet) {
flags.BoolVarP(&imageOpts.All, "all", "a", false, "Remove all images")
+ flags.BoolVarP(&imageOpts.Ignore, "ignore", "i", false, "Ignore errors if a specified image does not exist")
flags.BoolVarP(&imageOpts.Force, "force", "f", false, "Force Removal of the image")
}
diff --git a/cmd/podman/images/scp.go b/cmd/podman/images/scp.go
index d07a5d99d..152275c68 100644
--- a/cmd/podman/images/scp.go
+++ b/cmd/podman/images/scp.go
@@ -105,7 +105,7 @@ func scp(cmd *cobra.Command, args []string) (finalErr error) {
}
locations := []*entities.ImageScpOptions{}
cliConnections := []string{}
- flipConnections := false
+ var flipConnections bool
for _, arg := range args {
loc, connect, err := parseImageSCPArg(arg)
if err != nil {
@@ -233,7 +233,7 @@ func loadToRemote(localFile string, tag string, url *urlP.URL, iden string) (str
errOut := strconv.Itoa(int(n)) + " Bytes copied before error"
return " ", errors.Wrapf(err, errOut)
}
- run := ""
+ var run string
if tag != "" {
return "", errors.Wrapf(define.ErrInvalidArg, "Renaming of an image is currently not supported")
}
@@ -264,10 +264,12 @@ func saveToRemote(image, localFile string, tag string, uri *urlP.URL, iden strin
run := podman + " image save " + image + " --format=oci-archive --output=" + remoteFile // run ssh image load of the file copied via scp. Files are reverse in this case...
_, err = connection.ExecRemoteCommand(dial, run)
if err != nil {
- return nil
+ return err
}
n, err := scpD.CopyFrom(dial, remoteFile, localFile)
- connection.ExecRemoteCommand(dial, "rm "+remoteFile)
+ if _, conErr := connection.ExecRemoteCommand(dial, "rm "+remoteFile); conErr != nil {
+ logrus.Errorf("Error removing file on endpoint: %v", conErr)
+ }
if err != nil {
errOut := strconv.Itoa(int(n)) + " Bytes copied before error"
return errors.Wrapf(err, errOut)
diff --git a/cmd/podman/images/search.go b/cmd/podman/images/search.go
index 292a1d060..aa11cf254 100644
--- a/cmd/podman/images/search.go
+++ b/cmd/podman/images/search.go
@@ -105,7 +105,7 @@ func searchFlags(cmd *cobra.Command) {
// imageSearch implements the command for searching images.
func imageSearch(cmd *cobra.Command, args []string) error {
- searchTerm := ""
+ var searchTerm string
switch len(args) {
case 1:
searchTerm = args[0]
diff --git a/cmd/podman/machine/init.go b/cmd/podman/machine/init.go
index 8fb9f17c7..e07b6fbfa 100644
--- a/cmd/podman/machine/init.go
+++ b/cmd/podman/machine/init.go
@@ -83,7 +83,7 @@ func init() {
"reexec", false,
"process was rexeced",
)
- flags.MarkHidden("reexec")
+ _ = flags.MarkHidden("reexec")
ImagePathFlagName := "image-path"
flags.StringVar(&initOpts.ImagePath, ImagePathFlagName, cfg.Machine.Image, "Path to qcow image")
diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go
index 3dd393c46..8cf9bcada 100644
--- a/cmd/podman/networks/create.go
+++ b/cmd/podman/networks/create.go
@@ -60,7 +60,7 @@ func networkCreateFlags(cmd *cobra.Command) {
macvlanFlagName := "macvlan"
flags.StringVar(&networkCreateOptions.MacVLAN, macvlanFlagName, "", "create a Macvlan connection based on this device")
// This option is deprecated
- flags.MarkHidden(macvlanFlagName)
+ _ = flags.MarkHidden(macvlanFlagName)
labelFlagName := "label"
flags.StringArrayVar(&labels, labelFlagName, nil, "set metadata on a network")
diff --git a/cmd/podman/parse/json.go b/cmd/podman/parse/json.go
index d7486d0b1..85572a057 100644
--- a/cmd/podman/parse/json.go
+++ b/cmd/podman/parse/json.go
@@ -4,7 +4,7 @@ import "regexp"
var jsonFormatRegex = regexp.MustCompile(`^\s*(json|{{\s*json\s*(\.)?\s*}})\s*$`)
-// MatchesJSONFormat test CLI --format string to be a JSON request
+// MatchesJSONFormat test CLI --format string to be a JSON request.
func MatchesJSONFormat(s string) bool {
return jsonFormatRegex.Match([]byte(s))
}
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 6d768c2e6..500a475bd 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -402,7 +402,7 @@ func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
networkBackendFlagName := "network-backend"
pFlags.StringVar(&cfg.Network.NetworkBackend, networkBackendFlagName, cfg.Network.NetworkBackend, `Network backend to use ("cni"|"netavark")`)
_ = cmd.RegisterFlagCompletionFunc(networkBackendFlagName, common.AutocompleteNetworkBackend)
- pFlags.MarkHidden(networkBackendFlagName)
+ _ = pFlags.MarkHidden(networkBackendFlagName)
rootFlagName := "root"
pFlags.StringVar(&cfg.Engine.StaticDir, rootFlagName, "", "Path to the root directory in which data, including images, is stored")
diff --git a/cmd/podman/system/connection/add.go b/cmd/podman/system/connection/add.go
index 324e02db4..db575a689 100644
--- a/cmd/podman/system/connection/add.go
+++ b/cmd/podman/system/connection/add.go
@@ -244,7 +244,7 @@ func getUDS(cmd *cobra.Command, uri *url.URL, iden string) (string, error) {
// ValidateAndConfigure will take a ssh url and an identity key (rsa and the like) and ensure the information given is valid
// iden iden can be blank to mean no identity key
-// once the function validates the information it creates and returns an ssh.ClientConfig
+// once the function validates the information it creates and returns an ssh.ClientConfig.
func ValidateAndConfigure(uri *url.URL, iden string) (*ssh.ClientConfig, error) {
var signers []ssh.Signer
passwd, passwdSet := uri.User.Password()
diff --git a/cmd/podman/system/connection/default.go b/cmd/podman/system/connection/default.go
index c59ff36af..81866df55 100644
--- a/cmd/podman/system/connection/default.go
+++ b/cmd/podman/system/connection/default.go
@@ -11,7 +11,7 @@ import (
)
var (
- // Skip creating engines since this command will obtain connection information to said engines
+ // Skip creating engines since this command will obtain connection information to said engines.
dfltCmd = &cobra.Command{
Use: "default NAME",
Args: cobra.ExactArgs(1),
diff --git a/cmd/podman/system/connection/remove.go b/cmd/podman/system/connection/remove.go
index 84ec3e2ee..463eae9fa 100644
--- a/cmd/podman/system/connection/remove.go
+++ b/cmd/podman/system/connection/remove.go
@@ -10,7 +10,7 @@ import (
)
var (
- // Skip creating engines since this command will obtain connection information to said engines
+ // Skip creating engines since this command will obtain connection information to said engines.
rmCmd = &cobra.Command{
Use: "remove [options] NAME",
Aliases: []string{"rm"},
diff --git a/cmd/podman/system/df.go b/cmd/podman/system/df.go
index b2325507a..dad14df6b 100644
--- a/cmd/podman/system/df.go
+++ b/cmd/podman/system/df.go
@@ -2,6 +2,7 @@ package system
import (
"fmt"
+ "math"
"os"
"strings"
"time"
@@ -170,7 +171,7 @@ func printVerbose(cmd *cobra.Command, reports *entities.SystemDfReport) error {
return err
}
if err := writeTemplate(rpt, hdrs, dfImages); err != nil {
- return nil
+ return err
}
fmt.Fprint(rpt.Writer(), "\nContainers space usage:\n\n")
@@ -190,7 +191,7 @@ func printVerbose(cmd *cobra.Command, reports *entities.SystemDfReport) error {
return err
}
if err := writeTemplate(rpt, hdrs, dfContainers); err != nil {
- return nil
+ return err
}
fmt.Fprint(rpt.Writer(), "\nLocal Volumes space usage:\n\n")
@@ -288,6 +289,10 @@ func (d *dfSummary) Size() string {
}
func (d *dfSummary) Reclaimable() string {
- percent := int(float64(d.reclaimable)/float64(d.size)) * 100
+ percent := 0
+ // make sure to check this to prevent div by zero problems
+ if d.size > 0 {
+ percent = int(math.Round(float64(d.reclaimable) / float64(d.size) * float64(100)))
+ }
return fmt.Sprintf("%s (%d%%)", units.HumanSize(float64(d.reclaimable)), percent)
}
diff --git a/cmd/podman/system/service.go b/cmd/podman/system/service.go
index dd64db169..1a93b3137 100644
--- a/cmd/podman/system/service.go
+++ b/cmd/podman/system/service.go
@@ -67,7 +67,7 @@ func init() {
flags.StringVarP(&srvArgs.PProfAddr, "pprof-address", "", "",
"Binding network address for pprof profile endpoints, default: do not expose endpoints")
- flags.MarkHidden("pprof-address")
+ _ = flags.MarkHidden("pprof-address")
}
func aliasTimeoutFlag(_ *pflag.FlagSet, name string) pflag.NormalizedName {
diff --git a/cmd/podman/validate/args.go b/cmd/podman/validate/args.go
index 1642e2280..743ee1837 100644
--- a/cmd/podman/validate/args.go
+++ b/cmd/podman/validate/args.go
@@ -27,7 +27,7 @@ func SubCommandExists(cmd *cobra.Command, args []string) error {
}
return errors.Errorf("unrecognized command `%[1]s %[2]s`\n\nDid you mean this?\n\t%[3]s\n\nTry '%[1]s --help' for more information.", cmd.CommandPath(), args[0], strings.Join(suggestions, "\n\t"))
}
- cmd.Help()
+ cmd.Help() // nolint: errcheck
return errors.Errorf("missing command '%[1]s COMMAND'", cmd.CommandPath())
}
diff --git a/cmd/rootlessport/main.go b/cmd/rootlessport/main.go
index 37e91fca8..e9ab8b076 100644
--- a/cmd/rootlessport/main.go
+++ b/cmd/rootlessport/main.go
@@ -253,9 +253,9 @@ func serve(listener net.Listener, pm rkport.Manager) {
ctx := context.TODO()
err = handler(ctx, conn, pm)
if err != nil {
- conn.Write([]byte(err.Error()))
+ _, _ = conn.Write([]byte(err.Error()))
} else {
- conn.Write([]byte("OK"))
+ _, _ = conn.Write([]byte("OK"))
}
conn.Close()
}
diff --git a/contrib/cirrus/logformatter b/contrib/cirrus/logformatter
index 5b8e2f2a1..3ec839aa8 100755
--- a/contrib/cirrus/logformatter
+++ b/contrib/cirrus/logformatter
@@ -401,7 +401,7 @@ END_HTML
# Highlight the important (non-boilerplate) podman command.
$line =~ s/\s+--remote\s+/ /g; # --remote takes no args
# Strip out the global podman options, but show them on hover
- $line =~ s{(\S+\/podman(-remote)?)((\s+--(root|runroot|runtime|tmpdir|storage-opt|conmon|cgroup-manager|network-config-dir|storage-driver|events-backend|url) \S+)*)(.*)}{
+ $line =~ s{(\S+\/podman(-remote)?)((\s+--(root|runroot|runtime|tmpdir|storage-opt|conmon|cgroup-manager|network-backend|network-config-dir|storage-driver|events-backend|url) \S+)*)(.*)}{
my ($full_path, $remote, $options, $args) = ($1, $2||'', $3, $6);
$options =~ s/^\s+//;
diff --git a/contrib/cirrus/logformatter.t b/contrib/cirrus/logformatter.t
index 7f5973a15..2c191769c 100755
--- a/contrib/cirrus/logformatter.t
+++ b/contrib/cirrus/logformatter.t
@@ -131,7 +131,7 @@ $SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}
[+0103s] /var/tmp/go/src/github.com/containers/podman/test/e2e/pod_restart_test.go:18
[+0103s] [It] podman pod restart single empty pod
[+0103s] /var/tmp/go/src/github.com/containers/podman/test/e2e/pod_restart_test.go:41
-[+0103s] Running: /var/tmp/go/src/github.com/containers/podman/bin/podman --storage-opt vfs.imagestore=/tmp/podman/imagecachedir --root /tmp/podman_test553496330/crio --runroot /tmp/podman_test553496330/crio-run --runtime /usr/bin/runc --conmon /usr/bin/conmon --network-config-dir /etc/cni/net.d --cgroup-manager systemd --tmpdir /tmp/podman_test553496330 --events-backend file --storage-driver vfs pod create --infra=false --share
+[+0103s] Running: /var/tmp/go/src/github.com/containers/podman/bin/podman --network-backend netavark --storage-opt vfs.imagestore=/tmp/podman/imagecachedir --root /tmp/podman_test553496330/crio --runroot /tmp/podman_test553496330/crio-run --runtime /usr/bin/runc --conmon /usr/bin/conmon --network-config-dir /etc/cni/net.d --cgroup-manager systemd --tmpdir /tmp/podman_test553496330 --events-backend file --storage-driver vfs pod create --infra=false --share
[+0103s] 4810be0cfbd42241e349dbe7d50fbc54405cd320a6637c65fd5323f34d64af89
[+0103s] output: 4810be0cfbd42241e349dbe7d50fbc54405cd320a6637c65fd5323f34d64af89
[+0103s] Running: /var/tmp/go/src/github.com/containers/podman/bin/podman --storage-opt vfs.imagestore=/tmp/podman/imagecachedir --root /tmp/podman_test553496330/crio --runroot /tmp/podman_test553496330/crio-run --runtime /usr/bin/runc --conmon /usr/bin/conmon --network-config-dir /etc/cni/net.d --cgroup-manager systemd --tmpdir /tmp/podman_test553496330 --events-backend file --storage-driver vfs pod restart 4810be0cfbd42241e349dbe7d50fbc54405cd320a6637c65fd5323f34d64af89
@@ -187,7 +187,8 @@ $SCRIPT_BASE/integration_test.sh |& ${TIMESTAMP}
<span class="timestamp"> </span> /var/tmp/go/src/github.com<a class="codelink" href='https://github.com/containers/podman/blob/40f5d8b1becd381c4e8283ed3940d09193e4fe06/test/e2e/pod_restart_test.go#L18'>/containers/podman/test/e2e/pod_restart_test.go:18</a>
<span class="timestamp"> </span><span class="testname">[It] podman pod restart single empty pod</span>
<span class="timestamp"> </span> /var/tmp/go/src/github.com<a class="codelink" href='https://github.com/containers/podman/blob/40f5d8b1becd381c4e8283ed3940d09193e4fe06/test/e2e/pod_restart_test.go#L41'>/containers/podman/test/e2e/pod_restart_test.go:41</a>
-<span class="timestamp"> </span><span class="boring">#</span> <span title="/var/tmp/go/src/github.com/containers/podman/bin/podman"><b>podman</b></span> <span class="boring" title="--storage-opt vfs.imagestore=/tmp/podman/imagecachedir
+<span class="timestamp"> </span><span class="boring">#</span> <span title="/var/tmp/go/src/github.com/containers/podman/bin/podman"><b>podman</b></span> <span class="boring" title="--network-backend netavark
+--storage-opt vfs.imagestore=/tmp/podman/imagecachedir
--root /tmp/podman_test553496330/crio
--runroot /tmp/podman_test553496330/crio-run
--runtime /usr/bin/runc
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index b9542fec5..c96f60c48 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -1010,7 +1010,7 @@ Exclude all doc files except Help.doc from the image.
This functionality is compatible with the handling of .containerignore files
described here:
-https://github.com/containers/buildah/blob/main/docs/containerignore.5.md
+https://github.com/containers/common/blob/main/docs/containerignore.5.md
**registries.conf** (`/etc/containers/registries.conf`)
@@ -1032,7 +1032,7 @@ If you are using `useradd` within your build script, you should pass the
useradd to stop creating the lastlog file.
## SEE ALSO
-**[podman(1)](podman.1.md)**, **[buildah(1)](https://github.com/containers/buildah/blob/main/docs/buildah.1.md)**, **[containers-certs.d(5)](https://github.com/containers/image/blob/main/docs/containers-certs.d.5.md)**, **[containers-registries.conf(5)](https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md)**, **[crun(1)](https://github.com/containers/crun/blob/main/crun.1.md)**, **[runc(8)](https://github.com/opencontainers/runc/blob/master/man/runc.8.md)**, **[useradd(8)](https://www.unix.com/man-page/redhat/8/useradd)**, **[podman-ps(1)](podman-ps.1.md)**, **[podman-rm(1)](podman-rm.1.md)**, **[Containerfile(5)](https://github.com/containers/buildah/blob/main/docs/Containerfile.5.md)**, **[containerignore(5)](https://github.com/containers/buildah/blob/main/docs/containerignore.5.md)**
+**[podman(1)](podman.1.md)**, **[buildah(1)](https://github.com/containers/buildah/blob/main/docs/buildah.1.md)**, **[containers-certs.d(5)](https://github.com/containers/image/blob/main/docs/containers-certs.d.5.md)**, **[containers-registries.conf(5)](https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md)**, **[crun(1)](https://github.com/containers/crun/blob/main/crun.1.md)**, **[runc(8)](https://github.com/opencontainers/runc/blob/master/man/runc.8.md)**, **[useradd(8)](https://www.unix.com/man-page/redhat/8/useradd)**, **[podman-ps(1)](podman-ps.1.md)**, **[podman-rm(1)](podman-rm.1.md)**, **[Containerfile(5)](https://github.com/containers/common/blob/main/docs/Containerfile.5.md)**, **[containerignore(5)](https://github.com/containers/common/blob/main/docs/containerignore.5.md)**
## HISTORY
Aug 2020, Additional options and .containerignore added by Dan Walsh `<dwalsh@redhat.com>`
diff --git a/docs/source/markdown/podman-rmi.1.md b/docs/source/markdown/podman-rmi.1.md
index 5fe0efa18..8d0e5e500 100644
--- a/docs/source/markdown/podman-rmi.1.md
+++ b/docs/source/markdown/podman-rmi.1.md
@@ -24,6 +24,10 @@ Remove all images in the local storage.
This option will cause podman to remove all containers that are using the image before removing the image from the system.
+#### **--ignore**, **-i**
+
+If a specified image does not exist in the local storage, ignore it and do not throw an error.
+
Remove an image by its short ID
```
@@ -43,6 +47,16 @@ Remove all images and containers.
```
$ podman rmi -a -f
```
+
+Remove an absent image with and without the `--ignore` flag.
+```
+$ podman rmi --ignore nothing
+$ podman rmi nothing
+Error: nothing: image not known
+
+```
+
+
## Exit Status
**0** All specified images removed
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index 3bc4fc7f1..17d8ea06a 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -15,7 +15,7 @@ example *unix:///run/user/1000/podman/podman.sock*)
To access the API service inside a container:
- mount the socket as a volume
-- run the container with `--security-opt label:disable`
+- run the container with `--security-opt label=disable`
The REST API provided by **podman system service** is split into two parts: a compatibility layer offering support for the Docker v1.40 API, and a Podman-native Libpod layer.
Documentation for the latter is available at *https://docs.podman.io/en/latest/_static/api.html*.
diff --git a/docs/source/markdown/podman-unshare.1.md b/docs/source/markdown/podman-unshare.1.md
index 01393a862..db1bc5387 100644
--- a/docs/source/markdown/podman-unshare.1.md
+++ b/docs/source/markdown/podman-unshare.1.md
@@ -4,7 +4,7 @@
podman\-unshare - Run a command inside of a modified user namespace
## SYNOPSIS
-**podman unshare** [*--*] [*command*]
+**podman unshare** [*options*] [*command*]
## DESCRIPTION
Launches a process (by default, *$SHELL*) in a new user namespace. The user
@@ -24,6 +24,8 @@ The unshare session defines two environment variables:
- **CONTAINERS_GRAPHROOT**: the path to the persistent container's data.
- **CONTAINERS_RUNROOT**: the path to the volatile container's data.
+*IMPORTANT: This command is not available with the remote Podman client.*
+
## OPTIONS
#### **--help**, **-h**
diff --git a/go.mod b/go.mod
index b78bd83b3..ada985908 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,7 @@ require (
github.com/containers/image/v5 v5.20.1-0.20220310094651-0d8056ee346f
github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.7.2
- github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863
+ github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
diff --git a/go.sum b/go.sum
index ae45625bd..19c625288 100644
--- a/go.sum
+++ b/go.sum
@@ -324,8 +324,9 @@ github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sb
github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2 h1:0P0vWmfrEeTtZ4BBRrpuyu/HxR9HPBLfeljGOra5f6g=
github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -377,8 +378,9 @@ github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c
github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
-github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863 h1:10k6Dl+Bm9zgsxP7qv0mnrhd7+XlCmgQWKgkydwZ7vQ=
github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863/go.mod h1:uhf9mPUP+uYajC2/S0A9NaCVa2JJ6+1C254ue4Edv2g=
+github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91 h1:gEbkqcBM3XFbIz6L9bpJyUEcuDd8vi8jzyrneVS8At4=
+github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -876,8 +878,9 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/hack/make-and-check-size b/hack/make-and-check-size
new file mode 100755
index 000000000..a6a77e8ca
--- /dev/null
+++ b/hack/make-and-check-size
@@ -0,0 +1,131 @@
+#!/bin/bash
+#
+# make-and-check-size - wrapper around 'make' that also checks binary growth
+#
+# This script is intended to be run via 'git rebase -x', in a Makefile rule
+# such as:
+#
+# build-all-new-commits:
+# CONTEXT_DIR=$(shell mktemp -d --tmpdir make-size-check.XXXXXXX); \
+# git rebase $(GIT_BASE_BRANCH)^ -x "hack/make-and-check-size $$CONTEXT_DIR"; \
+# $(RM) -rf $$CONTEXT_DIR
+#
+# ...which has long been a part of our usual CI, one that makes sure that
+# each commit (in a multi-commit PR) can be compiled individually. By
+# adding the '^' to GIT_BASE_BRANCH we establish a baseline and store
+# the binary sizes of each file (podman, podman-remote) prior to our PR.
+#
+# CONTEXT_DIR is a temporary directory used to store the original sizes
+# of each binary file under bin/
+#
+# *IMPORTANT NOTE*: this script will leave the git checkout in a funky state!
+# (because we rebase onto a nonterminal commit). I believe this is OK, since
+# this makefile target is used only in CI and only in a scratch VM. Running
+# this in a development environment would yield unpredictable results anyway,
+# by rebasing onto origin/main by default and by leaving an aborted rebase
+# on failure.
+#
+ME=$(basename $0)
+
+###############################################################################
+# BEGIN end-user-customizable settings
+
+# Maximum allowable size, in bytes
+MAX_BIN_GROWTH=$((50 * 1024))
+
+# Github label which allows overriding this check
+OVERRIDE_LABEL=bloat_approved
+
+# END end-user-customizable settings
+###############################################################################
+
+#
+# Helper function: queries github for labels on this PR
+#
+function bloat_approved() {
+ # Argument is the actual size increase in this build.
+ # FIXME: 2022-03-21: this is not actually used atm, but Ed hopes some day
+ # to implement a more robust size-override mechanism, such as by
+ # requiring a MAX_BIN_GROWTH=nnn statement in github comments.
+ local actual_growth="$1"
+
+ if [[ -z "$CIRRUS_PR" ]]; then
+ echo "$ME: cannot query github: \$CIRRUS_PR is undefined" >&2
+ return 1
+ fi
+ if [[ -z "$CIRRUS_REPO_CLONE_TOKEN" ]]; then
+ echo "$ME: cannot query github: \$CIRRUS_REPO_CLONE_TOKEN is undefined" >&2
+ return 1
+ fi
+
+ query="{
+ \"query\": \"query {
+ repository(owner: \\\"containers\\\", name: \\\"podman\\\") {
+ pullRequest(number: $CIRRUS_PR) {
+ labels(first: 100) {
+ nodes {
+ name
+ }
+ }
+ }
+ }
+}\"
+}"
+
+ result=$(curl -s -H "Authorization: bearer $CIRRUS_REPO_CLONE_TOKEN" -H "Accept: application/vnd.github.antiope-preview+json" -H "Content-Type: application/json" -X POST --data @- https://api.github.com/graphql <<<"$query")
+
+ labels=$(jq -r '.data.repository.pullRequest.labels.nodes[].name' <<<"$result")
+
+ grep -q -w "$OVERRIDE_LABEL" <<<"$labels"
+}
+
+# ACTUAL CODE BEGINS HERE
+set -e
+
+# Must be invoked with one argument, an existing context directory
+context_dir=${1?Missing CONTEXT-DIR argument}
+if [[ ! -d $context_dir ]]; then
+ echo "$ME: directory '$context_dir' does not exist"
+ exit 1
+fi
+
+# This is the original (and primary) purpose of this check: if 'make' fails,
+# there is no point in continuing
+echo
+echo "Building: $(git rev-parse HEAD)"
+make
+
+# Determine size of each built file.
+# - If this is our first time through, preserve that size in a tmpfile
+# - On all subsequent runs, compare built size to initial size
+for bin in bin/*;do
+ size=$(stat -c %s $bin)
+
+ saved_size_file=$context_dir/$(basename $bin)
+ if [[ -e $saved_size_file ]]; then
+ # Not the first time through: compare to original size
+ size_orig=$(< $saved_size_file)
+ delta_size=$(( size - size_orig ))
+
+ if [[ $delta_size -gt $MAX_BIN_GROWTH ]]; then
+ separator=$(printf "%.0s*" {1..75}) # row of stars, for highlight
+ echo "$separator"
+ echo "* $bin grew by $delta_size bytes; max allowed is $MAX_BIN_GROWTH."
+ echo "*"
+ if bloat_approved $delta_size; then
+ echo "* Continuing due to '$OVERRIDE_LABEL' label"
+ echo "*"
+ echo "$separator"
+ else
+ echo "* Please investigate, and fix if possible."
+ echo "*"
+ echo "* A repo admin can override by setting the $OVERRIDE_LABEL label"
+ echo "$separator"
+ exit 1
+ fi
+ fi
+ else
+ # First time through: preserve original file size
+ echo $size >$saved_size_file
+ fi
+done
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 6389431ab..9745121c7 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -366,7 +366,7 @@ func (s *BoltState) GetDBConfig() (*DBConfig, error) {
err = db.View(func(tx *bolt.Tx) error {
configBucket, err := getRuntimeConfigBucket(tx)
if err != nil {
- return nil
+ return err
}
// Some of these may be nil
diff --git a/libpod/common/common.go b/libpod/common/common.go
index 93a736af2..34cabeadc 100644
--- a/libpod/common/common.go
+++ b/libpod/common/common.go
@@ -1,16 +1,16 @@
package common
-// IsTrue determines whether the given string equals "true"
+// IsTrue determines whether the given string equals "true".
func IsTrue(str string) bool {
return str == "true"
}
-// IsFalse determines whether the given string equals "false"
+// IsFalse determines whether the given string equals "false".
func IsFalse(str string) bool {
return str == "false"
}
-// IsValidBool determines whether the given string equals "true" or "false"
+// IsValidBool determines whether the given string equals "true" or "false".
func IsValidBool(str string) bool {
return IsTrue(str) || IsFalse(str)
}
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 03b3dcc04..0b6139335 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -921,7 +921,11 @@ func (c *Container) Stat(ctx context.Context, containerPath string) (*define.Fil
if err != nil {
return nil, err
}
- defer c.unmount(false)
+ defer func() {
+ if err := c.unmount(false); err != nil {
+ logrus.Errorf("Unmounting container %s: %v", c.ID(), err)
+ }
+ }()
}
info, _, _, err := c.stat(ctx, mountPoint, containerPath)
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 3c21cade8..b7362e7fb 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1087,13 +1087,6 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
// With the spec complete, do an OCI create
if _, err = c.ociRuntime.CreateContainer(c, nil); err != nil {
- // Fedora 31 is carrying a patch to display improved error
- // messages to better handle the V2 transition. This is NOT
- // upstream in any OCI runtime.
- // TODO: Remove once runc supports cgroupsv2
- if strings.Contains(err.Error(), "this version of runc doesn't work on cgroups v2") {
- logrus.Errorf("Oci runtime %q does not support Cgroups V2: use system migrate to mitigate", c.ociRuntime.Name())
- }
return err
}
@@ -1268,7 +1261,10 @@ func (c *Container) start() error {
}
}
- if c.config.HealthCheckConfig != nil {
+ // Check if healthcheck is not nil and --no-healthcheck option is not set.
+ // If --no-healthcheck is set Test will be always set to `[NONE]` so no need
+ // to update status in such case.
+ if c.config.HealthCheckConfig != nil && !(len(c.config.HealthCheckConfig.Test) == 1 && c.config.HealthCheckConfig.Test[0] == "NONE") {
if err := c.updateHealthStatus(define.HealthCheckStarting); err != nil {
logrus.Error(err)
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 75250b9b1..4d6922d73 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -2587,7 +2587,7 @@ func (c *Container) generateUserGroupEntry(addedGID int) (string, int, error) {
gid, err := strconv.ParseUint(group, 10, 32)
if err != nil {
- return "", 0, nil
+ return "", 0, nil // nolint: nilerr
}
if addedGID != 0 && addedGID == int(gid) {
@@ -2740,7 +2740,7 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, int, int, err
// If a non numeric User, then don't generate passwd
uid, err := strconv.ParseUint(userspec, 10, 32)
if err != nil {
- return "", 0, 0, nil
+ return "", 0, 0, nil // nolint: nilerr
}
if addedUID != 0 && int(uid) == addedUID {
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index be2aaacca..76173cde9 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// EventLogFile is the structure for event writing to a logfile. It contains the eventer
@@ -59,7 +60,9 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
}
go func() {
time.Sleep(time.Until(untilTime))
- t.Stop()
+ if err := t.Stop(); err != nil {
+ logrus.Errorf("Stopping logger: %v", err)
+ }
}()
}
funcDone := make(chan bool)
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index d2d1e12cb..20c8059a5 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -1149,7 +1149,7 @@ func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBloc
// result
func resultToBasicNetworkConfig(result types.StatusBlock) (define.InspectBasicNetworkConfig, error) {
config := define.InspectBasicNetworkConfig{}
- interfaceNames := make([]string, len(result.Interfaces))
+ interfaceNames := make([]string, 0, len(result.Interfaces))
for interfaceName := range result.Interfaces {
interfaceNames = append(interfaceNames, interfaceName)
}
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 735b1f09b..ba4079bed 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -750,7 +750,7 @@ func openControlFile(ctr *Container, parentDir string) (*os.File, error) {
for i := 0; i < 600; i++ {
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY|unix.O_NONBLOCK, 0)
if err == nil {
- return controlFile, err
+ return controlFile, nil
}
if !isRetryable(err) {
return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID())
@@ -1015,7 +1015,8 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
}
data, err := ctr.inspectLocked(false)
if err != nil {
- return "", nil
+ // FIXME: this error should probably be returned
+ return "", nil // nolint: nilerr
}
tmpl, err := template.New("container").Parse(logTag)
if err != nil {
@@ -1371,7 +1372,7 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
case define.JSONLogging:
fallthrough
//lint:ignore ST1015 the default case has to be here
- default: //nolint-stylecheck
+ default: //nolint:stylecheck
// No case here should happen except JSONLogging, but keep this here in case the options are extended
logrus.Errorf("%s logging specified but not supported. Choosing k8s-file logging instead", ctr.LogDriver())
fallthrough
@@ -1596,7 +1597,7 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
ch <- syncStruct{si: si}
}()
- data := -1
+ data := -1 //nolint: wastedassign
select {
case ss := <-ch:
if ss.err != nil {
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 230491c1a..2bbccfdf6 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -6,6 +6,7 @@ package libpod
import (
"context"
"fmt"
+ "os"
"path"
"path/filepath"
"strings"
@@ -239,7 +240,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Don't try if we failed to retrieve the cgroup
if err == nil {
- if err := conmonCgroup.Update(resLimits); err != nil {
+ if err := conmonCgroup.Update(resLimits); err != nil && !os.IsNotExist(err) {
logrus.Warnf("Error updating pod %s conmon cgroup PID limit: %v", p.ID(), err)
}
}
diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go
index 8badab20d..a22222f10 100644
--- a/pkg/annotations/annotations.go
+++ b/pkg/annotations/annotations.go
@@ -1,122 +1,122 @@
package annotations
const (
- // Annotations carries the received Kubelet annotations
+ // Annotations carries the received Kubelet annotations.
Annotations = "io.kubernetes.cri-o.Annotations"
- // ContainerID is the container ID annotation
+ // ContainerID is the container ID annotation.
ContainerID = "io.kubernetes.cri-o.ContainerID"
- // ContainerName is the container name annotation
+ // ContainerName is the container name annotation.
ContainerName = "io.kubernetes.cri-o.ContainerName"
- // ContainerType is the container type (sandbox or container) annotation
+ // ContainerType is the container type (sandbox or container) annotation.
ContainerType = "io.kubernetes.cri-o.ContainerType"
- // Created is the container creation time annotation
+ // Created is the container creation time annotation.
Created = "io.kubernetes.cri-o.Created"
- // HostName is the container host name annotation
+ // HostName is the container host name annotation.
HostName = "io.kubernetes.cri-o.HostName"
- // CgroupParent is the sandbox cgroup parent
+ // CgroupParent is the sandbox cgroup parent.
CgroupParent = "io.kubernetes.cri-o.CgroupParent"
- // IP is the container ipv4 or ipv6 address
+ // IP is the container ipv4 or ipv6 address.
IP = "io.kubernetes.cri-o.IP"
- // NamespaceOptions store the options for namespaces
+ // NamespaceOptions store the options for namespaces.
NamespaceOptions = "io.kubernetes.cri-o.NamespaceOptions"
- // SeccompProfilePath is the node seccomp profile path
+ // SeccompProfilePath is the node seccomp profile path.
SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath"
- // Image is the container image ID annotation
+ // Image is the container image ID annotation.
Image = "io.kubernetes.cri-o.Image"
- // ImageName is the container image name annotation
+ // ImageName is the container image name annotation.
ImageName = "io.kubernetes.cri-o.ImageName"
- // ImageRef is the container image ref annotation
+ // ImageRef is the container image ref annotation.
ImageRef = "io.kubernetes.cri-o.ImageRef"
- // KubeName is the kubernetes name annotation
+ // KubeName is the kubernetes name annotation.
KubeName = "io.kubernetes.cri-o.KubeName"
- // PortMappings holds the port mappings for the sandbox
+ // PortMappings holds the port mappings for the sandbox.
PortMappings = "io.kubernetes.cri-o.PortMappings"
- // Labels are the kubernetes labels annotation
+ // Labels are the kubernetes labels annotation.
Labels = "io.kubernetes.cri-o.Labels"
- // LogPath is the container logging path annotation
+ // LogPath is the container logging path annotation.
LogPath = "io.kubernetes.cri-o.LogPath"
- // Metadata is the container metadata annotation
+ // Metadata is the container metadata annotation.
Metadata = "io.kubernetes.cri-o.Metadata"
- // Name is the pod name annotation
+ // Name is the pod name annotation.
Name = "io.kubernetes.cri-o.Name"
- // Namespace is the pod namespace annotation
+ // Namespace is the pod namespace annotation.
Namespace = "io.kubernetes.cri-o.Namespace"
- // PrivilegedRuntime is the annotation for the privileged runtime path
+ // PrivilegedRuntime is the annotation for the privileged runtime path.
PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime"
- // ResolvPath is the resolver configuration path annotation
+ // ResolvPath is the resolver configuration path annotation.
ResolvPath = "io.kubernetes.cri-o.ResolvPath"
- // HostnamePath is the path to /etc/hostname to bind mount annotation
+ // HostnamePath is the path to /etc/hostname to bind mount annotation.
HostnamePath = "io.kubernetes.cri-o.HostnamePath"
- // SandboxID is the sandbox ID annotation
+ // SandboxID is the sandbox ID annotation.
SandboxID = "io.kubernetes.cri-o.SandboxID"
- // SandboxName is the sandbox name annotation
+ // SandboxName is the sandbox name annotation.
SandboxName = "io.kubernetes.cri-o.SandboxName"
- // ShmPath is the shared memory path annotation
+ // ShmPath is the shared memory path annotation.
ShmPath = "io.kubernetes.cri-o.ShmPath"
- // MountPoint is the mount point of the container rootfs
+ // MountPoint is the mount point of the container rootfs.
MountPoint = "io.kubernetes.cri-o.MountPoint"
- // RuntimeHandler is the annotation for runtime handler
+ // RuntimeHandler is the annotation for runtime handler.
RuntimeHandler = "io.kubernetes.cri-o.RuntimeHandler"
- // TTY is the terminal path annotation
+ // TTY is the terminal path annotation.
TTY = "io.kubernetes.cri-o.TTY"
- // Stdin is the stdin annotation
+ // Stdin is the stdin annotation.
Stdin = "io.kubernetes.cri-o.Stdin"
- // StdinOnce is the stdin_once annotation
+ // StdinOnce is the stdin_once annotation.
StdinOnce = "io.kubernetes.cri-o.StdinOnce"
- // Volumes is the volumes annotation
+ // Volumes is the volumes annotation.
Volumes = "io.kubernetes.cri-o.Volumes"
- // HostNetwork indicates whether the host network namespace is used or not
+ // HostNetwork indicates whether the host network namespace is used or not.
HostNetwork = "io.kubernetes.cri-o.HostNetwork"
- // CNIResult is the JSON string representation of the Result from CNI
+ // CNIResult is the JSON string representation of the Result from CNI.
CNIResult = "io.kubernetes.cri-o.CNIResult"
// ContainerManager is the annotation key for indicating the creator and
- // manager of the container
+ // manager of the container.
ContainerManager = "io.container.manager"
)
// ContainerType values
const (
- // ContainerTypeSandbox represents a pod sandbox container
+ // ContainerTypeSandbox represents a pod sandbox container.
ContainerTypeSandbox = "sandbox"
- // ContainerTypeContainer represents a container running within a pod
+ // ContainerTypeContainer represents a container running within a pod.
ContainerTypeContainer = "container"
)
// ContainerManagerLibpod indicates that libpod created and manages the
-// container
+// container.
const ContainerManagerLibpod = "libpod"
diff --git a/pkg/api/handlers/compat/images_prune.go b/pkg/api/handlers/compat/images_prune.go
index 88776dc49..c0be9da7d 100644
--- a/pkg/api/handlers/compat/images_prune.go
+++ b/pkg/api/handlers/compat/images_prune.go
@@ -43,7 +43,7 @@ func PruneImages(w http.ResponseWriter, r *http.Request) {
return
}
- idr := make([]types.ImageDeleteResponseItem, len(imagePruneReports))
+ idr := make([]types.ImageDeleteResponseItem, 0, len(imagePruneReports))
var reclaimedSpace uint64
var errorMsg bytes.Buffer
for _, p := range imagePruneReports {
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
index eb1a5d59c..89d914e0a 100644
--- a/pkg/api/handlers/compat/networks.go
+++ b/pkg/api/handlers/compat/networks.go
@@ -242,7 +242,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
body := struct {
ID string `json:"Id"`
- Warning []string
+ Warning string
}{
ID: newNetwork.ID,
}
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index eb9fb12a6..d59a83342 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -613,6 +613,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) {
query := struct {
All bool `schema:"all"`
Force bool `schema:"force"`
+ Ignore bool `schema:"ignore"`
Images []string `schema:"images"`
}{}
@@ -621,7 +622,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) {
return
}
- opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force}
+ opts := entities.ImageRemoveOptions{All: query.All, Force: query.Force, Ignore: query.Ignore}
imageEngine := abi.ImageEngine{Libpod: runtime}
rmReport, rmErrors := imageEngine.Remove(r.Context(), query.Images, opts)
strErrs := errorhandling.ErrorsToStrings(rmErrors)
diff --git a/pkg/api/handlers/utils/images.go b/pkg/api/handlers/utils/images.go
index 15b16bc43..7154f5616 100644
--- a/pkg/api/handlers/utils/images.go
+++ b/pkg/api/handlers/utils/images.go
@@ -63,7 +63,7 @@ func IsRegistryReference(name string) error {
imageRef, err := alltransports.ParseImageName(name)
if err != nil {
// No supported transport -> assume a docker-stype reference.
- return nil
+ return nil // nolint: nilerr
}
if imageRef.Transport().Name() == docker.Transport.Name() {
return nil
diff --git a/pkg/api/server/register_exec.go b/pkg/api/server/register_exec.go
index c19ca7859..90136463d 100644
--- a/pkg/api/server/register_exec.go
+++ b/pkg/api/server/register_exec.go
@@ -169,7 +169,7 @@ func (s *APIServer) registerExecHandlers(r *mux.Router) error {
// - application/json
// responses:
// 200:
- // description: no error
+ // $ref: "#/responses/InspectExecSession"
// 404:
// $ref: "#/responses/NoSuchExecInstance"
// 500:
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index 017310f12..89f808e7d 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -944,6 +944,10 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// name: force
// description: Force image removal (including containers using the images).
// type: boolean
+ // - in: query
+ // name: ignore
+ // description: Ignore if a specified image does not exist and do not throw an error.
+ // type: boolean
// produces:
// - application/json
// responses:
diff --git a/pkg/api/server/register_networks.go b/pkg/api/server/register_networks.go
index 4466c938f..b900aa953 100644
--- a/pkg/api/server/register_networks.go
+++ b/pkg/api/server/register_networks.go
@@ -105,8 +105,15 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// schema:
// $ref: "#/definitions/NetworkCreateRequest"
// responses:
- // 200:
- // $ref: "#/responses/CompatNetworkCreate"
+ // 201:
+ // description: network created
+ // schema:
+ // type: object
+ // properties:
+ // Id:
+ // type: string
+ // Warning:
+ // type: string
// 400:
// $ref: "#/responses/BadParamError"
// 500:
diff --git a/pkg/api/server/swagger.go b/pkg/api/server/swagger.go
index 9b652be87..6cf89581a 100644
--- a/pkg/api/server/swagger.go
+++ b/pkg/api/server/swagger.go
@@ -235,3 +235,12 @@ type swagSystemAuthResponse struct {
entities.AuthReport
}
}
+
+// Inspect response
+// swagger:response InspectExecSession
+type swagInspectExecSession struct {
+ // in:body
+ Body struct {
+ define.InspectExecSession
+ }
+}
diff --git a/pkg/bindings/containers/attach.go b/pkg/bindings/containers/attach.go
index f410606e4..0c6ebdd2f 100644
--- a/pkg/bindings/containers/attach.go
+++ b/pkg/bindings/containers/attach.go
@@ -279,7 +279,7 @@ func DemuxFrame(r io.Reader, buffer []byte, length int) (frame []byte, err error
n, err := io.ReadFull(r, buffer[0:length])
if err != nil {
- return nil, nil
+ return nil, err
}
if n < length {
err = io.ErrUnexpectedEOF
diff --git a/pkg/bindings/images/types.go b/pkg/bindings/images/types.go
index a44a3527f..163365924 100644
--- a/pkg/bindings/images/types.go
+++ b/pkg/bindings/images/types.go
@@ -11,6 +11,8 @@ type RemoveOptions struct {
All *bool
// Forces removes all containers based on the image
Force *bool
+ // Ignore if a specified image does not exist and do not throw an error.
+ Ignore *bool
}
//go:generate go run ../generator/generator.go DiffOptions
diff --git a/pkg/bindings/images/types_remove_options.go b/pkg/bindings/images/types_remove_options.go
index 1fbe5f4ea..613a33183 100644
--- a/pkg/bindings/images/types_remove_options.go
+++ b/pkg/bindings/images/types_remove_options.go
@@ -46,3 +46,18 @@ func (o *RemoveOptions) GetForce() bool {
}
return *o.Force
}
+
+// WithIgnore set field Ignore to given value
+func (o *RemoveOptions) WithIgnore(value bool) *RemoveOptions {
+ o.Ignore = &value
+ return o
+}
+
+// GetIgnore returns value of field Ignore
+func (o *RemoveOptions) GetIgnore() bool {
+ if o.Ignore == nil {
+ var z bool
+ return z
+ }
+ return *o.Ignore
+}
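
For context, the new Ignore option is consumed through the Go bindings roughly as follows — a minimal sketch, assuming the github.com/containers/podman/v4 module path, a reachable Podman service socket, and an illustrative image name:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/images"
)

func main() {
	// Connect to the Podman service (the socket path is an assumption for this sketch).
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// With Ignore set, a missing image is skipped instead of producing an error.
	opts := new(images.RemoveOptions).WithIgnore(true).WithForce(false)
	report, errs := images.Remove(conn, []string{"no-such-image"}, opts)
	fmt.Println(report, errs)
}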
diff --git a/pkg/bindings/test/attach_test.go b/pkg/bindings/test/attach_test.go
index 670566882..dcebe0809 100644
--- a/pkg/bindings/test/attach_test.go
+++ b/pkg/bindings/test/attach_test.go
@@ -44,7 +44,8 @@ var _ = Describe("Podman containers attach", func() {
timeout := uint(5)
err := containers.Stop(bt.conn, id, new(containers.StopOptions).WithTimeout(timeout))
if err != nil {
- GinkgoWriter.Write([]byte(err.Error()))
+ _, writeErr := GinkgoWriter.Write([]byte(err.Error()))
+ Expect(writeErr).ShouldNot(HaveOccurred())
}
}()
diff --git a/pkg/bindings/test/auth_test.go b/pkg/bindings/test/auth_test.go
index b421f0797..c4c4b16d8 100644
--- a/pkg/bindings/test/auth_test.go
+++ b/pkg/bindings/test/auth_test.go
@@ -40,7 +40,8 @@ var _ = Describe("Podman images", func() {
AfterEach(func() {
s.Kill()
bt.cleanup()
- registry.Stop()
+ err := registry.Stop()
+ Expect(err).To(BeNil())
})
// Test using credentials.
diff --git a/pkg/bindings/test/common_test.go b/pkg/bindings/test/common_test.go
index f51e5f404..f2602967b 100644
--- a/pkg/bindings/test/common_test.go
+++ b/pkg/bindings/test/common_test.go
@@ -211,7 +211,7 @@ func (b *bindingTest) RunTopContainer(containerName *string, podName *string) (s
}
ctr, err := containers.CreateWithSpec(b.conn, s, nil)
if err != nil {
- return "", nil
+ return "", err
}
err = containers.Start(b.conn, ctr.ID, nil)
if err != nil {
diff --git a/pkg/bindings/test/containers_test.go b/pkg/bindings/test/containers_test.go
index 9411d8a5f..bf627fdba 100644
--- a/pkg/bindings/test/containers_test.go
+++ b/pkg/bindings/test/containers_test.go
@@ -322,7 +322,8 @@ var _ = Describe("Podman containers ", func() {
// a container that has no healthcheck should be a 409
var name = "top"
- bt.RunTopContainer(&name, nil)
+ _, err = bt.RunTopContainer(&name, nil)
+ Expect(err).To(BeNil())
_, err = containers.RunHealthCheck(bt.conn, name, nil)
Expect(err).ToNot(BeNil())
code, _ = bindings.CheckResponseCode(err)
diff --git a/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
index 2765d18e8..6a8a7894a 100644
--- a/pkg/checkpoint/crutils/checkpoint_restore_utils.go
+++ b/pkg/checkpoint/crutils/checkpoint_restore_utils.go
@@ -99,13 +99,12 @@ func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) erro
// root file system changes on top of containerRootDirectory
func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error {
rootfsDiffPath := filepath.Join(baseDirectory, metadata.RootFsDiffTar)
- if _, err := os.Stat(rootfsDiffPath); err != nil {
- // Only do this if a rootfs-diff.tar actually exists
- return nil
- }
-
+ // Only do this if a rootfs-diff.tar actually exists
rootfsDiffFile, err := os.Open(rootfsDiffPath)
if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil
+ }
return errors.Wrap(err, "failed to open root file-system diff file")
}
defer rootfsDiffFile.Close()
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 2ac21cfeb..93334fc6a 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -90,6 +90,8 @@ type ImageRemoveOptions struct {
All bool
// Force will force image removal including containers using the images.
Force bool
+ // Ignore if a specified image does not exist and do not throw an error.
+ Ignore bool
// Confirms if given name is a manifest list and removes it, otherwise returns error.
LookupManifest bool
}
diff --git a/pkg/domain/entities/types.go b/pkg/domain/entities/types.go
index 4d9ced900..bed3183e9 100644
--- a/pkg/domain/entities/types.go
+++ b/pkg/domain/entities/types.go
@@ -20,7 +20,7 @@ type Volume struct {
}
type Report struct {
- Id []string //nolint
+ Id []string // nolint
Err map[string]error
}
@@ -98,8 +98,10 @@ type EventsOptions struct {
// ContainerCreateResponse is the response struct for creating a container
type ContainerCreateResponse struct {
// ID of the container created
+ // required: true
ID string `json:"Id"`
// Warnings during container creation
+ // required: true
Warnings []string `json:"Warnings"`
}
diff --git a/pkg/domain/filters/containers.go b/pkg/domain/filters/containers.go
index 85ba4f84f..4c6964a00 100644
--- a/pkg/domain/filters/containers.go
+++ b/pkg/domain/filters/containers.go
@@ -213,8 +213,10 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
for _, val := range filterValues {
net, err := r.Network().NetworkInspect(val)
if err != nil {
- // ignore not found errors
- break
+ if errors.Is(err, define.ErrNoSuchNetwork) {
+ continue
+ }
+ return nil, err
}
inputNetNames = append(inputNetNames, net.Name)
}
diff --git a/pkg/domain/filters/pods.go b/pkg/domain/filters/pods.go
index 2f9442dff..e22480006 100644
--- a/pkg/domain/filters/pods.go
+++ b/pkg/domain/filters/pods.go
@@ -131,8 +131,10 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti
for _, val := range filterValues {
net, err := r.Network().NetworkInspect(val)
if err != nil {
- // ignore not found errors
- break
+ if errors.Is(err, define.ErrNoSuchNetwork) {
+ continue
+ }
+ return nil, err
}
inputNetNames = append(inputNetNames, net.Name)
}
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 0b1281aac..3fdfa8f3a 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -578,6 +578,7 @@ func (ir *ImageEngine) Remove(ctx context.Context, images []string, opts entitie
libimageOptions := &libimage.RemoveImagesOptions{}
libimageOptions.Filters = []string{"readonly=false"}
libimageOptions.Force = opts.Force
+ libimageOptions.Ignore = opts.Ignore
libimageOptions.LookupManifest = opts.LookupManifest
if !opts.All {
libimageOptions.Filters = append(libimageOptions.Filters, "intermediate=false")
@@ -847,13 +848,12 @@ func execPodman(execUser *user.User, command []string) error {
if err != nil {
return err
}
- defer func() error {
- err := cmdLogin.Process.Kill()
- if err != nil {
- return err
- }
- return cmdLogin.Wait()
+
+ defer func() {
+ _ = cmdLogin.Process.Kill()
+ _ = cmdLogin.Wait()
}()
+
cmd := exec.Command(command[0], command[1:]...)
cmd.Env = []string{"PATH=" + os.Getenv("PATH"), "TERM=" + os.Getenv("TERM")}
cmd.Stderr = os.Stderr
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 4d8c5a381..236d56053 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -215,7 +215,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// FIXME This is very hard to support properly with a good ux
if len(options.StaticIPs) > *ipIndex {
if !podOpt.Net.Network.IsBridge() {
- errors.Wrap(define.ErrInvalidArg, "static ip addresses can only be set when the network mode is bridge")
+ return nil, errors.Wrap(define.ErrInvalidArg, "static ip addresses can only be set when the network mode is bridge")
}
if len(podOpt.Net.Networks) != 1 {
return nil, errors.Wrap(define.ErrInvalidArg, "cannot set static ip addresses for more than network, use netname:ip=<ip> syntax to specify ips for more than network")
@@ -230,7 +230,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
if len(options.StaticMACs) > *ipIndex {
if !podOpt.Net.Network.IsBridge() {
- errors.Wrap(define.ErrInvalidArg, "static mac address can only be set when the network mode is bridge")
+ return nil, errors.Wrap(define.ErrInvalidArg, "static mac address can only be set when the network mode is bridge")
}
if len(podOpt.Net.Networks) != 1 {
return nil, errors.Wrap(define.ErrInvalidArg, "cannot set static mac address for more than network, use netname:mac=<mac> syntax to specify mac for more than network")
diff --git a/pkg/domain/infra/abi/terminal/sigproxy_linux.go b/pkg/domain/infra/abi/terminal/sigproxy_linux.go
index 206ded091..fe2c268c0 100644
--- a/pkg/domain/infra/abi/terminal/sigproxy_linux.go
+++ b/pkg/domain/infra/abi/terminal/sigproxy_linux.go
@@ -20,7 +20,7 @@ const signalBufferSize = 2048
func ProxySignals(ctr *libpod.Container) {
// Stop catching the shutdown signals (SIGINT, SIGTERM) - they're going
// to the container now.
- shutdown.Stop()
+ shutdown.Stop() // nolint: errcheck
sigBuffer := make(chan os.Signal, signalBufferSize)
signal.CatchAll(sigBuffer)
diff --git a/pkg/domain/infra/abi/terminal/terminal_linux.go b/pkg/domain/infra/abi/terminal/terminal_linux.go
index 78c792d2b..153b19fdb 100644
--- a/pkg/domain/infra/abi/terminal/terminal_linux.go
+++ b/pkg/domain/infra/abi/terminal/terminal_linux.go
@@ -39,7 +39,7 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpo
// StartAttachCtr starts and (if required) attaches to a container
// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream
// error. we may need to just lint disable this one.
-func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool) error { //nolint-interfacer
+func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool) error { //nolint: interfacer
resize := make(chan define.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
diff --git a/pkg/domain/infra/abi/volumes.go b/pkg/domain/infra/abi/volumes.go
index 19fc6d2d3..f59f11e20 100644
--- a/pkg/domain/infra/abi/volumes.go
+++ b/pkg/domain/infra/abi/volumes.go
@@ -171,7 +171,8 @@ func (ic *ContainerEngine) VolumeMounted(ctx context.Context, nameOrID string) (
}
mountCount, err := vol.MountCount()
if err != nil {
- return &entities.BoolReport{Value: false}, nil
+ // FIXME: this error should probably be returned
+ return &entities.BoolReport{Value: false}, nil // nolint: nilerr
}
if mountCount > 0 {
return &entities.BoolReport{Value: true}, nil
diff --git a/pkg/domain/infra/tunnel/containers.go b/pkg/domain/infra/tunnel/containers.go
index 046c2509d..10bfb3984 100644
--- a/pkg/domain/infra/tunnel/containers.go
+++ b/pkg/domain/infra/tunnel/containers.go
@@ -840,7 +840,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if eventsErr != nil || lastEvent == nil {
logrus.Errorf("Cannot get exit code: %v", err)
report.ExitCode = define.ExecErrorCodeNotFound
- return &report, nil // compat with local client
+ return &report, nil // nolint: nilerr
}
report.ExitCode = lastEvent.ContainerExitCode
@@ -938,7 +938,7 @@ func (ic *ContainerEngine) ContainerStat(ctx context.Context, nameOrID string, p
return containers.Stat(ic.ClientCtx, nameOrID, path)
}
-// Shutdown Libpod engine
+// Shutdown Libpod engine.
func (ic *ContainerEngine) Shutdown(_ context.Context) {
}
@@ -949,7 +949,7 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri
return containers.Stats(ic.ClientCtx, namesOrIds, new(containers.StatsOptions).WithStream(options.Stream).WithInterval(options.Interval))
}
-// ShouldRestart reports back whether the container will restart
+// ShouldRestart reports back whether the container will restart.
func (ic *ContainerEngine) ShouldRestart(_ context.Context, id string) (bool, error) {
return containers.ShouldRestart(ic.ClientCtx, id, nil)
}
diff --git a/pkg/domain/infra/tunnel/events.go b/pkg/domain/infra/tunnel/events.go
index 1f27cdff8..b472ad03a 100644
--- a/pkg/domain/infra/tunnel/events.go
+++ b/pkg/domain/infra/tunnel/events.go
@@ -34,7 +34,7 @@ func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptio
}
// GetLastContainerEvent takes a container name or ID and an event status and returns
-// the last occurrence of the container event
+// the last occurrence of the container event.
func (ic *ContainerEngine) GetLastContainerEvent(ctx context.Context, nameOrID string, containerEvent events.Status) (*events.Event, error) {
// check to make sure the event.Status is valid
if _, err := events.StringToStatus(containerEvent.String()); err != nil {
diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index 3ee97d94c..62eacb19f 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -28,7 +28,7 @@ func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.Boo
}
func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts entities.ImageRemoveOptions) (*entities.ImageRemoveReport, []error) {
- options := new(images.RemoveOptions).WithForce(opts.Force).WithAll(opts.All)
+ options := new(images.RemoveOptions).WithForce(opts.Force).WithIgnore(opts.Ignore).WithAll(opts.All)
return images.Remove(ir.ClientCtx, imagesArg, options)
}
diff --git a/pkg/env/env.go b/pkg/env/env.go
index ecd2d62a5..5989d0da5 100644
--- a/pkg/env/env.go
+++ b/pkg/env/env.go
@@ -26,7 +26,7 @@ func DefaultEnvVariables() map[string]string {
// Slice transforms the specified map of environment variables into a
// slice. If a value is non-empty, the key and value are joined with '='.
func Slice(m map[string]string) []string {
- env := make([]string, len(m))
+ env := make([]string, 0, len(m))
for k, v := range m {
var s string
if len(v) > 0 {
diff --git a/pkg/errorhandling/errorhandling.go b/pkg/errorhandling/errorhandling.go
index 04110b62a..e33c26032 100644
--- a/pkg/errorhandling/errorhandling.go
+++ b/pkg/errorhandling/errorhandling.go
@@ -28,7 +28,7 @@ func JoinErrors(errs []error) error {
finalErr := multiE.ErrorOrNil()
if finalErr == nil {
- return finalErr
+ return nil
}
return errors.New(strings.TrimSpace(finalErr.Error()))
}
diff --git a/pkg/inspect/inspect.go b/pkg/inspect/inspect.go
index cd26db6b0..767d86daf 100644
--- a/pkg/inspect/inspect.go
+++ b/pkg/inspect/inspect.go
@@ -9,7 +9,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
-// ImageData holds the inspect information of an image
+// ImageData holds the inspect information of an image.
type ImageData struct {
ID string `json:"Id"`
Digest digest.Digest `json:"Digest"`
@@ -36,13 +36,13 @@ type ImageData struct {
HealthCheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"`
}
-// RootFS holds the root fs information of an image
+// RootFS holds the root fs information of an image.
type RootFS struct {
Type string `json:"Type"`
Layers []digest.Digest `json:"Layers"`
}
-// ImageResult is used for podman images for collection and output
+// ImageResult is used for podman images for collection and output.
type ImageResult struct {
Tag string
Repository string
diff --git a/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go b/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
index fccddc3e0..352cc028f 100644
--- a/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -579,9 +579,9 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
- result = append(result, number...)
- result = append(result, suffix...)
- result = append(result, '"')
+ result = append(result, number...) // nolint: makezero
+ result = append(result, suffix...) // nolint: makezero
+ result = append(result, '"') // nolint: makezero
return result, nil
}
diff --git a/pkg/lookup/lookup.go b/pkg/lookup/lookup.go
index 0b22a1974..0601e829d 100644
--- a/pkg/lookup/lookup.go
+++ b/pkg/lookup/lookup.go
@@ -14,7 +14,7 @@ const (
etcgroup = "/etc/group"
)
-// Overrides allows you to override defaults in GetUserGroupInfo
+// Overrides allows you to override defaults in GetUserGroupInfo.
type Overrides struct {
DefaultUser *user.ExecUser
ContainerEtcPasswdPath string
diff --git a/pkg/machine/config.go b/pkg/machine/config.go
index b3b105150..aaf8da872 100644
--- a/pkg/machine/config.go
+++ b/pkg/machine/config.go
@@ -36,9 +36,9 @@ type InitOptions struct {
type QemuMachineStatus = string
const (
- // Running indicates the qemu vm is running
+ // Running indicates the qemu vm is running.
Running QemuMachineStatus = "running"
- // Stopped indicates the vm has stopped
+ // Stopped indicates the vm has stopped.
Stopped QemuMachineStatus = "stopped"
DefaultMachineName string = "podman-machine-default"
)
@@ -147,7 +147,7 @@ func (rc RemoteConnectionType) MakeSSHURL(host, path, port, userName string) url
}
// GetDataDir returns the filepath where vm images should
-// live for podman-machine
+// live for podman-machine.
func GetDataDir(vmType string) (string, error) {
data, err := homedir.GetDataHome()
if err != nil {
diff --git a/pkg/machine/fedora.go b/pkg/machine/fedora.go
index b26921b52..bed45c6da 100644
--- a/pkg/machine/fedora.go
+++ b/pkg/machine/fedora.go
@@ -59,7 +59,10 @@ func (f FedoraDownload) Get() *Download {
func (f FedoraDownload) HasUsableCache() (bool, error) {
info, err := os.Stat(f.LocalPath)
if err != nil {
- return false, nil
+ if errors.Is(err, os.ErrNotExist) {
+ return false, nil
+ }
+ return false, err
}
return info.Size() == f.Size, nil
}
diff --git a/pkg/machine/qemu/config.go b/pkg/machine/qemu/config.go
index b39334be0..211d96ccb 100644
--- a/pkg/machine/qemu/config.go
+++ b/pkg/machine/qemu/config.go
@@ -61,6 +61,6 @@ type Monitor struct {
var (
// defaultQMPTimeout is the timeout duration for the
- // qmp monitor interactions
+ // qmp monitor interactions.
defaultQMPTimeout time.Duration = 2 * time.Second
)
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index d30e51215..1bd9fb51b 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -34,7 +34,7 @@ import (
var (
qemuProvider = &Provider{}
- // vmtype refers to qemu (vs libvirt, krun, etc)
+ // vmtype refers to qemu (vs libvirt, krun, etc).
vmtype = "qemu"
)
@@ -278,7 +278,9 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
fmt.Println("An ignition path was provided. No SSH connection was added to Podman")
}
// Write the JSON file
- v.writeConfig()
+ if err := v.writeConfig(); err != nil {
+ return false, fmt.Errorf("writing JSON file: %w", err)
+ }
// User has provided ignition file so keygen
// will be skipped.
@@ -370,7 +372,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
conn net.Conn
err error
qemuSocketConn net.Conn
- wait time.Duration = time.Millisecond * 500
+ wait = time.Millisecond * 500
)
if v.isIncompatible() {
@@ -626,7 +628,8 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
}
if err := qmpMonitor.Disconnect(); err != nil {
- return nil
+ // FIXME: this error should probably be returned
+ return nil // nolint: nilerr
}
disconnected = true
@@ -755,7 +758,8 @@ func (v *MachineVM) isRunning() (bool, error) {
// Check if we can dial it
monitor, err := qmp.NewSocketMonitor(v.QMPMonitor.Network, v.QMPMonitor.Address, v.QMPMonitor.Timeout)
if err != nil {
- return false, nil
+ // FIXME: this error should probably be returned
+ return false, nil // nolint: nilerr
}
if err := monitor.Connect(); err != nil {
return false, err
@@ -1097,10 +1101,13 @@ func waitAndPingAPI(sock string) {
Transport: &http.Transport{
DialContext: func(context.Context, string, string) (net.Conn, error) {
con, err := net.DialTimeout("unix", sock, apiUpTimeout)
- if err == nil {
- con.SetDeadline(time.Now().Add(apiUpTimeout))
+ if err != nil {
+ return nil, err
+ }
+ if err := con.SetDeadline(time.Now().Add(apiUpTimeout)); err != nil {
+ return nil, err
}
- return con, err
+ return con, nil
},
},
}
diff --git a/pkg/rootless/rootless.go b/pkg/rootless/rootless.go
index 93b4e2e9f..13f8078e2 100644
--- a/pkg/rootless/rootless.go
+++ b/pkg/rootless/rootless.go
@@ -1,6 +1,8 @@
package rootless
import (
+ "errors"
+ "fmt"
"os"
"sort"
"sync"
@@ -8,7 +10,6 @@ import (
"github.com/containers/storage/pkg/lockfile"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
- "github.com/pkg/errors"
)
// TryJoinPauseProcess attempts to join the namespaces of the pause PID via
@@ -16,12 +17,15 @@ import (
// file.
func TryJoinPauseProcess(pausePidPath string) (bool, int, error) {
if _, err := os.Stat(pausePidPath); err != nil {
- return false, -1, nil
+ if errors.Is(err, os.ErrNotExist) {
+ return false, -1, nil
+ }
+ return false, -1, err
}
became, ret, err := TryJoinFromFilePaths("", false, []string{pausePidPath})
if err == nil {
- return became, ret, err
+ return became, ret, nil
}
// It could not join the pause process, let's lock the file before trying to delete it.
@@ -31,7 +35,7 @@ func TryJoinPauseProcess(pausePidPath string) (bool, int, error) {
if os.IsNotExist(err) {
return false, -1, nil
}
- return false, -1, errors.Wrapf(err, "error acquiring lock on %s", pausePidPath)
+ return false, -1, fmt.Errorf("error acquiring lock on %s: %w", pausePidPath, err)
}
pidFileLock.Lock()
@@ -46,7 +50,7 @@ func TryJoinPauseProcess(pausePidPath string) (bool, int, error) {
if err != nil {
// It is still failing. We can safely remove it.
os.Remove(pausePidPath)
- return false, -1, nil
+ return false, -1, nil // nolint: nilerr
}
return became, ret, err
}
diff --git a/pkg/signal/signal_common.go b/pkg/signal/signal_common.go
index 8ff4b4dbf..5ea67843a 100644
--- a/pkg/signal/signal_common.go
+++ b/pkg/signal/signal_common.go
@@ -25,7 +25,7 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) {
}
// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input
-// can be a name or number representation i.e. "KILL" "9"
+// can be a name or number representation i.e. "KILL" "9".
func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
basename := strings.TrimPrefix(rawSignal, "-")
s, err := ParseSignal(basename)
diff --git a/pkg/specgen/generate/config_linux.go b/pkg/specgen/generate/config_linux.go
index a5772bc6a..35d7f0252 100644
--- a/pkg/specgen/generate/config_linux.go
+++ b/pkg/specgen/generate/config_linux.go
@@ -262,8 +262,8 @@ func addDevice(g *generate.Generator, device string) error {
// ParseDevice parses a device mapping string to a src, dest & permissions string
func ParseDevice(device string) (string, string, string, error) { //nolint
- src := ""
- dst := ""
+ var src string
+ var dst string
permissions := "rwm"
arr := strings.Split(device, ":")
switch len(arr) {
diff --git a/pkg/specgen/generate/ports_bench_test.go b/pkg/specgen/generate/ports_bench_test.go
index f208a34c5..f65cd2f15 100644
--- a/pkg/specgen/generate/ports_bench_test.go
+++ b/pkg/specgen/generate/ports_bench_test.go
@@ -9,7 +9,7 @@ import (
func benchmarkParsePortMapping(b *testing.B, ports []types.PortMapping) {
for n := 0; n < b.N; n++ {
- ParsePortMapping(ports, nil)
+ _, _ = ParsePortMapping(ports, nil)
}
}
diff --git a/pkg/specgenutil/specgen.go b/pkg/specgenutil/specgen.go
index b87da61fb..688cc2337 100644
--- a/pkg/specgenutil/specgen.go
+++ b/pkg/specgenutil/specgen.go
@@ -847,7 +847,8 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
if len(cmdArr) == 0 {
return nil, errors.New("Must define a healthcheck command for all healthchecks")
}
- concat := ""
+
+ var concat string
if cmdArr[0] == "CMD" || cmdArr[0] == "none" { // this is for compat, we are already split properly for most compat cases
cmdArr = strings.Fields(inCmd)
} else if cmdArr[0] != "CMD-SHELL" { // this is for podman side of things, won't contain the keywords
diff --git a/pkg/systemd/dbus.go b/pkg/systemd/dbus.go
index 1b1bc8be9..44feb8308 100644
--- a/pkg/systemd/dbus.go
+++ b/pkg/systemd/dbus.go
@@ -24,7 +24,6 @@ func IsSystemdSessionValid(uid int) bool {
if rootless.IsRootless() {
conn, err = GetLogindConnection(rootless.GetRootlessUID())
- object = conn.Object(dbusDest, godbus.ObjectPath(dbusPath))
if err != nil {
//unable to fetch systemd object for logind
logrus.Debugf("systemd-logind: %s", err)
diff --git a/pkg/systemd/generate/containers.go b/pkg/systemd/generate/containers.go
index c01bb1baf..e11aed771 100644
--- a/pkg/systemd/generate/containers.go
+++ b/pkg/systemd/generate/containers.go
@@ -362,7 +362,9 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
fs.StringArrayP("env", "e", nil, "")
fs.String("sdnotify", "", "")
fs.String("restart", "", "")
- fs.Parse(remainingCmd)
+ if err := fs.Parse(remainingCmd); err != nil {
+ return "", fmt.Errorf("parsing remaining command-line arguments: %w", err)
+ }
remainingCmd = filterCommonContainerFlags(remainingCmd, fs.NArg())
// If the container is in a pod, make sure that the
diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go
index 78ae6391b..15b598ae8 100644
--- a/pkg/systemd/generate/pods.go
+++ b/pkg/systemd/generate/pods.go
@@ -335,7 +335,9 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
fs.SetInterspersed(false)
fs.String("name", "", "")
fs.Bool("replace", false, "")
- fs.Parse(podCreateArgs)
+ if err := fs.Parse(podCreateArgs); err != nil {
+ return "", fmt.Errorf("parsing remaining command-line arguments: %w", err)
+ }
hasNameParam := fs.Lookup("name").Changed
hasReplaceParam, err := fs.GetBool("replace")
diff --git a/pkg/terminal/console_unix.go b/pkg/terminal/console_unix.go
index e6c0442c9..53290be24 100644
--- a/pkg/terminal/console_unix.go
+++ b/pkg/terminal/console_unix.go
@@ -3,7 +3,7 @@
package terminal
-// SetConsole for non-windows environments is a no-op
+// SetConsole for non-windows environments is a no-op.
func SetConsole() error {
return nil
}
diff --git a/pkg/util/camelcase/camelcase.go b/pkg/util/camelcase/camelcase.go
index d27ac00d6..eaf7c0178 100644
--- a/pkg/util/camelcase/camelcase.go
+++ b/pkg/util/camelcase/camelcase.go
@@ -51,10 +51,10 @@ func Split(src string) (entries []string) {
}
entries = []string{}
var runes [][]rune
- lastClass := 0
- class := 0
+ var lastClass int
// split into fields based on class of unicode character
for _, r := range src {
+ var class int
switch {
case unicode.IsLower(r):
class = 1
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 925ff9830..1beb3b28e 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -727,7 +727,7 @@ func SocketPath() (string, error) {
func LookupUser(name string) (*user.User, error) {
// Assume UID look up first, if it fails lookup by username
if u, err := user.LookupId(name); err == nil {
- return u, err
+ return u, nil
}
return user.Lookup(name)
}
diff --git a/test/compose/slirp4netns_opts/tests.sh b/test/compose/slirp4netns_opts/tests.sh
index cfa84e1e4..2d41311ad 100644
--- a/test/compose/slirp4netns_opts/tests.sh
+++ b/test/compose/slirp4netns_opts/tests.sh
@@ -1,20 +1,19 @@
# -*- bash -*-
-output="$(cat $OUTFILE)"
expected="teststring"
# Reading from the nc socket is flaky because docker-compose only starts
# the containers. We cannot know at this point whether the container has already
# sent the message. Give the container 5 seconds to send the message
# to prevent flakes.
-local _timeout=5
-while [ $_timeout -gt 0 ]; do
+container_timeout=5
+while [ $container_timeout -gt 0 ]; do
+ output="$(< $OUTFILE)"
if [ -n "$output" ]; then
break
fi
sleep 1
- _timeout=$(($_timeout - 1))
- output="$(cat $OUTFILE)"
+ container_timeout=$(($container_timeout - 1))
done
is "$output" "$expected" "$testname : nc received teststring"
diff --git a/test/e2e/attach_test.go b/test/e2e/attach_test.go
index a7af76529..74e3a619a 100644
--- a/test/e2e/attach_test.go
+++ b/test/e2e/attach_test.go
@@ -1,7 +1,6 @@
package integration
import (
- "os"
"syscall"
"time"
@@ -20,12 +19,11 @@ var _ = Describe("Podman attach", func() {
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
- if err != nil {
- os.Exit(1)
- }
+ Expect(err).To(BeNil())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
+ err = podmanTest.SeedImages()
+ Expect(err).To(BeNil())
})
AfterEach(func() {
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 5abc672e9..7b2dd89c9 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -37,12 +37,12 @@ var _ = Describe("Podman checkpoint", func() {
BeforeEach(func() {
SkipIfRootless("checkpoint not supported in rootless mode")
tempdir, err = CreateTempDirInTempDir()
- if err != nil {
- os.Exit(1)
- }
+ Expect(err).To(BeNil())
+
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
+ err = podmanTest.SeedImages()
+ Expect(err).To(BeNil())
// Check if the runtime implements checkpointing. Currently only
// runc's checkpoint/restore implementation is supported.
cmd := exec.Command(podmanTest.OCIRuntime, "checkpoint", "--help")
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index 6bcf17bfe..78b607f1e 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -21,12 +21,11 @@ var _ = Describe("Podman commit", func() {
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
- if err != nil {
- os.Exit(1)
- }
+ Expect(err).To(BeNil())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
+ err = podmanTest.SeedImages()
+ Expect(err).To(BeNil())
})
AfterEach(func() {
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index bc6d89fad..cb6574f23 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -809,7 +809,8 @@ func (p *PodmanTestIntegration) RestoreArtifactToCache(image string) error {
func populateCache(podman *PodmanTestIntegration) {
for _, image := range CACHE_IMAGES {
- podman.RestoreArtifactToCache(image)
+ err := podman.RestoreArtifactToCache(image)
+ Expect(err).To(BeNil())
}
// logformatter uses this to recognize the first test
fmt.Printf("-----------------------------\n")
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index 339fa66d8..4c3b5604a 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -24,12 +24,11 @@ var _ = Describe("Podman create", func() {
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
- if err != nil {
- os.Exit(1)
- }
+ Expect(err).To(BeNil())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
+ err = podmanTest.SeedImages()
+ Expect(err).To(BeNil())
})
AfterEach(func() {
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index 866edbf0e..757eaed20 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -54,6 +54,16 @@ var _ = Describe("Podman healthcheck run", func() {
Expect(hc).Should(Exit(125))
})
+ It("podman disable healthcheck with --no-healthcheck must not show starting on status", func() {
+ session := podmanTest.Podman([]string{"run", "-dt", "--no-healthcheck", "--name", "hc", healthcheck})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ hc := podmanTest.Podman([]string{"container", "inspect", "--format", "{{.State.Health.Status}}", "hc"})
+ hc.WaitWithDefaultTimeout()
+ Expect(hc).Should(Exit(0))
+ Expect(hc.OutputToString()).To(Not(ContainSubstring("starting")))
+ })
+
It("podman run healthcheck and logs should contain healthcheck output", func() {
session := podmanTest.Podman([]string{"run", "--name", "test-logs", "-dt", "--health-interval", "1s", "--health-cmd", "echo working", "busybox", "sleep", "3600"})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index 3943a5e87..bb5a3a6ad 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -86,6 +86,7 @@ var _ = Describe("Podman inspect", func() {
It("podman inspect container with GO format for ConmonPidFile", func() {
session, ec, _ := podmanTest.RunLsContainer("test1")
+ session.WaitWithDefaultTimeout()
Expect(ec).To(Equal(0))
session = podmanTest.Podman([]string{"inspect", "--format", "{{.ConmonPidFile}}", "test1"})
@@ -94,7 +95,8 @@ var _ = Describe("Podman inspect", func() {
})
It("podman inspect container with size", func() {
- _, ec, _ := podmanTest.RunLsContainer("sizetest")
+ session, ec, _ := podmanTest.RunLsContainer("sizetest")
+ session.WaitWithDefaultTimeout()
Expect(ec).To(Equal(0))
result := podmanTest.Podman([]string{"inspect", "--size", "sizetest"})
@@ -107,6 +109,7 @@ var _ = Describe("Podman inspect", func() {
It("podman inspect container and image", func() {
ls, ec, _ := podmanTest.RunLsContainer("")
+ ls.WaitWithDefaultTimeout()
Expect(ec).To(Equal(0))
cid := ls.OutputToString()
@@ -118,6 +121,7 @@ var _ = Describe("Podman inspect", func() {
It("podman inspect container and filter for Image{ID}", func() {
ls, ec, _ := podmanTest.RunLsContainer("")
+ ls.WaitWithDefaultTimeout()
Expect(ec).To(Equal(0))
cid := ls.OutputToString()
@@ -134,6 +138,7 @@ var _ = Describe("Podman inspect", func() {
It("podman inspect container and filter for CreateCommand", func() {
ls, ec, _ := podmanTest.RunLsContainer("")
+ ls.WaitWithDefaultTimeout()
Expect(ec).To(Equal(0))
cid := ls.OutputToString()
@@ -529,6 +534,7 @@ var _ = Describe("Podman inspect", func() {
It("podman inspect container with GO format for PidFile", func() {
SkipIfRemote("pidfile not handled by remote")
session, ec, _ := podmanTest.RunLsContainer("test1")
+ session.WaitWithDefaultTimeout()
Expect(ec).To(Equal(0))
session = podmanTest.Podman([]string{"inspect", "--format", "{{.PidFile}}", "test1"})
diff --git a/test/e2e/system_df_test.go b/test/e2e/system_df_test.go
index 2d75316ad..a9fa5f4ac 100644
--- a/test/e2e/system_df_test.go
+++ b/test/e2e/system_df_test.go
@@ -41,11 +41,17 @@ var _ = Describe("podman system df", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"volume", "create", "data"})
+ // run two containers with volumes to create something in the volumes
+ session = podmanTest.Podman([]string{"run", "-v", "data1:/data", "--name", "container1", BB, "sh", "-c", "echo test > /data/1"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- session = podmanTest.Podman([]string{"create", "-v", "data:/data", "--name", "container1", BB})
+ session = podmanTest.Podman([]string{"run", "-v", "data2:/data", "--name", "container2", BB, "sh", "-c", "echo test > /data/1"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+
+ // remove one container but keep its volume
+ session = podmanTest.Podman([]string{"rm", "container2"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -61,9 +67,10 @@ var _ = Describe("podman system df", func() {
images := strings.Fields(session.OutputToStringArray()[1])
containers := strings.Fields(session.OutputToStringArray()[2])
volumes := strings.Fields(session.OutputToStringArray()[3])
- Expect(images[1]).To(Equal(string(totImages)))
- Expect(containers[1]).To(Equal("2"))
- Expect(volumes[2]).To(Equal("1"))
+ Expect(images[1]).To(Equal(string(totImages)), "total images expected")
+ Expect(containers[1]).To(Equal("2"), "total containers expected")
+ Expect(volumes[2]).To(Equal("2"), "total volumes expected")
+ Expect(volumes[6]).To(Equal("(50%)"), "percentage usage expected")
})
It("podman system df image with no tag", func() {
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index dbf4b2828..257508418 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -303,4 +303,13 @@ Deleted: $pauseID"
run_podman image exists $IMAGE
}
+@test "podman rmi --ignore" {
+ random_image_name=$(random_string)
+ random_image_name=${random_image_name,,} # name must be lowercase
+ run_podman 1 rmi $random_image_name
+ is "$output" "Error: $random_image_name: image not known.*"
+ run_podman rmi --ignore $random_image_name
+ is "$output" ""
+}
+
# vim: filetype=sh
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index ca931e244..f5fe41924 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -6,7 +6,7 @@ load helpers
function teardown() {
run_podman pod rm -f -t 0 -a
run_podman rm -f -t 0 -a
- run_podman ? rmi $(pause_image)
+ run_podman rmi --ignore $(pause_image)
basic_teardown
}
@@ -317,16 +317,17 @@ EOF
@test "podman pod create should fail when infra-name is already in use" {
local infra_name="infra_container_$(random_string 10 | tr A-Z a-z)"
+ local infra_image="k8s.gcr.io/pause:3.5"
local pod_name="$(random_string 10 | tr A-Z a-z)"
- run_podman --noout pod create --name $pod_name --infra-name "$infra_name" --infra-image "k8s.gcr.io/pause:3.5"
- is "$output" "" "output should be empty"
+ run_podman --noout pod create --name $pod_name --infra-name "$infra_name" --infra-image "$infra_image"
+ is "$output" "" "output from pod create should be empty"
run_podman '?' pod create --infra-name "$infra_name"
if [ $status -eq 0 ]; then
die "Podman should fail when user try to create two pods with the same infra-name value"
fi
run_podman pod rm -f $pod_name
- run_podman images -a
+ run_podman rmi $infra_image
}
@test "podman pod create --share" {
diff --git a/test/utils/common_function_test.go b/test/utils/common_function_test.go
index 810d9f2a5..6323b44eb 100644
--- a/test/utils/common_function_test.go
+++ b/test/utils/common_function_test.go
@@ -51,7 +51,8 @@ var _ = Describe("Common functions test", func() {
txt := fmt.Sprintf("ID=%s\nVERSION_ID=%s", id, ver)
if !empty {
f, _ := os.Create(path)
- f.WriteString(txt)
+ _, err := f.WriteString(txt)
+ Expect(err).To(BeNil(), "Failed to write data.")
f.Close()
}
@@ -102,9 +103,10 @@ var _ = Describe("Common functions test", func() {
Item2: []string{"test"},
}
- testByte, _ := json.Marshal(testData)
- err := WriteJSONFile(testByte, "/tmp/testJSON")
+ testByte, err := json.Marshal(testData)
+ Expect(err).To(BeNil(), "Failed to marshal data.")
+ err = WriteJSONFile(testByte, "/tmp/testJSON")
Expect(err).To(BeNil(), "Failed to write JSON to file.")
read, err := os.Open("/tmp/testJSON")
@@ -135,7 +137,8 @@ var _ = Describe("Common functions test", func() {
}
if createFile {
f, _ := os.Create(path)
- f.WriteString(txt)
+ _, err := f.WriteString(txt)
+ Expect(err).To(BeNil(), "Failed to write data.")
f.Close()
}
ProcessOneCgroupPath = path
diff --git a/test/utils/matchers.go b/test/utils/matchers.go
index 288779b63..0c0948e4b 100644
--- a/test/utils/matchers.go
+++ b/test/utils/matchers.go
@@ -13,7 +13,7 @@ import (
"github.com/onsi/gomega/types"
)
-// HaveActiveService verifies the given service is the active service
+// HaveActiveService verifies the given service is the active service.
func HaveActiveService(name interface{}) OmegaMatcher {
return WithTransform(
func(cfg *config.Config) string {
@@ -86,7 +86,7 @@ type URLMatcher struct {
matchers.EqualMatcher
}
-// VerifyURL matches when actual is a valid URL and matches expected
+// VerifyURL matches when actual is a valid URL and matches expected.
func VerifyURL(uri interface{}) OmegaMatcher {
return &URLMatcher{matchers.EqualMatcher{Expected: uri}}
}
@@ -129,7 +129,7 @@ func ExitWithError(optionalExitCode ...int) *ExitMatcher {
return &ExitMatcher{Expected: exitCode}
}
-// Match follows gexec.Matcher interface
+// Match follows gexec.Matcher interface.
func (matcher *ExitMatcher) Match(actual interface{}) (success bool, err error) {
exiter, ok := actual.(gexec.Exiter)
if !ok {
@@ -184,7 +184,7 @@ func (matcher *ValidJSONMatcher) Match(actual interface{}) (success bool, err er
var i interface{}
if err := json.Unmarshal([]byte(s), &i); err != nil {
- return false, nil
+ return false, err
}
return true, nil
}
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 8fe45dca0..a6295cd19 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -479,7 +479,10 @@ func IsCommandAvailable(command string) bool {
func WriteJSONFile(data []byte, filePath string) error {
var jsonData map[string]interface{}
json.Unmarshal(data, &jsonData)
- formatJSON, _ := json.MarshalIndent(jsonData, "", " ")
+ formatJSON, err := json.MarshalIndent(jsonData, "", " ")
+ if err != nil {
+ return err
+ }
return ioutil.WriteFile(filePath, formatJSON, 0644)
}
diff --git a/troubleshooting.md b/troubleshooting.md
index 32f14c1ee..f59963271 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -1177,3 +1177,18 @@ A side-note: Using [__--userns=keep-id__](https://docs.podman.io/en/latest/markd
can sometimes be an alternative solution, but it forces the regular
user's host UID to be mapped to the same UID inside the container
so it provides less flexibility than using __--uidmap__ and __--gidmap__.
+
+### 35) Images in the additional stores can be deleted even if there are containers using them
+
+When an image in an additional store is used, it is not locked; it can
+therefore be deleted even if there are containers using it.
+
+#### Symptom
+
+WARN[0000] Can't stat lower layer "/var/lib/containers/storage/overlay/l/7HS76F2P5N73FDUKUQAOJA3WI5" because it does not exist. Going through storage to recreate the missing symlinks.
+
+#### Solution
+
+It is the user's responsibility to make sure images in an additional
+store are not deleted while they are being used by containers in another
+store.
diff --git a/utils/ports.go b/utils/ports.go
index 0a4f67dcc..57a6f8275 100644
--- a/utils/ports.go
+++ b/utils/ports.go
@@ -7,7 +7,7 @@ import (
"github.com/pkg/errors"
)
-// Find a random, open port on the host
+// Find a random, open port on the host.
func GetRandomPort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
diff --git a/utils/utils.go b/utils/utils.go
index 22f0cb12f..d0e3dbb46 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -21,7 +21,7 @@ import (
)
// ExecCmd executes a command with args and returns its output as a string along
-// with an error, if any
+// with an error, if any.
func ExecCmd(name string, args ...string) (string, error) {
cmd := exec.Command(name, args...)
var stdout bytes.Buffer
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
index 184421370..895d6645a 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -3,7 +3,7 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16
require (
- github.com/klauspost/compress v1.15.0
+ github.com/klauspost/compress v1.15.1
github.com/opencontainers/go-digest v1.0.0
github.com/vbatts/tar-split v0.11.2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
index 0eb0b7a10..8b44342da 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -1,8 +1,8 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 00a3b5e4d..a780ef5da 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -292,6 +292,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
backingFs = fsName
}
+ runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the driver home dir
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
+ return nil, err
+ }
+
+ if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
+ return nil, err
+ }
+
+ if opts.mountProgram == "" {
+ if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
+ return nil, err
+ } else if !supported {
+ if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+ opts.mountProgram = path
+ }
+ }
+ }
+
if opts.mountProgram != "" {
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
m := os.FileMode(0700)
@@ -316,20 +341,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
- if err != nil {
- return nil, err
- }
-
- // Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
- return nil, err
- }
- runhome := filepath.Join(options.RunRoot, filepath.Base(home))
- if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
- return nil, err
- }
-
var usingMetacopy bool
var supportsDType bool
var supportsVolatile *bool
@@ -569,14 +580,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
return err
}
-func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
- if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
+func SupportsNativeOverlay(home, runhome string) (bool, error) {
+ if os.Geteuid() != 0 || home == "" || runhome == "" {
return false, nil
}
- home := filepath.Join(graphroot, "overlay")
- runhome := filepath.Join(rundir, "overlay")
-
var contents string
flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
if err == nil {
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 3e8d51f6a..4da8384af 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -6,13 +6,13 @@ require (
github.com/BurntSushi/toml v1.0.0
github.com/Microsoft/go-winio v0.5.2
github.com/Microsoft/hcsshim v0.9.2
- github.com/containerd/stargz-snapshotter/estargz v0.11.2
+ github.com/containerd/stargz-snapshotter/estargz v0.11.3
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.15.0
+ github.com/klauspost/compress v1.15.1
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
@@ -23,7 +23,7 @@ require (
github.com/opencontainers/selinux v1.10.0
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index ef6b711cf..b995da734 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -176,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2 h1:0P0vWmfrEeTtZ4BBRrpuyu/HxR9HPBLfeljGOra5f6g=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -424,8 +424,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -622,8 +622,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 567985b98..a71c6d2ef 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -3,14 +3,12 @@ package types
import (
"fmt"
"os"
- "os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
- "github.com/containers/storage/drivers/overlay"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
@@ -225,25 +223,11 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
opts.GraphDriverName = overlayDriver
}
- if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver {
- supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime)
- if err != nil {
- return opts, err
- }
- if supported {
- opts.GraphDriverName = overlayDriver
- } else {
- if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
- opts.GraphDriverName = overlayDriver
- opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
- }
- }
- if opts.GraphDriverName == overlayDriver {
- for _, o := range systemOpts.GraphDriverOptions {
- if strings.Contains(o, "ignore_chown_errors") {
- opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
- break
- }
+ if opts.GraphDriverName == overlayDriver {
+ for _, o := range systemOpts.GraphDriverOptions {
+ if strings.Contains(o, "ignore_chown_errors") {
+ opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
+ break
}
}
}
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 9ddf39f6f..0e2dc116a 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,23 @@ This package provides various compression algorithms.
# changelog
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+<details>
+ <summary>See Details</summary>
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use two cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+</details>
+
* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go
new file mode 100644
index 000000000..ff2c69d60
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/autogen.go
@@ -0,0 +1,5 @@
+package huff0
+
+//go:generate go run generate.go
+//go:generate asmfmt -w decompress_amd64.s
+//go:generate asmfmt -w decompress_8b_amd64.s
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 03562db16..451160edd 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -165,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
+// peekTopBits(n) is equvialent to peekBitFast(64 - n)
+func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
+ return uint16(b.value >> n)
+}
+
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 3ae7d4677..04f652995 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -729,189 +729,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if len(src) < 6+(4*1) {
- return nil, errors.New("input too small")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress4X8bit(dst, src)
- }
-
- var br [4]bitReaderShifted
- // Decode "jump table"
- start := 6
- for i := 0; i < 3; i++ {
- length := int(src[i*2]) | (int(src[i*2+1]) << 8)
- if start+length >= len(src) {
- return nil, errors.New("truncated input (or invalid offset)")
- }
- err := br[i].init(src[start : start+length])
- if err != nil {
- return nil, err
- }
- start += length
- }
- err := br[3].init(src[start:])
- if err != nil {
- return nil, err
- }
-
- // destination, offset to match first output
- dstSize := cap(dst)
- dst = dst[:dstSize]
- out := dst
- dstEvery := (dstSize + 3) / 4
-
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- single := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- buf := d.buffer()
- var off uint8
- var decoded int
-
- // Decode 2 values from each decoder/loop.
- const bufoff = 256
- for {
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
- break
- }
-
- {
- const stream = 0
- const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off] = uint8(v.entry >> 8)
- buf[stream2][off] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off+1] = uint8(v.entry >> 8)
- buf[stream2][off+1] = uint8(v2.entry >> 8)
- }
-
- {
- const stream = 2
- const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off] = uint8(v.entry >> 8)
- buf[stream2][off] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off+1] = uint8(v.entry >> 8)
- buf[stream2][off+1] = uint8(v2.entry >> 8)
- }
-
- off += 2
-
- if off == 0 {
- if bufoff > dstEvery {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[0][:off])
- copy(out[dstEvery:], buf[1][:off])
- copy(out[dstEvery*2:], buf[2][:off])
- copy(out[dstEvery*3:], buf[3][:off])
- decoded += int(off) * 4
- out = out[off:]
- }
-
- // Decode remaining.
- remainBytes := dstEvery - (decoded / 4)
- for i := range br {
- offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
- br := &br[i]
- bitsLeft := br.remaining()
- for bitsLeft > 0 {
- br.fill()
- if offset >= endsAt {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- val := br.peekBitsFast(d.actualTableLog)
- v := single[val&tlMask].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- if offset != endsAt {
- d.bufs.Put(buf)
- return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
- }
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- return nil, err
- }
- }
- d.bufs.Put(buf)
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
if d.actualTableLog == 8 {
return d.decompress4X8bitExactly(dst, src)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
new file mode 100644
index 000000000..0d6cb1a96
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
@@ -0,0 +1,488 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+ // const stream = 0
+ // br0.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+ MOVQ bitReaderShifted_value(br0), br_value
+ MOVQ bitReaderShifted_off(br0), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill0
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br0), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill0:
+
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 0(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 0+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+ MOVQ br_value, bitReaderShifted_value(br0)
+ MOVQ br_offset, bitReaderShifted_off(br0)
+
+ // const stream = 1
+ // br1.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+ MOVQ bitReaderShifted_value(br1), br_value
+ MOVQ bitReaderShifted_off(br1), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill1
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br1), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill1:
+
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 256(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 256+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+ MOVQ br_value, bitReaderShifted_value(br1)
+ MOVQ br_offset, bitReaderShifted_off(br1)
+
+ // const stream = 2
+ // br2.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+ MOVQ bitReaderShifted_value(br2), br_value
+ MOVQ bitReaderShifted_off(br2), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill2
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br2), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill2:
+
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 512(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 512+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+ MOVQ br_value, bitReaderShifted_value(br2)
+ MOVQ br_offset, bitReaderShifted_off(br2)
+
+ // const stream = 3
+ // br3.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+ MOVQ bitReaderShifted_value(br3), br_value
+ MOVQ bitReaderShifted_off(br3), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill3
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br3), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill3:
+
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 768(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 768+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+ MOVQ br_value, bitReaderShifted_value(br3)
+ MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $4, off // off += 4
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
new file mode 100644
index 000000000..6d477a2c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
@@ -0,0 +1,197 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+ // const stream = {{ var "id" }}
+ // br{{ var "id"}}.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+ MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+ MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill{{ var "id" }}
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+ // }
+skip_fill{{ var "id" }}:
+
+ // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+ MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+ MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+ {{ set "id" "0" }}
+ {{ set "ofs" "0" }}
+ {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "1" }}
+ {{ set "ofs" "8" }}
+ {{ set "bufofs" "256" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "2" }}
+ {{ set "ofs" "16" }}
+ {{ set "bufofs" "512" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "3" }}
+ {{ set "ofs" "24" }}
+ {{ set "bufofs" "768" }}
+ {{ template "decode_2_values_x86" . }}
+
+	ADDQ $4, off // off += 4
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
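The `.s.in` files are templates that expand into the checked-in `.s` sources. The `{{ set }}` / `{{ var }}` helpers are not standard text/template actions; the sketch below is one plausible wiring (an assumption about the generator, not the vendored tooling):

```go
// Hypothetical sketch of how "set"/"var" template helpers could be wired
// with text/template; the real generator may differ.
package main

import (
	"os"
	"text/template"
)

func main() {
	vars := map[string]string{}
	funcs := template.FuncMap{
		// "set" stores a value and emits nothing.
		"set": func(k, v string) string { vars[k] = v; return "" },
		// "var" reads a previously stored value.
		"var": func(k string) string { return vars[k] },
	}

	src := `{{ define "decode" }}stream {{ var "id" }}{{ end }}` +
		`{{ set "id" "0" }}{{ template "decode" . }}` + "\n" +
		`{{ set "id" "1" }}{{ template "decode" . }}` + "\n"

	t := template.Must(template.New("asm").Funcs(funcs).Parse(src))
	_ = t.Execute(os.Stdout, nil) // prints "stream 0" and "stream 1" on separate lines
}
```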
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 000000000..d47f6644f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,181 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// that uses an asm implementation of its main loop.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//go:noescape
+func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+ peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//go:noescape
+func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+ peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// fallback8BitSize is the size below which using the Go version is faster.
+const fallback8BitSize = 800
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+
+ use8BitTables := d.actualTableLog <= 8
+ if cap(dst) < fallback8BitSize && use8BitTables {
+ return d.decompress4X8bit(dst, src)
+ }
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ const debug = false
+
+ // see: bitReaderShifted.peekBitsFast()
+ peekBits := uint8((64 - d.actualTableLog) & 63)
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ if use8BitTables {
+ off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ } else {
+ off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ }
+ if debug {
+ fmt.Print("DEBUG: ")
+ fmt.Printf("off=%d,", off)
+ for i := 0; i < 4; i++ {
+ fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
+ i, br[i].bitsRead, br[i].value, br[i].off)
+ }
+ fmt.Println("")
+ }
+
+ if off != 0 {
+ break
+ }
+
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
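A hedged usage sketch of the exported entry points this file accelerates, assuming the package-level Compress4X/ReadTable helpers behave as documented; note the doc-comment contract that dst's capacity must equal the uncompressed size exactly:

```go
// Illustrative sketch only; error values such as ErrIncompressible or
// ErrUseRLE (returned for unsuitable input) are handled generically here.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("compressible huff0 payload "), 64)

	comp, _, err := huff0.Compress4X(in, nil)
	if err != nil {
		log.Fatal(err)
	}

	// The serialized Huffman table precedes the four compressed streams.
	scratch, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Capacity of dst must equal the uncompressed size exactly.
	out, err := scratch.Decoder().Decompress4X(make([]byte, 0, len(in)), remain)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(out, in)) // true
}
```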
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 000000000..2edad3ea5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,506 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+ // const stream = 0
+ // br0.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+ MOVQ bitReaderShifted_value(br0), br_value
+ MOVQ bitReaderShifted_off(br0), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill0
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br0), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill0:
+
+ // val0 := br0.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 0(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+ MOVQ br_value, bitReaderShifted_value(br0)
+ MOVQ br_offset, bitReaderShifted_off(br0)
+
+ // const stream = 1
+ // br1.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+ MOVQ bitReaderShifted_value(br1), br_value
+ MOVQ bitReaderShifted_off(br1), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill1
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br1), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill1:
+
+ // val0 := br1.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 256(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+ MOVQ br_value, bitReaderShifted_value(br1)
+ MOVQ br_offset, bitReaderShifted_off(br1)
+
+ // const stream = 2
+ // br2.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+ MOVQ bitReaderShifted_value(br2), br_value
+ MOVQ bitReaderShifted_off(br2), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill2
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br2), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill2:
+
+ // val0 := br2.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 512(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+ MOVQ br_value, bitReaderShifted_value(br2)
+ MOVQ br_offset, bitReaderShifted_off(br2)
+
+ // const stream = 3
+ // br3.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+ MOVQ bitReaderShifted_value(br3), br_value
+ MOVQ bitReaderShifted_off(br3), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill3
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br3), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill3:
+
+ // val0 := br3.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 768(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+ MOVQ br_value, bitReaderShifted_value(br3)
+ MOVQ br_offset, bitReaderShifted_off(br3)
+
+ ADDQ $2, off // off += 2
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
new file mode 100644
index 000000000..330d86ae1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
@@ -0,0 +1,195 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+ // const stream = {{ var "id" }}
+ // br{{ var "id"}}.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+ MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+ MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill{{ var "id" }}
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+ // }
+skip_fill{{ var "id" }}:
+
+ // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+ // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+ MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+ MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+ {{ set "id" "0" }}
+ {{ set "ofs" "0" }}
+ {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "1" }}
+ {{ set "ofs" "8" }}
+ {{ set "bufofs" "256" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "2" }}
+ {{ set "ofs" "16" }}
+ {{ set "bufofs" "512" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "3" }}
+ {{ set "ofs" "24" }}
+ {{ set "bufofs" "768" }}
+ {{ template "decode_2_values_x86" . }}
+
+ ADDQ $2, off // off += 2
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 000000000..126b4d68a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,193 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains a generic implementation of Decoder.Decompress4X.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ off += 2
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c876c591a..e3445ac19 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -153,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package:
file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -165,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 262183768 82837 11.51
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
## Decompressor
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 607b62ee3..7d567a54a 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -167,6 +167,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
+		// Empty compressed blocks must be at least 2 bytes:
+		// one for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
case blockTypeRaw:
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
if debugDecoder {
@@ -491,6 +496,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+ if debugDecoder {
+ printf("prepareSequences: %d byte(s) input\n", len(in))
+ }
// Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 {
@@ -499,8 +507,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
var nSeqs int
seqHeader := in[0]
switch {
- case seqHeader == 0:
- in = in[1:]
case seqHeader < 128:
nSeqs = int(seqHeader)
in = in[1:]
@@ -517,6 +523,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
+ if nSeqs == 0 && len(in) != 0 {
+ // When there are no sequences, there should be no more data on the stream.
+ if debugDecoder {
+ printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
+ }
+ return ErrUnexpectedBlockSize
+ }
var seqs = &hist.decoders
seqs.nSeqs = nSeqs
@@ -635,6 +648,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
hist.decoders.seqSize = len(hist.decoders.literals)
return nil
}
+ hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
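The removed `seqHeader == 0` case above is what the new `nSeqs == 0 && len(in) != 0` check tightens: a header byte of zero means the sequences section ends immediately, so trailing bytes are invalid input. Below is a rough, self-contained sketch of the header layout as described in the zstd format spec; `parseNSeqs` is a hypothetical helper for illustration, not part of this package:

```go
package main

import (
	"errors"
	"fmt"
)

// parseNSeqs illustrates the Sequences_Section_Header layout from the
// zstd format spec. It returns the sequence count and the number of
// header bytes consumed.
func parseNSeqs(in []byte) (nSeqs, consumed int, err error) {
	if len(in) < 1 {
		return 0, 0, errors.New("missing sequence header")
	}
	b0 := int(in[0])
	switch {
	case b0 == 0:
		// Zero sequences: the section is just this single byte,
		// so any trailing data is invalid input.
		return 0, 1, nil
	case b0 < 128:
		return b0, 1, nil
	case b0 < 255:
		if len(in) < 2 {
			return 0, 0, errors.New("truncated sequence header")
		}
		return (b0-128)<<8 + int(in[1]), 2, nil
	default: // b0 == 255
		if len(in) < 3 {
			return 0, 0, errors.New("truncated sequence header")
		}
		return 0x7F00 + int(in[1]) + int(in[2])<<8, 3, nil
	}
}

func main() {
	n, used, _ := parseNSeqs([]byte{0x85, 0x10}) // 0x85 selects the two-byte form
	fmt.Println(n, used)                         // 1296 2
}
```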
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index a93dfaf10..9fcdaac1d 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -348,10 +348,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.setDict(&dict)
}
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
- if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+ if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
@@ -514,7 +514,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Check frame size (before CRC)
d.syncStream.decodedFrame += uint64(len(d.current.b))
- if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
+ if d.syncStream.decodedFrame > d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
@@ -523,7 +523,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
}
// Check FCS
- if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+ if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
@@ -700,6 +700,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
+ hist.windowSize = block.async.newHist.windowSize
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
@@ -811,11 +812,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if !hasErr {
decodedFrame += uint64(len(do.b))
- if fcs > 0 && decodedFrame > fcs {
+ if decodedFrame > fcs {
println("fcs exceeded", block.Last, fcs, decodedFrame)
do.err = ErrFrameSizeExceeded
hasErr = true
- } else if block.Last && fcs > 0 && decodedFrame != fcs {
+ } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
do.err = ErrFrameSizeMismatch
hasErr = true
} else {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 29c3176b0..11089d223 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default:
fcsSize = 1 << v
}
- d.FrameContentSize = 0
+ d.FrameContentSize = fcsUnknown
if fcsSize > 0 {
b, err := br.readSmall(fcsSize)
if err != nil {
@@ -343,12 +343,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
err = ErrDecoderSizeExceeded
break
}
- if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
- println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
- err = ErrFrameSizeExceeded
- break
- }
- if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+ if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded
break
@@ -356,13 +351,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if dec.Last {
break
}
- if debugDecoder && d.FrameContentSize > 0 {
+ if debugDecoder {
println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
}
}
dst = d.history.b
if err == nil {
- if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+ if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int
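The recurring change in this file and in decoder.go is the sentinel for an unknown frame content size: 0 is a legal declared size (an empty frame), so "unknown" now gets its own marker instead of overloading zero. A small illustrative sketch of the pattern; the constant and helper are restated here for illustration only:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// fcsUnknown marks "frame content size not declared"; using 0 for this
// would make a genuinely empty frame indistinguishable from "unknown".
const fcsUnknown = math.MaxUint64

// checkFrameSize is a hypothetical stand-in for the final-size checks
// performed in framedec.go and decoder.go after this change.
func checkFrameSize(decoded, declared uint64) error {
	if declared != fcsUnknown && decoded != declared {
		return errors.New("frame size mismatch")
	}
	return nil
}

func main() {
	fmt.Println(checkFrameSize(0, 0))           // <nil>: empty frame, declared empty
	fmt.Println(checkFrameSize(10, fcsUnknown)) // <nil>: size not declared
	fmt.Println(checkFrameSize(5, 10))          // frame size mismatch
}
```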
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go
index fda8a7422..7f2210e05 100644
--- a/vendor/github.com/klauspost/compress/zstd/fuzz.go
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go
@@ -1,5 +1,5 @@
-//go:build gofuzz
-// +build gofuzz
+//go:build ignorecrc
+// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
index 0515b201c..6811c68a8 100644
--- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
@@ -1,5 +1,5 @@
-//go:build !gofuzz
-// +build !gofuzz
+//go:build !ignorecrc
+// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 213736ad7..819f1461b 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -107,7 +107,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
-
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
@@ -192,7 +195,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
@@ -230,7 +233,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
@@ -347,6 +350,10 @@ func (s *sequenceDecs) decodeSync(history *history) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
@@ -426,7 +433,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", size)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@@ -535,6 +542,11 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
}
+ // Check that there is space for the final literals.
+ if len(s.literals)+len(s.out)-startSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+ }
+
// Add final literals
s.out = append(out, s.literals...)
return br.close()
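The new `maxBlockSize` clamp above bounds a block's decoded output by the frame's window size rather than only the format-wide limit. A tiny sketch of that bound in isolation (names restated here only for illustration):

```go
package main

import "fmt"

// blockOutputLimit returns the maximum bytes a single block may decode to:
// the smaller of the format's hard per-block limit and the frame's window size.
func blockOutputLimit(windowSize, hardLimit int) int {
	if windowSize < hardLimit {
		return windowSize
	}
	return hardLimit
}

func main() {
	// e.g. a 64 KiB window tightens the usual 128 KiB per-block bound.
	fmt.Println(blockOutputLimit(64<<10, 128<<10)) // 65536
}
```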
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b31..ffffcbc25 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
-// newZipReader cannot be used since we would leak goroutines...
+// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
@@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.dec == nil {
- return 0, errors.New("Read after Close")
+ return 0, errors.New("read after close or EOF")
}
dec, err := r.dec.Read(p)
-
+ if err == io.EOF {
+ err = r.dec.Reset(nil)
+ zipReaderPool.Put(r.dec)
+ r.dec = nil
+ }
return dec, err
}
@@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return func(r io.Reader) io.ReadCloser {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
- }
- return d.IOReadCloser()
- }
+ return newZipReader
}
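With `ZipDecompressor` now returning the pooled `newZipReader`, callers register it with archive/zip the same way as before; a minimal usage sketch (the archive name is a placeholder):

```go
package main

import (
	"archive/zip"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	zr, err := zip.OpenReader("archive.zip") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	// Register the pooled zstd decompressor for the WinZip method ID.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())

	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(os.Stdout, rc); err != nil {
			log.Fatal(err)
		}
		rc.Close()
	}
}
```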
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 0b0c2571d..c1c90b4a0 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
// Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small")
+ // ErrUnexpectedBlockSize is returned when a block has unexpected size.
+ // Typically returned on invalid input.
+ ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
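For callers, the new `ErrUnexpectedBlockSize` surfaces like the other package-level sentinel errors; a hedged sketch of checking for it after a stateless decode (the input bytes are just a placeholder for corrupted data):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Stateless decode; a nil reader is fine when only DecodeAll is used.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	corrupt := []byte{0x00, 0x01, 0x02} // placeholder for invalid input
	if _, err := dec.DecodeAll(corrupt, nil); err != nil {
		if errors.Is(err, zstd.ErrUnexpectedBlockSize) {
			fmt.Println("block declared zero sequences but carried extra data")
		} else {
			fmt.Println("decode failed:", err)
		}
	}
}
```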
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c3dae287f..90d924d8d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -69,7 +69,7 @@ github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/sys
-# github.com/containerd/stargz-snapshotter/estargz v0.11.2
+# github.com/containerd/stargz-snapshotter/estargz v0.11.3
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containernetworking/cni v1.0.1
@@ -233,7 +233,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863
+# github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -455,7 +455,7 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.0
+# github.com/klauspost/compress v1.15.1
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse