-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 16
-rw-r--r--  .golangci.yml | 3
-rw-r--r--  Makefile | 11
-rw-r--r--  cmd/podman/common/completion.go | 5
-rw-r--r--  cmd/podman/common/create.go | 2
-rw-r--r--  cmd/podman/common/create_opts.go | 2
-rw-r--r--  cmd/podman/machine/init.go | 6
-rw-r--r--  cmd/podman/machine/inspect.go | 34
-rw-r--r--  cmd/podman/machine/list.go | 3
-rw-r--r--  cmd/podman/machine/machine.go | 110
-rw-r--r--  cmd/podman/machine/platform.go | 2
-rw-r--r--  cmd/podman/machine/platform_windows.go | 2
-rw-r--r--  cmd/podman/machine/rm.go | 12
-rw-r--r--  cmd/podman/machine/set.go | 2
-rw-r--r--  cmd/podman/machine/ssh.go | 2
-rw-r--r--  cmd/podman/machine/start.go | 6
-rw-r--r--  cmd/podman/machine/stop.go | 4
-rw-r--r--  cmd/podman/main.go | 9
-rw-r--r--  cmd/podman/play/kube.go | 2
-rw-r--r--  cmd/podman/pods/create.go | 6
-rw-r--r--  cmd/podman/root.go | 4
-rw-r--r--  cmd/podman/secrets/inspect.go | 4
-rw-r--r--  cmd/podman/system/connection.go | 15
-rw-r--r--  cmd/podman/system/reset.go | 15
-rw-r--r--  cmd/podman/system/reset_machine.go | 13
-rw-r--r--  cmd/podman/system/reset_machine_unsupported.go | 8
-rw-r--r--  cmd/podman/system/service_abi.go | 37
-rw-r--r--  cmd/podman/validate/noop.go | 9
-rw-r--r--  cmd/rootlessport/main.go | 24
-rw-r--r--  cmd/rootlessport/wsl.go | 37
-rw-r--r--  cmd/rootlessport/wsl_test.go | 89
-rw-r--r--  docs/source/markdown/podman-info.1.md | 6
-rw-r--r--  docs/source/markdown/podman-pod-create.1.md | 11
-rw-r--r--  docs/source/markdown/podman-system-reset.1.md | 3
-rw-r--r--  docs/tutorials/rootless_tutorial.md | 10
-rw-r--r--  go.mod | 13
-rw-r--r--  go.sum | 47
-rwxr-xr-x  hack/buildah-vendor-treadmill | 401
-rw-r--r--  hack/podman-registry-go/registry.go | 23
-rw-r--r--  libpod/boltdb_state_internal.go | 13
-rw-r--r--  libpod/container_copy_linux.go | 12
-rw-r--r--  libpod/container_internal.go | 42
-rw-r--r--  libpod/container_internal_linux.go | 71
-rw-r--r--  libpod/define/info.go | 25
-rw-r--r--  libpod/define/pod_inspect.go | 2
-rw-r--r--  libpod/events/config.go | 6
-rw-r--r--  libpod/events/events.go | 22
-rw-r--r--  libpod/events/events_linux.go | 2
-rw-r--r--  libpod/events/memory.go | 49
-rw-r--r--  libpod/info.go | 75
-rw-r--r--  libpod/info_test.go | 59
-rw-r--r--  libpod/kube.go | 6
-rw-r--r--  libpod/networking_linux.go | 2
-rw-r--r--  libpod/oci_attach_linux.go | 11
-rw-r--r--  libpod/options.go | 18
-rw-r--r--  libpod/pod.go | 4
-rw-r--r--  libpod/pod_api.go | 51
-rw-r--r--  libpod/runtime.go | 28
-rw-r--r--  libpod/runtime_ctr.go | 4
-rw-r--r--  libpod/runtime_pod_linux.go | 15
-rw-r--r--  libpod/runtime_worker.go | 41
-rw-r--r--  libpod/util.go | 7
-rw-r--r--  pkg/api/handlers/decoder.go | 10
-rw-r--r--  pkg/api/handlers/libpod/play.go | 10
-rw-r--r--  pkg/api/handlers/libpod/secrets.go | 8
-rw-r--r--  pkg/api/server/server.go | 21
-rw-r--r--  pkg/domain/entities/pods.go | 2
-rw-r--r--  pkg/domain/infra/abi/containers.go | 6
-rw-r--r--  pkg/domain/infra/abi/play.go | 6
-rw-r--r--  pkg/domain/infra/abi/system.go | 44
-rw-r--r--  pkg/domain/utils/utils_test.go | 76
-rw-r--r--  pkg/env/env_test.go | 162
-rw-r--r--  pkg/machine/config.go | 29
-rw-r--r--  pkg/machine/e2e/inspect_test.go | 12
-rw-r--r--  pkg/machine/e2e/machine_test.go | 11
-rw-r--r--  pkg/machine/qemu/config_test.go | 23
-rw-r--r--  pkg/machine/qemu/machine.go | 84
-rw-r--r--  pkg/machine/wsl/machine.go | 273
-rw-r--r--  pkg/specgen/generate/pod_create.go | 2
-rw-r--r--  pkg/specgen/podspecgen.go | 2
-rw-r--r--  pkg/systemd/generate/pods.go | 11
-rw-r--r--  pkg/systemd/generate/pods_test.go | 32
-rw-r--r--  pkg/util/utils.go | 23
-rw-r--r--  test/e2e/attach_test.go | 2
-rw-r--r--  test/e2e/benchmarks_test.go | 72
-rw-r--r--  test/e2e/build_test.go | 5
-rw-r--r--  test/e2e/checkpoint_image_test.go | 1
-rw-r--r--  test/e2e/checkpoint_test.go | 2
-rw-r--r--  test/e2e/commit_test.go | 2
-rw-r--r--  test/e2e/common_test.go | 37
-rw-r--r--  test/e2e/container_clone_test.go | 1
-rw-r--r--  test/e2e/container_create_volume_test.go | 1
-rw-r--r--  test/e2e/container_inspect_test.go | 1
-rw-r--r--  test/e2e/containers_conf_test.go | 1
-rw-r--r--  test/e2e/cp_test.go | 1
-rw-r--r--  test/e2e/create_staticip_test.go | 1
-rw-r--r--  test/e2e/create_staticmac_test.go | 1
-rw-r--r--  test/e2e/create_test.go | 8
-rw-r--r--  test/e2e/diff_test.go | 1
-rw-r--r--  test/e2e/events_test.go | 1
-rw-r--r--  test/e2e/exec_test.go | 1
-rw-r--r--  test/e2e/export_test.go | 1
-rw-r--r--  test/e2e/generate_kube_test.go | 1
-rw-r--r--  test/e2e/generate_systemd_test.go | 2
-rw-r--r--  test/e2e/healthcheck_run_test.go | 3
-rw-r--r--  test/e2e/history_test.go | 1
-rw-r--r--  test/e2e/hooks/checkhook.json | 5
-rwxr-xr-x  test/e2e/hooks/checkhook.sh | 4
-rw-r--r--  test/e2e/image_sign_test.go | 2
-rw-r--r--  test/e2e/images_test.go | 1
-rw-r--r--  test/e2e/import_test.go | 1
-rw-r--r--  test/e2e/info_test.go | 28
-rw-r--r--  test/e2e/init_test.go | 1
-rw-r--r--  test/e2e/inspect_test.go | 1
-rw-r--r--  test/e2e/libpod_suite_remote_test.go | 26
-rw-r--r--  test/e2e/libpod_suite_test.go | 12
-rw-r--r--  test/e2e/login_logout_test.go | 21
-rw-r--r--  test/e2e/logs_test.go | 3
-rw-r--r--  test/e2e/manifest_test.go | 1
-rw-r--r--  test/e2e/mount_rootless_test.go | 1
-rw-r--r--  test/e2e/namespace_test.go | 1
-rw-r--r--  test/e2e/network_create_test.go | 1
-rw-r--r--  test/e2e/pause_test.go | 1
-rw-r--r--  test/e2e/play_build_test.go | 1
-rw-r--r--  test/e2e/play_kube_test.go | 11
-rw-r--r--  test/e2e/pod_create_test.go | 1
-rw-r--r--  test/e2e/pod_infra_container_test.go | 1
-rw-r--r--  test/e2e/pod_initcontainers_test.go | 1
-rw-r--r--  test/e2e/pod_inspect_test.go | 1
-rw-r--r--  test/e2e/pod_kill_test.go | 1
-rw-r--r--  test/e2e/pod_pause_test.go | 1
-rw-r--r--  test/e2e/pod_pod_namespaces_test.go | 1
-rw-r--r--  test/e2e/pod_prune_test.go | 1
-rw-r--r--  test/e2e/pod_ps_test.go | 1
-rw-r--r--  test/e2e/pod_restart_test.go | 1
-rw-r--r--  test/e2e/pod_rm_test.go | 1
-rw-r--r--  test/e2e/pod_start_test.go | 1
-rw-r--r--  test/e2e/pod_stats_test.go | 1
-rw-r--r--  test/e2e/pod_stop_test.go | 1
-rw-r--r--  test/e2e/pod_top_test.go | 1
-rw-r--r--  test/e2e/port_test.go | 1
-rw-r--r--  test/e2e/prune_test.go | 1
-rw-r--r--  test/e2e/ps_test.go | 1
-rw-r--r--  test/e2e/pull_test.go | 15
-rw-r--r--  test/e2e/push_test.go | 18
-rw-r--r--  test/e2e/rename_test.go | 1
-rw-r--r--  test/e2e/restart_test.go | 1
-rw-r--r--  test/e2e/rm_test.go | 1
-rw-r--r--  test/e2e/run_aardvark_test.go | 1
-rw-r--r--  test/e2e/run_apparmor_test.go | 1
-rw-r--r--  test/e2e/run_cgroup_parent_test.go | 1
-rw-r--r--  test/e2e/run_cleanup_test.go | 3
-rw-r--r--  test/e2e/run_cpu_test.go | 1
-rw-r--r--  test/e2e/run_device_test.go | 1
-rw-r--r--  test/e2e/run_dns_test.go | 1
-rw-r--r--  test/e2e/run_entrypoint_test.go | 1
-rw-r--r--  test/e2e/run_env_test.go | 1
-rw-r--r--  test/e2e/run_exit_test.go | 1
-rw-r--r--  test/e2e/run_memory_test.go | 1
-rw-r--r--  test/e2e/run_networking_test.go | 6
-rw-r--r--  test/e2e/run_ns_test.go | 1
-rw-r--r--  test/e2e/run_passwd_test.go | 1
-rw-r--r--  test/e2e/run_privileged_test.go | 1
-rw-r--r--  test/e2e/run_restart_test.go | 1
-rw-r--r--  test/e2e/run_seccomp_test.go | 1
-rw-r--r--  test/e2e/run_security_labels_test.go | 2
-rw-r--r--  test/e2e/run_selinux_test.go | 1
-rw-r--r--  test/e2e/run_signal_test.go | 13
-rw-r--r--  test/e2e/run_staticip_test.go | 1
-rw-r--r--  test/e2e/run_test.go | 38
-rw-r--r--  test/e2e/run_userns_test.go | 1
-rw-r--r--  test/e2e/run_volume_test.go | 34
-rw-r--r--  test/e2e/run_working_dir_test.go | 1
-rw-r--r--  test/e2e/runlabel_test.go | 1
-rw-r--r--  test/e2e/save_test.go | 3
-rw-r--r--  test/e2e/search_test.go | 41
-rw-r--r--  test/e2e/secret_test.go | 1
-rw-r--r--  test/e2e/start_test.go | 1
-rw-r--r--  test/e2e/stats_test.go | 1
-rw-r--r--  test/e2e/stop_test.go | 1
-rw-r--r--  test/e2e/system_connection_test.go | 4
-rw-r--r--  test/e2e/system_df_test.go | 3
-rw-r--r--  test/e2e/system_dial_stdio_test.go | 3
-rw-r--r--  test/e2e/system_reset_test.go | 10
-rw-r--r--  test/e2e/system_service_test.go | 18
-rw-r--r--  test/e2e/systemd_activate_test.go | 1
-rw-r--r--  test/e2e/systemd_test.go | 1
-rw-r--r--  test/e2e/toolbox_test.go | 1
-rw-r--r--  test/e2e/top_test.go | 1
-rw-r--r--  test/e2e/tree_test.go | 2
-rw-r--r--  test/e2e/trust_test.go | 4
-rw-r--r--  test/e2e/unshare_test.go | 1
-rw-r--r--  test/e2e/version_test.go | 2
-rw-r--r--  test/e2e/volume_create_test.go | 1
-rw-r--r--  test/e2e/volume_exists_test.go | 1
-rw-r--r--  test/e2e/volume_inspect_test.go | 1
-rw-r--r--  test/e2e/volume_ls_test.go | 1
-rw-r--r--  test/e2e/volume_plugin_test.go | 16
-rw-r--r--  test/e2e/volume_prune_test.go | 1
-rw-r--r--  test/e2e/volume_rm_test.go | 1
-rw-r--r--  test/e2e/wait_test.go | 1
-rw-r--r--  test/system/200-pod.bats | 69
-rw-r--r--  test/system/272-system-connection.bats | 7
-rw-r--r--  test/system/500-networking.bats | 24
-rw-r--r--  test/system/700-play.bats | 17
-rw-r--r--  test/utils/common_function_test.go | 6
-rw-r--r--  test/utils/podmantest_test.go | 12
-rw-r--r--  test/utils/utils.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/config/pod_exit_policy.go | 36
-rw-r--r--  vendor/github.com/containers/common/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 195
-rw-r--r--  vendor/github.com/containers/image/v5/copy/progress_bars.go | 148
-rw-r--r--  vendor/github.com/containers/image/v5/copy/progress_channel.go (renamed from vendor/github.com/containers/image/v5/copy/progress_reader.go) | 29
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/ocicrypt/MAINTAINERS | 1
-rw-r--r--  vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go | 26
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_unix.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver_freebsd.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 12
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go | 16
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 2
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 3
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 188
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools.go | 64
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go | 48
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | 21
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go | 37
-rw-r--r--  vendor/github.com/containers/storage/pkg/reexec/command_unix.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/.gitignore | 7
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 7
-rw-r--r--  vendor/github.com/klauspost/compress/go.mod | 2
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go | 34
-rw-r--r--  vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go | 11
-rw-r--r--  vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s | 36
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 58
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 42
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 14
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder_options.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 13
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder.go | 25
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go | 168
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go | 350
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | 3519
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_generic.go | 237
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zip.go | 48
-rw-r--r--  vendor/github.com/mrunalp/fileutils/.gitignore | 1
-rw-r--r--  vendor/github.com/mrunalp/fileutils/LICENSE | 191
-rw-r--r--  vendor/github.com/mrunalp/fileutils/MAINTAINERS | 1
-rw-r--r--  vendor/github.com/mrunalp/fileutils/README.md | 5
-rw-r--r--  vendor/github.com/mrunalp/fileutils/fileutils.go | 168
-rw-r--r--  vendor/github.com/mrunalp/fileutils/go.mod | 3
-rw-r--r--  vendor/github.com/mrunalp/fileutils/idtools.go | 54
-rw-r--r--  vendor/golang.org/x/sys/unix/endian_little.go | 4
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux_loong64.go | 191
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux.go | 1
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go | 818
-rw-r--r--  vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go | 552
-rw-r--r--  vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go | 313
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go | 679
-rw-r--r--  vendor/golang.org/x/tools/go/ast/inspector/typeof.go | 9
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/common.go | 180
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go | 12
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go | 15
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/normalize.go | 216
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/termlist.go | 172
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go | 197
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go | 151
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/typeterm.go | 170
-rw-r--r--  vendor/modules.txt | 25
276 files changed, 10985 insertions(+), 1693 deletions(-)
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 16ec09357..9a4563308 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -9,3 +9,19 @@ Finally, be sure to sign commits with your real name. Since by opening
a PR you already have commits, you can add signatures if needed with
something like `git commit -s --amend`.
-->
+
+#### Does this PR introduce a user-facing change?
+
+<!--
+If no, just write `None` in the release-note block below. If yes, a release note
+is required: Enter your extended release note in the block below. If the PR
+requires additional action from users switching to the new release, include the
+string "action required".
+
+For more information on release notes please follow the kubernetes model:
+https://git.k8s.io/community/contributors/guide/release-notes.md
+-->
+
+```release-note
+
+```
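(For illustration only — the following sample text is not part of the patch. A filled-in release-note block for a user-facing change such as the new pod exit policy could read:)

```release-note
`podman pod create` now accepts an `--exit-policy` option controlling whether the pod stops when its last container exits.
```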
diff --git a/.golangci.yml b/.golangci.yml
index 1ce18c0c3..7eb6ea57e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -45,7 +45,6 @@ linters:
- gocyclo
- lll
- unconvert
- - errcheck
- gosec
- maligned
- gomoddirectives
@@ -65,4 +64,4 @@ linters:
linters-settings:
errcheck:
check-blank: false
- ignore: encoding/json:^Unmarshal,fmt:.*
+ ignore: fmt:.*
diff --git a/Makefile b/Makefile
index 5aab30d89..caa991b14 100644
--- a/Makefile
+++ b/Makefile
@@ -174,9 +174,8 @@ endif
# Necessary for nested-$(MAKE) calls and docs/remote-docs.sh
export GOOS GOARCH CGO_ENABLED BINSFX SRCBINDIR
-define go-get
- env GO111MODULE=off \
- $(GO) get -u ${1}
+define go-install
+ $(GO) install ${1}@latest
endef
# Need to use CGO for mDNS resolution, but cross builds need CGO disabled
@@ -865,7 +864,7 @@ install.tools: .install.goimports .install.gitvalidation .install.md2man .instal
.install.goimports: .gopathok
if [ ! -x "$(GOBIN)/goimports" ]; then \
- $(call go-get,golang.org/x/tools/cmd/goimports); \
+ $(call go-install,golang.org/x/tools/cmd/goimports); \
fi
touch .install.goimports
@@ -878,7 +877,7 @@ install.tools: .install.goimports .install.gitvalidation .install.md2man .instal
.PHONY: .install.gitvalidation
.install.gitvalidation: .gopathok
if [ ! -x "$(GOBIN)/git-validation" ]; then \
- $(call go-get,github.com/vbatts/git-validation); \
+ $(call go-install,github.com/vbatts/git-validation); \
fi
.PHONY: .install.golangci-lint
@@ -898,7 +897,7 @@ install.tools: .install.goimports .install.gitvalidation .install.md2man .instal
.PHONY: .install.md2man
.install.md2man: .gopathok
if [ ! -x "$(GOMD2MAN)" ]; then \
- $(call go-get,github.com/cpuguy83/go-md2man); \
+ $(call go-install,github.com/cpuguy83/go-md2man); \
fi
# $BUILD_TAGS variable is used in hack/golangci-lint.sh
diff --git a/cmd/podman/common/completion.go b/cmd/podman/common/completion.go
index ddf922b2a..6149a4465 100644
--- a/cmd/podman/common/completion.go
+++ b/cmd/podman/common/completion.go
@@ -492,6 +492,11 @@ func AutocompleteImages(cmd *cobra.Command, args []string, toComplete string) ([
return getImages(cmd, toComplete)
}
+// AutocompletePodExitPolicy - Autocomplete pod exit policy.
+func AutocompletePodExitPolicy(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return config.PodExitPolicies, cobra.ShellCompDirectiveNoFileComp
+}
+
// AutocompleteCreateRun - Autocomplete only the first argument as image and then do file completion.
func AutocompleteCreateRun(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
if !validCurrentCmdLine(cmd, args, toComplete) {
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index 1c1a7c3e3..d28becc8a 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -299,7 +299,7 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
logDriverFlagName := "log-driver"
createFlags.StringVar(
&cf.LogDriver,
- logDriverFlagName, logDriver(),
+ logDriverFlagName, LogDriver(),
"Logging driver for the container",
)
_ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, AutocompleteLogDriver)
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index 16f193b03..c40d1ea51 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -530,7 +530,7 @@ func volumes() []string {
return nil
}
-func logDriver() string {
+func LogDriver() string {
if !registry.IsRemote() {
return containerConfig.Containers.LogDriver
}
diff --git a/cmd/podman/machine/init.go b/cmd/podman/machine/init.go
index 4991c6aa3..6c31f3531 100644
--- a/cmd/podman/machine/init.go
+++ b/cmd/podman/machine/init.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/common/pkg/completion"
"github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -117,7 +118,7 @@ func initMachine(cmd *cobra.Command, args []string) error {
vm machine.VM
)
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
initOpts.Name = defaultMachineName
if len(args) > 0 {
if len(args[0]) > maxMachineNameSize {
@@ -145,11 +146,14 @@ func initMachine(cmd *cobra.Command, args []string) error {
// Finished = *, err != nil - Exit with an error message
return err
}
+ newMachineEvent(events.Init, events.Event{Name: initOpts.Name})
fmt.Println("Machine init complete")
+
if now {
err = vm.Start(initOpts.Name, machine.StartOptions{})
if err == nil {
fmt.Printf("Machine %q started successfully\n", initOpts.Name)
+ newMachineEvent(events.Start, events.Event{Name: initOpts.Name})
}
} else {
extra := ""
diff --git a/cmd/podman/machine/inspect.go b/cmd/podman/machine/inspect.go
index 21e5074b7..4600a2b6d 100644
--- a/cmd/podman/machine/inspect.go
+++ b/cmd/podman/machine/inspect.go
@@ -4,13 +4,12 @@
package machine
import (
- "encoding/json"
"os"
+ "github.com/containers/common/pkg/report"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
- "github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/machine"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -52,7 +51,7 @@ func inspect(cmd *cobra.Command, args []string) error {
args = append(args, defaultMachineName)
}
vms := make([]machine.InspectInfo, 0, len(args))
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
for _, vmName := range args {
vm, err := provider.LoadVMByName(vmName)
if err != nil {
@@ -66,12 +65,29 @@ func inspect(cmd *cobra.Command, args []string) error {
}
vms = append(vms, *ii)
}
- if len(inspectFlag.format) > 0 {
- // need jhonce to work his template magic
- return define.ErrNotImplemented
- }
- if err := printJSON(vms); err != nil {
- logrus.Error(err)
+ switch {
+ case cmd.Flag("format").Changed:
+ row := report.NormalizeFormat(inspectFlag.format)
+ row = report.EnforceRange(row)
+
+ tmpl, err := report.NewTemplate("Machine inspect").Parse(row)
+ if err != nil {
+ return err
+ }
+
+ w, err := report.NewWriterDefault(os.Stdout)
+ if err != nil {
+ return err
+ }
+
+ if err := tmpl.Execute(w, vms); err != nil {
+ logrus.Error(err)
+ }
+ w.Flush()
+ default:
+ if err := printJSON(vms); err != nil {
+ logrus.Error(err)
+ }
}
return errs.PrintErrors()
}
diff --git a/cmd/podman/machine/list.go b/cmd/podman/machine/list.go
index c987bf71a..ef26b7886 100644
--- a/cmd/podman/machine/list.go
+++ b/cmd/podman/machine/list.go
@@ -4,7 +4,6 @@
package machine
import (
- "encoding/json"
"os"
"sort"
"strconv"
@@ -85,7 +84,7 @@ func list(cmd *cobra.Command, args []string) error {
listFlag.format = "{{.Name}}\n"
}
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
listResponse, err = provider.List(opts)
if err != nil {
return errors.Wrap(err, "error listing vms")
diff --git a/cmd/podman/machine/machine.go b/cmd/podman/machine/machine.go
index d3775f022..553f1ef7a 100644
--- a/cmd/podman/machine/machine.go
+++ b/cmd/podman/machine/machine.go
@@ -4,25 +4,38 @@
package machine
import (
+ "errors"
+ "net"
+ "os"
+ "path/filepath"
+ "regexp"
"strings"
+ "sync"
+ "time"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/validate"
+ "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
+ "github.com/containers/podman/v4/pkg/util"
+ "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
- noOp = func(cmd *cobra.Command, args []string) error {
- return nil
- }
+ // Pull in configured json library
+ json = registry.JSONLibrary()
+
+ openEventSock sync.Once // Singleton support for opening sockets as needed
+ sockets []net.Conn // Opened sockets, if any
+
// Command: podman _machine_
machineCmd = &cobra.Command{
Use: "machine",
Short: "Manage a virtual machine",
Long: "Manage a virtual machine. Virtual machines are used to run Podman.",
- PersistentPreRunE: noOp,
- PersistentPostRunE: noOp,
+ PersistentPreRunE: validate.NoOp,
+ PersistentPostRunE: closeMachineEvents,
RunE: validate.SubCommandExists,
}
)
@@ -51,7 +64,7 @@ func autocompleteMachine(cmd *cobra.Command, args []string, toComplete string) (
func getMachines(toComplete string) ([]string, cobra.ShellCompDirective) {
suggestions := []string{}
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
machines, err := provider.List(machine.ListOptions{})
if err != nil {
cobra.CompErrorln(err.Error())
@@ -64,3 +77,88 @@ func getMachines(toComplete string) ([]string, cobra.ShellCompDirective) {
}
return suggestions, cobra.ShellCompDirectiveNoFileComp
}
+
+func initMachineEvents() {
+ sockPaths, err := resolveEventSock()
+ if err != nil {
+ logrus.Warnf("Failed to resolve machine event sockets, machine events will not be published: %v", err)
+ }
+
+ for _, path := range sockPaths {
+ conn, err := (&net.Dialer{}).DialContext(registry.Context(), "unix", path)
+ if err != nil {
+ logrus.Warnf("Failed to open event socket %q: %v", path, err)
+ continue
+ }
+ logrus.Debugf("Machine event socket %q found", path)
+ sockets = append(sockets, conn)
+ }
+}
+
+func resolveEventSock() ([]string, error) {
+ // Used mostly for testing
+ if sock, found := os.LookupEnv("PODMAN_MACHINE_EVENTS_SOCK"); found {
+ return []string{sock}, nil
+ }
+
+ xdg, err := util.GetRuntimeDir()
+ if err != nil {
+ logrus.Warnf("Failed to get runtime dir, machine events will not be published: %s", err)
+ return nil, nil
+ }
+
+ re := regexp.MustCompile(`machine_events.*\.sock`)
+ sockPaths := make([]string, 0)
+ fn := func(path string, info os.DirEntry, err error) error {
+ switch {
+ case err != nil:
+ return err
+ case info.IsDir():
+ return nil
+ case info.Type() != os.ModeSocket:
+ return nil
+ case !re.MatchString(info.Name()):
+ return nil
+ }
+
+ logrus.Debugf("Machine events will be published on: %q", path)
+ sockPaths = append(sockPaths, path)
+ return nil
+ }
+
+ if err := filepath.WalkDir(filepath.Join(xdg, "podman"), fn); err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return sockPaths, nil
+}
+
+func newMachineEvent(status events.Status, event events.Event) {
+ openEventSock.Do(initMachineEvents)
+
+ event.Status = status
+ event.Time = time.Now()
+ event.Type = events.Machine
+
+ payload, err := json.Marshal(event)
+ if err != nil {
+ logrus.Errorf("Unable to format machine event: %q", err)
+ return
+ }
+
+ for _, sock := range sockets {
+ if _, err := sock.Write(payload); err != nil {
+ logrus.Errorf("Unable to write machine event: %q", err)
+ }
+ }
+}
+
+func closeMachineEvents(cmd *cobra.Command, _ []string) error {
+ logrus.Debugf("Called machine %s.PersistentPostRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
+ for _, sock := range sockets {
+ _ = sock.Close()
+ }
+ return nil
+}
diff --git a/cmd/podman/machine/platform.go b/cmd/podman/machine/platform.go
index 77fec083e..5ba649a48 100644
--- a/cmd/podman/machine/platform.go
+++ b/cmd/podman/machine/platform.go
@@ -8,6 +8,6 @@ import (
"github.com/containers/podman/v4/pkg/machine/qemu"
)
-func getSystemDefaultProvider() machine.Provider {
+func GetSystemDefaultProvider() machine.Provider {
return qemu.GetQemuProvider()
}
diff --git a/cmd/podman/machine/platform_windows.go b/cmd/podman/machine/platform_windows.go
index 03978eda1..cdbc52459 100644
--- a/cmd/podman/machine/platform_windows.go
+++ b/cmd/podman/machine/platform_windows.go
@@ -5,6 +5,6 @@ import (
"github.com/containers/podman/v4/pkg/machine/wsl"
)
-func getSystemDefaultProvider() machine.Provider {
+func GetSystemDefaultProvider() machine.Provider {
return wsl.GetWSLProvider()
}
diff --git a/cmd/podman/machine/rm.go b/cmd/podman/machine/rm.go
index 617a70a76..a6e66265c 100644
--- a/cmd/podman/machine/rm.go
+++ b/cmd/podman/machine/rm.go
@@ -10,6 +10,7 @@ import (
"strings"
"github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/spf13/cobra"
)
@@ -50,7 +51,7 @@ func init() {
flags.BoolVar(&destroyOptions.SaveImage, imageFlagName, false, "Do not delete the image file")
}
-func rm(cmd *cobra.Command, args []string) error {
+func rm(_ *cobra.Command, args []string) error {
var (
err error
vm machine.VM
@@ -60,7 +61,7 @@ func rm(cmd *cobra.Command, args []string) error {
vmName = args[0]
}
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
vm, err = provider.LoadVMByName(vmName)
if err != nil {
return err
@@ -83,5 +84,10 @@ func rm(cmd *cobra.Command, args []string) error {
return nil
}
}
- return remove()
+ err = remove()
+ if err != nil {
+ return err
+ }
+ newMachineEvent(events.Remove, events.Event{Name: vmName})
+ return nil
}
diff --git a/cmd/podman/machine/set.go b/cmd/podman/machine/set.go
index a994c981b..5777882da 100644
--- a/cmd/podman/machine/set.go
+++ b/cmd/podman/machine/set.go
@@ -83,7 +83,7 @@ func setMachine(cmd *cobra.Command, args []string) error {
if len(args) > 0 && len(args[0]) > 0 {
vmName = args[0]
}
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
vm, err = provider.LoadVMByName(vmName)
if err != nil {
return err
diff --git a/cmd/podman/machine/ssh.go b/cmd/podman/machine/ssh.go
index e1175d632..4a86da67a 100644
--- a/cmd/podman/machine/ssh.go
+++ b/cmd/podman/machine/ssh.go
@@ -51,7 +51,7 @@ func ssh(cmd *cobra.Command, args []string) error {
// Set the VM to default
vmName := defaultMachineName
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
// If len is greater than 0, it means we may have been
// provided the VM name. If so, we check. The VM name,
diff --git a/cmd/podman/machine/start.go b/cmd/podman/machine/start.go
index 56acb09cb..c9b99e63b 100644
--- a/cmd/podman/machine/start.go
+++ b/cmd/podman/machine/start.go
@@ -7,6 +7,7 @@ import (
"fmt"
"github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -31,7 +32,7 @@ func init() {
})
}
-func start(cmd *cobra.Command, args []string) error {
+func start(_ *cobra.Command, args []string) error {
var (
err error
vm machine.VM
@@ -41,7 +42,7 @@ func start(cmd *cobra.Command, args []string) error {
vmName = args[0]
}
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
vm, err = provider.LoadVMByName(vmName)
if err != nil {
return err
@@ -62,5 +63,6 @@ func start(cmd *cobra.Command, args []string) error {
return err
}
fmt.Printf("Machine %q started successfully\n", vmName)
+ newMachineEvent(events.Start, events.Event{Name: vmName})
return nil
}
diff --git a/cmd/podman/machine/stop.go b/cmd/podman/machine/stop.go
index e6bf3cf2b..993662792 100644
--- a/cmd/podman/machine/stop.go
+++ b/cmd/podman/machine/stop.go
@@ -7,6 +7,7 @@ import (
"fmt"
"github.com/containers/podman/v4/cmd/podman/registry"
+ "github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/machine"
"github.com/spf13/cobra"
)
@@ -40,7 +41,7 @@ func stop(cmd *cobra.Command, args []string) error {
if len(args) > 0 && len(args[0]) > 0 {
vmName = args[0]
}
- provider := getSystemDefaultProvider()
+ provider := GetSystemDefaultProvider()
vm, err = provider.LoadVMByName(vmName)
if err != nil {
return err
@@ -49,5 +50,6 @@ func stop(cmd *cobra.Command, args []string) error {
return err
}
fmt.Printf("Machine %q stopped successfully\n", vmName)
+ newMachineEvent(events.Stop, events.Event{Name: vmName})
return nil
}
diff --git a/cmd/podman/main.go b/cmd/podman/main.go
index 8f580601e..929c8a757 100644
--- a/cmd/podman/main.go
+++ b/cmd/podman/main.go
@@ -18,6 +18,7 @@ import (
_ "github.com/containers/podman/v4/cmd/podman/secrets"
_ "github.com/containers/podman/v4/cmd/podman/system"
_ "github.com/containers/podman/v4/cmd/podman/system/connection"
+ "github.com/containers/podman/v4/cmd/podman/validate"
_ "github.com/containers/podman/v4/cmd/podman/volumes"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/rootless"
@@ -64,8 +65,8 @@ func parseCommands() *cobra.Command {
c.Command.Hidden = true
// overwrite persistent pre/post function to skip setup
- c.Command.PersistentPostRunE = noop
- c.Command.PersistentPreRunE = noop
+ c.Command.PersistentPostRunE = validate.NoOp
+ c.Command.PersistentPreRunE = validate.NoOp
addCommand(c)
continue
}
@@ -120,7 +121,3 @@ func addCommand(c registry.CliCommand) {
c.Command.SetUsageTemplate(usageTemplate)
c.Command.DisableFlagsInUseLine = true
}
-
-func noop(cmd *cobra.Command, args []string) error {
- return nil
-}
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 40d14a609..3be7396ce 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -87,7 +87,7 @@ func init() {
_ = kubeCmd.RegisterFlagCompletionFunc(staticIPFlagName, completion.AutocompleteNone)
logDriverFlagName := "log-driver"
- flags.StringVar(&kubeOptions.LogDriver, logDriverFlagName, "", "Logging driver for the container")
+ flags.StringVar(&kubeOptions.LogDriver, logDriverFlagName, common.LogDriver(), "Logging driver for the container")
_ = kubeCmd.RegisterFlagCompletionFunc(logDriverFlagName, common.AutocompleteLogDriver)
logOptFlagName := "log-opt"
diff --git a/cmd/podman/pods/create.go b/cmd/podman/pods/create.go
index 891ff2e3c..62f820790 100644
--- a/cmd/podman/pods/create.go
+++ b/cmd/podman/pods/create.go
@@ -72,6 +72,10 @@ func init() {
flags.StringVarP(&createOptions.Name, nameFlagName, "n", "", "Assign a name to the pod")
_ = createCommand.RegisterFlagCompletionFunc(nameFlagName, completion.AutocompleteNone)
+ policyFlag := "exit-policy"
+ flags.StringVarP(&createOptions.ExitPolicy, policyFlag, "", string(containerConfig.Engine.PodExitPolicy), "Behaviour when the last container exits")
+ _ = createCommand.RegisterFlagCompletionFunc(policyFlag, common.AutocompletePodExitPolicy)
+
infraImageFlagName := "infra-image"
var defInfraImage string
if !registry.IsRemote() {
@@ -214,7 +218,7 @@ func create(cmd *cobra.Command, args []string) error {
ret, err := parsers.ParseUintList(copy)
copy = ""
if err != nil {
- errors.Wrapf(err, "could not parse list")
+ return errors.Wrapf(err, "could not parse list")
}
var vals []int
for ind, val := range ret {
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 9b1aa778b..2bd4fa723 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -153,7 +153,9 @@ func persistentPreRunE(cmd *cobra.Command, args []string) error {
*runtime,
)
}
- runtimeFlag.Value.Set(*runtime)
+ if err := runtimeFlag.Value.Set(*runtime); err != nil {
+ return err
+ }
runtimeFlag.Changed = true
logrus.Debugf("Checkpoint was created using '%s'. Restore will use the same runtime", *runtime)
} else if cfg.RuntimePath != *runtime {
diff --git a/cmd/podman/secrets/inspect.go b/cmd/podman/secrets/inspect.go
index e8947e441..473d5620c 100644
--- a/cmd/podman/secrets/inspect.go
+++ b/cmd/podman/secrets/inspect.go
@@ -61,7 +61,9 @@ func inspect(cmd *cobra.Command, args []string) error {
return err
}
defer w.Flush()
- tmpl.Execute(w, inspected)
+ if err := tmpl.Execute(w, inspected); err != nil {
+ return err
+ }
} else {
buf, err := json.MarshalIndent(inspected, "", " ")
if err != nil {
diff --git a/cmd/podman/system/connection.go b/cmd/podman/system/connection.go
index 5164de78c..5dbe50fc9 100644
--- a/cmd/podman/system/connection.go
+++ b/cmd/podman/system/connection.go
@@ -7,18 +7,15 @@ import (
)
var (
- // Skip creating engines since this command will obtain connection information to said engines
- noOp = func(cmd *cobra.Command, args []string) error {
- return nil
- }
-
+ // ConnectionCmd skips creating engines (PersistentPreRunE/PersistentPostRunE are No-Op's) since
+ // sub-commands will obtain connection information to said engines
ConnectionCmd = &cobra.Command{
Use: "connection",
- Short: "Manage remote ssh destinations",
- Long: `Manage ssh destination information in podman configuration`,
- PersistentPreRunE: noOp,
+ Short: "Manage remote API service destinations",
+ Long: `Manage remote API service destination information in podman configuration`,
+ PersistentPreRunE: validate.NoOp,
RunE: validate.SubCommandExists,
- PersistentPostRunE: noOp,
+ PersistentPostRunE: validate.NoOp,
TraverseChildren: false,
}
)
diff --git a/cmd/podman/system/reset.go b/cmd/podman/system/reset.go
index 03783170f..176573bf6 100644
--- a/cmd/podman/system/reset.go
+++ b/cmd/podman/system/reset.go
@@ -61,7 +61,9 @@ func reset(cmd *cobra.Command, args []string) {
- all pods
- all images
- all networks
- - all build cache`)
+ - all build cache
+ - all machines`)
+
if len(listCtn) > 0 {
fmt.Println(`WARNING! The following external containers will be purged:`)
// print first 12 characters of ID and first configured name alias
@@ -81,7 +83,10 @@ func reset(cmd *cobra.Command, args []string) {
}
// Purge all the external containers with storage
- registry.ContainerEngine().ContainerRm(registry.Context(), listCtnIds, entities.RmOptions{Force: true, All: true, Ignore: true, Volumes: true})
+ _, err := registry.ContainerEngine().ContainerRm(registry.Context(), listCtnIds, entities.RmOptions{Force: true, All: true, Ignore: true, Volumes: true})
+ if err != nil {
+ logrus.Error(err)
+ }
// Shutdown all running engines, `reset` will hijack repository
registry.ContainerEngine().Shutdown(registry.Context())
registry.ImageEngine().Shutdown(registry.Context())
@@ -100,5 +105,11 @@ func reset(cmd *cobra.Command, args []string) {
//nolint:gocritic
os.Exit(define.ExecErrorCodeGeneric)
}
+
+ // Shutdown podman-machine and delete all machine files
+ if err := resetMachine(); err != nil {
+ logrus.Error(err)
+ }
+
os.Exit(0)
}
diff --git a/cmd/podman/system/reset_machine.go b/cmd/podman/system/reset_machine.go
new file mode 100644
index 000000000..a07b4fb83
--- /dev/null
+++ b/cmd/podman/system/reset_machine.go
@@ -0,0 +1,13 @@
+//go:build (amd64 && !remote) || (arm64 && !remote)
+// +build amd64,!remote arm64,!remote
+
+package system
+
+import (
+ cmdMach "github.com/containers/podman/v4/cmd/podman/machine"
+)
+
+func resetMachine() error {
+ provider := cmdMach.GetSystemDefaultProvider()
+ return provider.RemoveAndCleanMachines()
+}
diff --git a/cmd/podman/system/reset_machine_unsupported.go b/cmd/podman/system/reset_machine_unsupported.go
new file mode 100644
index 000000000..e063cd089
--- /dev/null
+++ b/cmd/podman/system/reset_machine_unsupported.go
@@ -0,0 +1,8 @@
+//go:build !amd64 && !arm64
+// +build !amd64,!arm64
+
+package system
+
+func resetMachine() error {
+ return nil
+}
diff --git a/cmd/podman/system/service_abi.go b/cmd/podman/system/service_abi.go
index f8abea3aa..9dc9de1c8 100644
--- a/cmd/podman/system/service_abi.go
+++ b/cmd/podman/system/service_abi.go
@@ -4,17 +4,18 @@
package system
import (
- "context"
+ "fmt"
"net"
"net/url"
"os"
"path/filepath"
+ "github.com/containers/podman/v4/cmd/podman/registry"
api "github.com/containers/podman/v4/pkg/api/server"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra"
"github.com/containers/podman/v4/pkg/servicereaper"
- "github.com/containers/podman/v4/pkg/util"
+ "github.com/coreos/go-systemd/v22/activation"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
@@ -27,7 +28,26 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
err error
)
- if opts.URI != "" {
+ libpodRuntime, err := infra.GetRuntime(registry.Context(), flags, cfg)
+ if err != nil {
+ return err
+ }
+
+ if opts.URI == "" {
+ if _, found := os.LookupEnv("LISTEN_PID"); !found {
+ return errors.New("no service URI provided and socket activation protocol is not active")
+ }
+
+ listeners, err := activation.Listeners()
+ if err != nil {
+ return fmt.Errorf("cannot retrieve file descriptors from systemd: %w", err)
+ }
+ if len(listeners) != 1 {
+ return fmt.Errorf("wrong number of file descriptors for socket activation protocol (%d != 1)", len(listeners))
+ }
+ listener = listeners[0]
+ libpodRuntime.SetRemoteURI(listeners[0].Addr().String())
+ } else {
uri, err := url.Parse(opts.URI)
if err != nil {
return errors.Errorf("%s is an invalid socket destination", opts.URI)
@@ -39,7 +59,6 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
if err != nil {
return err
}
- util.SetSocketPath(path)
if os.Getenv("LISTEN_FDS") != "" {
// If it is activated by systemd, use the first LISTEN_FD (3)
// instead of opening the socket file.
@@ -67,6 +86,7 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
default:
logrus.Debugf("Attempting API Service endpoint scheme %q", uri.Scheme)
}
+ libpodRuntime.SetRemoteURI(uri.String())
}
// Close stdin, so shortnames will not prompt
@@ -78,15 +98,10 @@ func restService(flags *pflag.FlagSet, cfg *entities.PodmanConfig, opts entities
if err := unix.Dup2(int(devNullfile.Fd()), int(os.Stdin.Fd())); err != nil {
return err
}
- rt, err := infra.GetRuntime(context.Background(), flags, cfg)
- if err != nil {
- return err
- }
servicereaper.Start()
-
- infra.StartWatcher(rt)
- server, err := api.NewServerWithSettings(rt, listener, opts)
+ infra.StartWatcher(libpodRuntime)
+ server, err := api.NewServerWithSettings(libpodRuntime, listener, opts)
if err != nil {
return err
}
diff --git a/cmd/podman/validate/noop.go b/cmd/podman/validate/noop.go
new file mode 100644
index 000000000..2243ef5c4
--- /dev/null
+++ b/cmd/podman/validate/noop.go
@@ -0,0 +1,9 @@
+package validate
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func NoOp(_ *cobra.Command, _ []string) error {
+ return nil
+}
diff --git a/cmd/rootlessport/main.go b/cmd/rootlessport/main.go
index e9ab8b076..5bd35a985 100644
--- a/cmd/rootlessport/main.go
+++ b/cmd/rootlessport/main.go
@@ -1,3 +1,6 @@
+//go:build linux
+// +build linux
+
package main
import (
@@ -307,11 +310,11 @@ func exposePorts(pm rkport.Manager, portMappings []types.PortMapping, childIP st
ChildPort: int(port.ContainerPort + i),
ChildIP: childIP,
}
- if err := rkportutil.ValidatePortSpec(spec, nil); err != nil {
- return err
- }
- if _, err := pm.AddPort(ctx, spec); err != nil {
- return err
+
+ for _, spec = range splitDualStackSpecIfWsl(spec) {
+ if err := validateAndAddPort(ctx, pm, spec); err != nil {
+ return err
+ }
}
}
}
@@ -319,6 +322,17 @@ func exposePorts(pm rkport.Manager, portMappings []types.PortMapping, childIP st
return nil
}
+func validateAndAddPort(ctx context.Context, pm rkport.Manager, spec rkport.Spec) error {
+ if err := rkportutil.ValidatePortSpec(spec, nil); err != nil {
+ return err
+ }
+ if _, err := pm.AddPort(ctx, spec); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func child() error {
// load the config from the parent
var opaque map[string]string
diff --git a/cmd/rootlessport/wsl.go b/cmd/rootlessport/wsl.go
new file mode 100644
index 000000000..c1e67ba87
--- /dev/null
+++ b/cmd/rootlessport/wsl.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+ "net"
+ "strings"
+
+ "github.com/containers/common/pkg/machine"
+ rkport "github.com/rootless-containers/rootlesskit/pkg/port"
+)
+
+// WSL machines do not relay ipv4 traffic to dual-stack ports, simulate instead
+func splitDualStackSpecIfWsl(spec rkport.Spec) []rkport.Spec {
+ specs := []rkport.Spec{spec}
+ protocol := spec.Proto
+ if machine.MachineHostType() != machine.Wsl || strings.HasSuffix(protocol, "4") || strings.HasSuffix(protocol, "6") {
+ return specs
+ }
+
+ ip := net.ParseIP(spec.ParentIP)
+ splitLoopback := ip.IsLoopback() && ip.To4() == nil
+ // Map ::1 and 0.0.0.0/:: to ipv4 + ipv6 to simulate dual-stack
+ if ip.IsUnspecified() || splitLoopback {
+ specs = append(specs, spec)
+ specs[0].Proto = protocol + "4"
+ specs[1].Proto = protocol + "6"
+ if splitLoopback {
+ // Hacky, but we will only have one ipv4 loopback with WSL config
+ specs[0].ParentIP = "127.0.0.1"
+ }
+ if ip.IsUnspecified() {
+ specs[0].ParentIP = "0.0.0.0"
+ specs[1].ParentIP = "::"
+ }
+ }
+
+ return specs
+}
diff --git a/cmd/rootlessport/wsl_test.go b/cmd/rootlessport/wsl_test.go
new file mode 100644
index 000000000..83d7e3717
--- /dev/null
+++ b/cmd/rootlessport/wsl_test.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+ "testing"
+
+ "github.com/containers/common/pkg/machine"
+ "github.com/rootless-containers/rootlesskit/pkg/port"
+ "github.com/stretchr/testify/assert"
+)
+
+type SpecData struct {
+ mach string
+ sourceProto string
+ sourceIP string
+ expectCount int
+ expectProto string
+ expectIP string
+ secondProto string
+ secondIP string
+}
+
+func TestDualStackSplit(t *testing.T) {
+ //nolint
+ const (
+ IP4_ALL = "0.0.0.0"
+ IP4__LO = "127.0.0.1"
+ IP6_ALL = "::"
+ IP6__LO = "::1"
+ TCP_ = "tcp"
+ TCP4 = "tcp4"
+ TCP6 = "tcp6"
+ WSL = "wsl"
+ ___ = ""
+ IP6_REG = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ IP4_REG = "10.0.0.1"
+ )
+
+ tests := []SpecData{
+ // Split cases
+ {WSL, TCP_, IP4_ALL, 2, TCP4, IP4_ALL, TCP6, IP6_ALL},
+ {WSL, TCP_, IP6_ALL, 2, TCP4, IP4_ALL, TCP6, IP6_ALL},
+ {WSL, TCP_, IP6__LO, 2, TCP4, IP4__LO, TCP6, IP6__LO},
+
+ // Non-Split
+ {WSL, TCP_, IP4__LO, 1, TCP_, IP4__LO, "", ""},
+ {WSL, TCP4, IP4_ALL, 1, TCP4, IP4_ALL, "", ""},
+ {WSL, TCP6, IP6__LO, 1, TCP6, IP6__LO, "", ""},
+ {WSL, TCP_, IP4_REG, 1, TCP_, IP4_REG, "", ""},
+ {WSL, TCP_, IP6_REG, 1, TCP_, IP6_REG, "", ""},
+ {___, TCP_, IP4_ALL, 1, TCP_, IP4_ALL, "", ""},
+ {___, TCP_, IP6_ALL, 1, TCP_, IP6_ALL, "", ""},
+ {___, TCP_, IP4__LO, 1, TCP_, IP4__LO, "", ""},
+ {___, TCP_, IP6__LO, 1, TCP_, IP6__LO, "", ""},
+ }
+
+ for _, data := range tests {
+ verifySplit(t, data)
+ }
+}
+
+func verifySplit(t *testing.T, data SpecData) {
+ machine := machine.GetMachineMarker()
+ oldEnable, oldType := machine.Enabled, machine.Type
+ machine.Enabled, machine.Type = len(data.mach) > 0, data.mach
+
+ source := port.Spec{
+ Proto: data.sourceProto,
+ ParentIP: data.sourceIP,
+ ParentPort: 100,
+ ChildIP: "1.1.1.1",
+ ChildPort: 200,
+ }
+ expect, second := source, source
+ specs := splitDualStackSpecIfWsl(source)
+
+ assert.Equal(t, data.expectCount, len(specs))
+
+ expect.Proto = data.expectProto
+ expect.ParentIP = data.expectIP
+ assert.Equal(t, expect, specs[0])
+
+ if data.expectCount > 1 {
+ second.Proto = data.secondProto
+ second.ParentIP = data.secondIP
+ assert.Equal(t, second, specs[1])
+ }
+
+ machine.Enabled, machine.Type = oldEnable, oldType
+}
diff --git a/docs/source/markdown/podman-info.1.md b/docs/source/markdown/podman-info.1.md
index 4f09913b8..fc2d0fa60 100644
--- a/docs/source/markdown/podman-info.1.md
+++ b/docs/source/markdown/podman-info.1.md
@@ -39,6 +39,10 @@ host:
package: conmon-2.0.29-2.fc34.x86_64
path: /usr/bin/conmon
version: 'conmon version 2.0.29, commit: '
+ cpu_utilization:
+ idle_percent: 96.84
+ system_percent: 0.71
+ user_percent: 2.45
cpus: 8
distribution:
distribution: fedora
@@ -124,6 +128,8 @@ store:
graphDriverName: overlay
graphOptions: {}
graphRoot: /home/dwalsh/.local/share/containers/storage
+ graphRootAllocated: 510389125120
+ graphRootUsed: 129170714624
graphStatus:
Backing Filesystem: extfs
Native Overlay Diff: "true"
diff --git a/docs/source/markdown/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md
index 714909b98..fa431b611 100644
--- a/docs/source/markdown/podman-pod-create.1.md
+++ b/docs/source/markdown/podman-pod-create.1.md
@@ -75,6 +75,15 @@ Set custom DNS options in the /etc/resolv.conf file that will be shared between
Set custom DNS search domains in the /etc/resolv.conf file that will be shared between all containers in the pod.
+#### **--exit-policy**=**continue** | *stop*
+
+Set the exit policy of the pod when the last container exits. Supported policies are:
+
+| Exit Policy | Description |
+| ------------------ | --------------------------------------------------------------------------- |
+| *continue* | The pod continues running when the last container exits. Used by default. |
+| *stop* | The pod is stopped when the last container exits. Used in `play kube`. |
+
#### **--gidmap**=*container_gid:host_gid:amount*
GID map for the user namespace. Using this flag will run the container with user namespace enabled. It conflicts with the `--userns` and `--subgidname` flags.
@@ -554,7 +563,7 @@ $ podman pod create --network net1:ip=10.89.1.5 --network net2:ip=10.89.10.10
```
## SEE ALSO
-**[podman(1)](podman.1.md)**, **[podman-pod(1)](podman-pod.1.md)**, **containers.conf(1)**
+**[podman(1)](podman.1.md)**, **[podman-pod(1)](podman-pod.1.md)**, **[podman-play-kube(1)](podman-play-kube.1.md)**, **containers.conf(1)**
## HISTORY
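(To make the `--exit-policy` flag documented above concrete, here is a minimal usage sketch — illustrative only, not part of the patch; the pod name `demo` is arbitrary:)

```
$ podman pod create --exit-policy=stop demo
$ podman run --pod demo alpine true
# with --exit-policy=stop, the pod should show as Exited once its last container finishes
$ podman pod ps
```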
diff --git a/docs/source/markdown/podman-system-reset.1.md b/docs/source/markdown/podman-system-reset.1.md
index c463481e6..11ce11d07 100644
--- a/docs/source/markdown/podman-system-reset.1.md
+++ b/docs/source/markdown/podman-system-reset.1.md
@@ -7,7 +7,7 @@ podman\-system\-reset - Reset storage back to initial state
**podman system reset** [*options*]
## DESCRIPTION
-**podman system reset** removes all pods, containers, images, networks and volumes.
+**podman system reset** removes all pods, containers, images, networks and volumes, and machines.
This command must be run **before** changing any of the following fields in the
`containers.conf` or `storage.conf` files: `driver`, `static_dir`, `tmp_dir`
@@ -36,6 +36,7 @@ WARNING! This will remove:
- all images
- all networks
- all build cache
+ - all machines
Are you sure you want to continue? [y/N] y
```
diff --git a/docs/tutorials/rootless_tutorial.md b/docs/tutorials/rootless_tutorial.md
index 5d20f2cc0..981916806 100644
--- a/docs/tutorials/rootless_tutorial.md
+++ b/docs/tutorials/rootless_tutorial.md
@@ -19,11 +19,11 @@ or for all commands by changing the value for the "Default OCI runtime" in the `
### Installing Podman
-For installing Podman, please see the [installation instructions](https://github.com/containers/podman/blob/main/install.md).
+For installing Podman, please see the [installation instructions](https://podman.io/getting-started/installation).
### Building Podman
-For building Podman, please see the [installation instructions](https://github.com/containers/podman/blob/main/install.md#building-from-scratch).
+For building Podman, please see the [build instructions](https://podman.io/getting-started/installation#building-from-scratch).
### Install `slirp4netns`
@@ -81,10 +81,10 @@ If you update either `/etc/subuid` or `/etc/subgid`, you need to stop all the ru
Rather than updating the files directly, the `usermod` program can be used to assign UIDs and GIDs to a user.
```
-usermod --add-subuids 200000-201000 --add-subgids 200000-201000 johndoe
+usermod --add-subuids 100000-165535 --add-subgids 100000-165535 johndoe
grep johndoe /etc/subuid /etc/subgid
-/etc/subuid:johndoe:200000:1001
-/etc/subgid:johndoe:200000:1001
+/etc/subuid:johndoe:100000:65536
+/etc/subgid:johndoe:100000:65536
```
### Enable unprivileged `ping`
diff --git a/go.mod b/go.mod
index cc171fc3e..c80f59460 100644
--- a/go.mod
+++ b/go.mod
@@ -12,12 +12,12 @@ require (
github.com/containernetworking/cni v1.1.0
github.com/containernetworking/plugins v1.1.1
github.com/containers/buildah v1.25.2-0.20220423102655-8f2bb8876f3f
- github.com/containers/common v0.47.5-0.20220425182415-4081e6be9356
+ github.com/containers/common v0.48.0
github.com/containers/conmon v2.0.20+incompatible
- github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524
- github.com/containers/ocicrypt v1.1.3
+ github.com/containers/image/v5 v5.21.1
+ github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
github.com/containers/psgo v1.7.2
- github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b
+ github.com/containers/storage v1.40.2
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
@@ -42,7 +42,6 @@ require (
github.com/json-iterator/go v1.1.12
github.com/mattn/go-isatty v0.0.14
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
- github.com/mrunalp/fileutils v0.5.0
github.com/nxadm/tail v1.4.8
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.19.0
@@ -54,7 +53,7 @@ require (
github.com/opencontainers/selinux v1.10.1
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
- github.com/rootless-containers/rootlesskit v1.0.0
+ github.com/rootless-containers/rootlesskit v1.0.1
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5
@@ -67,7 +66,7 @@ require (
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+ golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/text v0.3.7
google.golang.org/protobuf v1.28.0
diff --git a/go.sum b/go.sum
index 80def0139..f2737d093 100644
--- a/go.sum
+++ b/go.sum
@@ -287,8 +287,9 @@ github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
-github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU=
github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s=
+github.com/containerd/containerd v1.6.3 h1:JfgUEIAH07xDWk6kqz0P3ArZt+KJ9YeihSC9uyFtSKg=
+github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -308,6 +309,7 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -318,6 +320,7 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6T
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
@@ -358,22 +361,23 @@ github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19
github.com/containers/buildah v1.25.2-0.20220423102655-8f2bb8876f3f h1:a5Zjz9EXUDOelPdJKJiXBfDzQS1ynXL3rc16O93tcXo=
github.com/containers/buildah v1.25.2-0.20220423102655-8f2bb8876f3f/go.mod h1:fHTZF4uEZGIlR8oM0fhNvU0wYQOtDpuar8/PxTtdvR0=
github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb/go.mod h1:r80nWTmJrG9EoLkuI6WfbWQDUNQVqkVuB8Oaj1VVjOA=
-github.com/containers/common v0.47.5-0.20220425182415-4081e6be9356 h1:eJ1ghvyswTLRywF4YYEWrzZyOFEzlD1FUPLzJSz+wKo=
-github.com/containers/common v0.47.5-0.20220425182415-4081e6be9356/go.mod h1:r80nWTmJrG9EoLkuI6WfbWQDUNQVqkVuB8Oaj1VVjOA=
+github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
+github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.19.2-0.20220224100137-1045fb70b094/go.mod h1:XoYK6kE0dpazFNcuS+a8lra+QfbC6s8tzv+cUuCrZpE=
github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c/go.mod h1:qpUuaiE2mON6xMA0PRO9GteyH9+KT+C6WygZzL5RhnE=
-github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524 h1:SEar3HX8b/tYE2m5fQ6fBMbwrTXLCzSBvxqODOfjp+E=
-github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524/go.mod h1:qpUuaiE2mON6xMA0PRO9GteyH9+KT+C6WygZzL5RhnE=
+github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=
+github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.3 h1:uMxn2wTb4nDR7GqG3rnZSfpJXqWURfzZ7nKydzIeKpA=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f h1:hffElEaoDQfREHltc2wtFPd68BqDmzW6KkEDpuSRBjs=
+github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
@@ -381,8 +385,10 @@ github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaX
github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
-github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b h1:nGXmBAy71/Zjvi0K9fn8bvfZR15+IRxoxqGa0XPs774=
github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b/go.mod h1:hFiHLMgNU0r3MiUpE97hEBaEKCN8fEIuEEBXoFC9eN0=
+github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
+github.com/containers/storage v1.40.2 h1:GUlHaGnrs1JOEwv6YEvkQdgYXOXZdU1Angy4wgWNgF8=
+github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -881,8 +887,9 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
+github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1030,7 +1037,6 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8=
github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
-github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -1069,8 +1075,9 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1094,6 +1101,7 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4=
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg=
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q=
@@ -1219,8 +1227,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rootless-containers/rootlesskit v1.0.0 h1:+DI5RQEZa4OOnkOixkrezFye0XLlSsdrtGSP6+g1254=
-github.com/rootless-containers/rootlesskit v1.0.0/go.mod h1:8Lo4zb73rSW3seB+a7UuO1gAoRD1pVkKMbXEY3NFNTE=
+github.com/rootless-containers/rootlesskit v1.0.1 h1:jepqW1txFSowKSMAEkVhWH3Oa1TCY9S400MVYe/6Iro=
+github.com/rootless-containers/rootlesskit v1.0.1/go.mod h1:t2UAiYagxrJ+wmpFAUIZPcqsm4k2B7ve6g7lILKbloc=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
@@ -1325,8 +1333,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo=
github.com/sylabs/sif/v2 v2.3.2/go.mod h1:IrLX2pzmQ2O4qgv5iy3HdKJcBNYds9DTMd9Je8A9tX4=
-github.com/sylabs/sif/v2 v2.6.0 h1:nrWbtSAavp4T6gETg/QgZXxs67qTpSNEgqs2H1y228w=
github.com/sylabs/sif/v2 v2.6.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
+github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
+github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1366,7 +1375,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg=
+github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs=
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -1414,6 +1423,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
@@ -1547,6 +1557,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1614,6 +1625,7 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1776,6 +1788,7 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1786,8 +1799,9 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1921,8 +1935,9 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
-golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/hack/buildah-vendor-treadmill b/hack/buildah-vendor-treadmill
index 1feffaa60..0aa4245c5 100755
--- a/hack/buildah-vendor-treadmill
+++ b/hack/buildah-vendor-treadmill
@@ -16,7 +16,7 @@ use JSON;
use LWP::UserAgent;
(our $ME = $0) =~ s|.*/||;
-our $VERSION = '0.1';
+our $VERSION = '0.2';
# For debugging, show data structures using DumpTree($var)
#use Data::TreeDumper; $Data::TreeDumper::Displayaddress = 0;
@@ -24,24 +24,36 @@ our $VERSION = '0.1';
###############################################################################
# BEGIN user-customizable section
+# Page describing this process in much more detail
+our $Docs_URL =
+ 'https://github.com/containers/podman/wiki/Buildah-Vendor-Treadmill';
+
# github path to buildah
our $Buildah = 'github.com/containers/buildah';
# FIXME FIXME FIXME: add 'main'? I hope we never need this script for branches.
our $Treadmill_PR_Title = 'DO NOT MERGE: buildah vendor treadmill';
+# Github API; this is where we query to find out the active treadmill PR
our $API_URL = 'https://api.github.com/graphql';
+# Temporary file used to preserve current treadmill patches. This file
+# should only exist very briefly while we perform branch operations.
+our $Patch_File = "0000-$ME.patch";
+
# Use colors if available and if stdout is a tty
-our $Highlight = '';
-our $Reset = '';
+our $C_Highlight = '';
+our $C_Warning = '';
+our $C_Reset = '';
eval '
use Term::ANSIColor;
if (-t 1) {
- $Highlight = color("green");
- $Reset = color("reset");
+ $C_Highlight = color("green");
+ $C_Warning = color("bold red");
+ $C_Reset = color("reset");
+
}
- $SIG{__WARN__} = sub { print STDERR color("bold red"), "@_", $Reset; };
+ $SIG{__WARN__} = sub { print STDERR $C_Warning, "@_", $C_Reset; };
';
@@ -72,7 +84,7 @@ Call me with one of two options:
For latest documentation and best practices, please see:
- https://github.com/containers/podman/wiki/Buildah-Vendor-Treadmill
+ $Docs_URL
OPTIONS:
@@ -86,7 +98,8 @@ END_USAGE
# Command-line options. Note that this operates directly on @ARGV !
our %action;
our $debug = 0;
-our $force = 0;
+our $force_old_main = 0; # in --pick, proceeds even if main is old
+our $force_testing = 0;   # in --sync, test even if no podman/buildah changes
our $verbose = 0;
our $NOT = ''; # print "blahing the blah$NOT\n" if $debug
sub handle_opts {
@@ -95,9 +108,11 @@ sub handle_opts {
'sync' => sub { $action{sync}++ },
'pick' => sub { $action{pick}++ },
+ 'force-old-main' => \$force_old_main,
+ 'force-testing' => \$force_testing,
+
'debug!' => \$debug,
'dry-run|n!' => sub { $NOT = ' [NOT]' },
- 'force' => \$force,
'verbose|v' => \$verbose,
help => \&usage,
@@ -148,70 +163,129 @@ sub do_sync {
# Preserve current branch name, so we can come back after switching to main
my $current_branch = git_current_branch();
+ # Branch HEAD must be the treadmill commit.
+ my $commit_message = git('log', '-1', '--format=%s', 'HEAD');
+ print "[$commit_message]\n" if $verbose;
+ $commit_message =~ /buildah.*treadmill/
+ or die "$ME: HEAD must be a 'buildah treadmill' commit.\n";
+
+ # ...and previous commit must be a scratch buildah vendor
+ $commit_message = git('log', '-1', '--format=%B', 'HEAD^');
+ $commit_message =~ /DO NOT MERGE.* vendor in buildah.*JUNK COMMIT/s
+ or die "$ME: HEAD^ must be a DO NOT MERGE / JUNK COMMIT commit\n";
+ assert_buildah_vendor_commit('HEAD^');
+
+ # Looks good so far.
my $buildah_old = vendored_buildah();
print "-> buildah old = $buildah_old\n";
- # If HEAD is a buildah-vendor commit (usual case), drop it now.
- if (head_is_buildah_vendor_commit()) {
- if (is_treadmill_commit('HEAD^')) {
- progress("HEAD is buildah vendor (as expected); dropping it...");
- git('reset', '--hard', 'HEAD^');
- }
- else {
- die "$ME: HEAD is a buildah commit, but HEAD^ is not a treadmill commit! Cannot continue.\n";
- }
- }
- # HEAD must now be a treadmill commit
- is_treadmill_commit('HEAD')
- or die "$ME: HEAD is not a treadmill commit!\n";
-
- # HEAD is now a change to buildah-tests. Now update main and rebase.
+ # Pull main, and pivot back to this branch
pull_main();
git('checkout', '-q', $current_branch);
- my $forkpoint = git('merge-base', '--fork-point', 'main');
- my $main_commit = git('rev-parse', 'main');
+
+ # Preserve local patches
+ git('format-patch', "--output=$Patch_File", 'HEAD^');
+ progress("Treadmill patches saved to $Patch_File");
+
+ #
+ # Danger Will Robinson! This is where it gets scary: a failure here
+ # can leave us in a state where we could lose the treadmill patches.
+ # Proceed with extreme caution.
+ #
+ local $SIG{__DIE__} = sub {
+ print STDERR $C_Warning, "@_", <<"END_FAIL_INSTRUCTIONS";
+
+This is not something I can recover from. Your human judgment is needed.
+
+You will need to recover from this manually. Your best option is to
+look at the source code for this script.
+
+Your treadmill patches are here: $Patch_File
+END_FAIL_INSTRUCTIONS
+
+ exit 1;
+ };
+
+ my $forkpoint = git_forkpoint();
my $rebased;
+
+ # Unlikely to fail
+ git('reset', '--hard', 'HEAD^^');
+
+ # Rebase branch. Also unlikely to fail
+ my $main_commit = git('rev-parse', 'main');
if ($forkpoint eq $main_commit) {
progress("[Already rebased on podman main]");
}
else {
- # --empty=keep may be needed after a --pick commit, when we've
- # vendored a new buildah into podman and incorporated the treadmill
- # commit. Since this is a perpetual-motion workflow, in which we
- # keep an in-progress PR open at all times, we need a baseline
- # commit even if it's empty.
progress("Rebasing on podman main...");
git('rebase', '--empty=keep', 'main');
- # FIXME: rebase can fail after --pick. If it does, offer instructions.
$rebased = 1;
}
- # We're now back on our treadmill branch, with one commit on top of main.
- # Now vendor in latest buildah.
+ # This does have a high possibility of failing.
progress("Vendoring in buildah...");
system('go', 'mod', 'edit', '--require' => "${Buildah}\@main") == 0
- or die "$ME: go mod edit failed\n";
+ or die "$ME: go mod edit failed";
system('make', 'vendor') == 0
- or die "$ME: make vendor failed\n";
+ or die "$ME: make vendor failed";
my $buildah_new = vendored_buildah();
print "-> buildah new = $buildah_new\n";
+
+ # Tweak .cirrus.yml so we run bud tests first in CI (to fail fast).
+ tweak_cirrus_test_order();
+
+ # 'make vendor' seems to git-add files under buildah itself, but not
+ # under other changed modules. Add those now, otherwise we fail
+ # the dirty-tree test in CI.
+ if (my @v = git('status', '--porcelain', '--untracked=all', 'vendor')) {
+ if (my @untracked = grep { /^\?\?\s/ } @v) {
+ my %repos = map {
+ s!^.*?vendor/[^/]+/([^/]+/[^/]+)/.*$!$1!; $_ => 1;
+ } @untracked;
+ my $repos = join(', ', sort keys %repos);
+ progress("Adding untracked files under $repos");
+ git('add', 'vendor');
+ }
+ }
+
+ # Commit everything.
git('commit', '-as', '-m', <<"END_COMMIT_MESSAGE");
[DO NOT MERGE] vendor in buildah \@ $buildah_new
This is a JUNK COMMIT from $ME v$VERSION.
DO NOT MERGE. This is just a way to keep the buildah-podman
-vendoring in sync. See script --help for details.
+vendoring in sync. Refer to:
+
+ $Docs_URL
END_COMMIT_MESSAGE
+ # And, finally, this has the highest possibility of failing
+ progress('Reapplying preserved patches');
+ git('am', $Patch_File);
+
+ # It worked! Clean up: remove our local die() handler and the patch file
+ undef $SIG{__DIE__};
+ unlink $Patch_File;
+
# if buildah is unchanged, and we did not pull main, exit cleanly
my $change_message = '';
if ($buildah_new eq $buildah_old) {
if (! $rebased) {
- progress("Nothing has changed (same buildah, same podman). Bye!");
- exit 0;
+ $change_message = "Nothing has changed (same buildah, same podman).";
+ if ($force_testing) {
+ $change_message .= " Testing anyway due to --force-testing.";
+ }
+ else {
+ progress($change_message);
+ progress("Not much point to testing this, but use --force-testing to continue.");
+ exit 0;
+ }
+ }
+ else {
+ $change_message = "Podman has bumped, but Buildah is unchanged. There's probably not much point to testing this.";
}
- $change_message = "Podman has bumped, but Buildah is unchanged. There's probably not much point to testing this.";
}
else {
my $samenew = ($rebased ? 'new' : 'same');
@@ -225,15 +299,6 @@ END_COMMIT_MESSAGE
progress(" --- Reminder: $change_message");
}
-#########################
-# is_treadmill_commit # ARG (HEAD or HEAD^) commit message =~ treadmill
-#########################
-sub is_treadmill_commit {
- my $commit_message = git('log', '-1', '--format=%s', @_);
- print "[$commit_message]\n" if $verbose;
- $commit_message =~ /buildah.*treadmill/;
-}
-
###############
# pull_main # Switch to main, and pull latest from github
###############
@@ -243,6 +308,58 @@ sub pull_main {
git('pull', '-r', git_upstream(), 'main');
}
+#############################
+# tweak_cirrus_test_order # Run bud tests first, to fail fast & early
+#############################
+sub tweak_cirrus_test_order {
+ my $cirrus_yml = '.cirrus.yml';
+ my $tmpfile = "$cirrus_yml.tmp.$$";
+ unlink $tmpfile;
+
+ progress("Tweaking test order in $cirrus_yml to run bud tests early");
+ open my $in, '<', $cirrus_yml
+ or do {
+ warn "$ME: Cannot read $cirrus_yml: $!\n";
+ warn "$ME: Will continue anyway\n";
+ return;
+ };
+    open my $out, '>', $tmpfile
+ or die "$ME: Cannot create $tmpfile: $!\n";
+ my $current_task = '';
+ my $in_depend;
+ while (my $line = <$in>) {
+ chomp $line;
+ if ($line =~ /^(\S+)_task:$/) {
+ $current_task = $1;
+ undef $in_depend;
+ }
+ elsif ($line =~ /^(\s+)depends_on:$/) {
+ $in_depend = $1;
+ }
+ elsif ($in_depend && $line =~ /^($in_depend\s+-\s+)(\S+)/) {
+ if ($current_task eq 'buildah_bud_test') {
+ # Buildah bud test now depends on validate, so it runs early
+ $line = "${1}validate";
+ }
+ elsif ($2 eq 'validate' && $current_task ne 'success') {
+                # Other tests that relied on validate now rely on bud instead
+ $line = "${1}buildah_bud_test";
+ }
+ }
+ else {
+ undef $in_depend;
+ }
+
+ print { $out } $line, "\n";
+ }
+ close $in;
+ close $out
+ or die "$ME: Error writing $tmpfile: $!\n";
+ chmod 0644 => $tmpfile;
+ rename $tmpfile => $cirrus_yml
+ or die "$ME: Could not rename $tmpfile: $!\n";
+}
+
############################
# build_and_check_podman # Run quick (local) sanity checks before pushing
############################
@@ -254,21 +371,51 @@ sub build_and_check_podman {
system('make') == 0
or die "$ME: 'make' failed with new buildah. Cannot continue.\n";
- # See if any new options need man pages
+ # See if any new options need man pages. (C_Warning will highlight errs)
progress('Cross-checking man pages...');
+ print $C_Warning;
$errs += system('hack/xref-helpmsgs-manpages');
+ print $C_Reset;
# Confirm that buildah-bud patches still apply. This requires knowing
# the name of the directory created by the bud-tests script.
progress("Confirming that buildah-bud-tests patches still apply...");
system('rm -rf test-buildah-*');
- $errs += system('test/buildah-bud/run-buildah-bud-tests', '--no-test');
- # Clean up
- system('rm -rf test-buildah-*');
+ if (system('test/buildah-bud/run-buildah-bud-tests', '--no-test')) {
+ # Error
+ ++$errs;
+ warn "$ME: Leaving test-buildah- directory for you to investigate\n";
+ }
+ else {
+ # Patches apply cleanly. Clean up
+ system('rm -rf test-buildah-*');
+ }
return if !$errs;
- warn "$ME: Errors found. Please address, then add to HEAD^ commit\n";
- die " ...see $ME --help for more information.\n";
+ warn <<"END_WARN";
+$ME: Errors found. I have to stop now for you to fix them.
+ Your best bet now is:
+ 1) Find and fix whatever needs to be fixed; then
+ 2) git commit -am'fixme-fixme'; then
+ 3) git rebase -i main:
+ a) you are now in an editor window
+ b) move the new fixme-fixme commit up a line, to between the
+ 'buildah vendor treadmill' and 'vendor in buildah @ ...' lines
+ c) change 'pick' to 'squash' (or just 's')
+ d) save & quit to continue the rebase
+ e) back to a new editor window
+ f) change the commit message: remove fixme-fixme, add a description
+ of what you actually fixed. If possible, reference the PR (buildah
+ or podman) that introduced the failure
+ g) save & quit to continue the rebase
+
+ Now, for good measure, rerun this script.
+
+ For full documentation, refer to
+
+ $Docs_URL
+END_WARN
+ exit 1;
}
# END sync and its helpers
@@ -281,14 +428,18 @@ sub do_pick {
my $current_branch = git_current_branch();
# Confirm that current branch is a buildah-vendor one
- head_is_buildah_vendor_commit(1);
+ assert_buildah_vendor_commit('HEAD');
progress("HEAD is a buildah vendor commit. Good.");
# Identify and pull the treadmill PR
my $treadmill_pr = treadmill_pr();
my $treadmill_branch = "$ME/pr$treadmill_pr/tmp$$";
progress("Fetching treadmill PR $treadmill_pr into $treadmill_branch");
- git('fetch', git_upstream(), "pull/$treadmill_pr/head:$treadmill_branch");
+ git('fetch', '-q', git_upstream(), "pull/$treadmill_pr/head:$treadmill_branch");
+
+ # Compare merge bases of our branch and the treadmill one
+ progress("Checking merge bases");
+ check_merge_bases($treadmill_pr, $treadmill_branch);
# read buildah go.mod from it, and from current tree, and compare
my $buildah_on_treadmill = vendored_buildah($treadmill_branch);
@@ -309,7 +460,7 @@ sub do_pick {
build_and_check_podman();
- progress("Looks good! Please 'git commit --amend' before pushing.");
+ progress("Looks good! Please 'git commit --amend' and edit commit message before pushing.");
}
##################
@@ -383,6 +534,51 @@ END_QUERY
return $prs[0]{node}{number};
}
+#######################
+# check_merge_bases # It's OK if our branch is newer than treadmill
+#######################
+sub check_merge_bases {
+ my $treadmill_pr = shift; # e.g., 12345
+ my $treadmill_branch = shift; # e.g., b-v-p/pr12345/tmpNNN
+
+ # Fetch latest main, for accurate comparison
+ git('fetch', '-q', git_upstream(), 'main');
+
+ my $forkpoint_cur = git_forkpoint();
+ my $forkpoint_treadmill = git_forkpoint($treadmill_branch);
+
+ print "fork cur: $forkpoint_cur\nfork tm: $forkpoint_treadmill\n"
+ if $debug;
+ if ($forkpoint_cur eq $forkpoint_treadmill) {
+ progress("Nice. This branch is up-to-date wrt treadmill PR $treadmill_pr");
+ return;
+ }
+
+ # They differ.
+ if (git_is_ancestor($forkpoint_cur, $forkpoint_treadmill)) {
+ warn <<"END_WARN";
+$ME: treadmill PR $treadmill_pr is based on
+ a newer main than this branch. This means it might have
+ more up-to-date patches.
+
+END_WARN
+
+ if ($force_old_main) {
+ warn "$ME: Proceeding due to --force-old-main\n";
+ return;
+ }
+
+ # Cannot continue. Clean up side branch, and bail.
+ git('branch', '-D', $treadmill_branch);
+ warn "$ME: You might want to consider rebasing on latest main.\n";
+ warn "$ME: Aborting. Use --force-old-main to continue without rebasing.\n";
+ exit 1;
+ }
+ else {
+ progress("Your branch is based on a newer main than treadmill PR $treadmill_pr. This is usually OK.");
+ }
+}
+
#################
# cherry_pick # cherry-pick a commit, updating its commit message
#################
@@ -390,7 +586,7 @@ sub cherry_pick {
my $treadmill_pr = shift; # e.g., 12345
my $treadmill_branch = shift; # e.g., b-v-p/pr12345/tmpNNN
- progress("Cherry-picking from $treadmill_pr^");
+ progress("Cherry-picking from $treadmill_pr");
# Create a temp script. Do so in /var/tmp because sometimes $TMPDIR
# (e.g. /tmp) has noexec.
@@ -436,7 +632,7 @@ END_EDIT_SCRIPT
or die "$ME: Error writing $editor: $!\n";
chmod 0755 => $editor;
local $ENV{EDITOR} = $editor;
- git('cherry-pick', '--allow-empty', '--edit', "$treadmill_branch^");
+ git('cherry-pick', '--allow-empty', '--edit', $treadmill_branch);
unlink $editor;
}
@@ -448,19 +644,42 @@ END_EDIT_SCRIPT
# progress # Progris riport Dr Strauss says I shud rite down what I think
##############
sub progress {
- print $Highlight, "|\n+---> @_\n", $Reset;
+ print $C_Highlight, "|\n+---> @_\n", $C_Reset;
}
#######################
# assert_clean_repo # Don't even think of running with local changes
#######################
sub assert_clean_repo {
- my @changed = git('status', '--porcelain', '--untracked=no')
- or return;
+ # Our patch file should only exist for brief moments during a sync run.
+ # If it exists at any other time, something has gone very wrong.
+ if (-e $Patch_File) {
+ warn <<"END_WARN";
+$ME: File exists: $Patch_File
+
+ This means that something went very wrong during an earlier sync run.
+ Your git branch may be in an inconsistent state. Your work to date
+ may be lost. This file may be your only hope of recovering it.
+
+ This is not something a script can resolve. You need to look at this
+ file, compare to your git HEAD, and manually reconcile any differences.
+END_WARN
+ exit 1;
+ }
- warn "$ME: Modified files in repo:\n";
- warn " $_\n" for @changed;
- exit 1;
+ # OK so far. Now check for modified files.
+ if (my @changed = git('status', '--porcelain', '--untracked=no')) {
+ warn "$ME: Modified files in repo:\n";
+ warn " $_\n" for @changed;
+ exit 1;
+ }
+
+ # ...and for untracked files under vendor/
+ if (my @v = git('status', '--porcelain', '--untracked=all', 'vendor')) {
+ warn "$ME: Untracked vendor files:\n";
+ warn " $_\n" for @v;
+ exit 1;
+ }
}
########################
@@ -474,6 +693,25 @@ sub git_current_branch() {
return $b;
}
+###################
+# git_forkpoint # Hash at which branch (default: cur) branched from main
+###################
+sub git_forkpoint {
+ return git('merge-base', '--fork-point', 'main', @_);
+}
+
+#####################
+# git_is_ancestor # Is hash1 an ancestor of hash2?
+#####################
+sub git_is_ancestor {
+ # Use system(), not git(), because we don't want to abort on exit status
+ my $rc = system('git', 'merge-base', '--is-ancestor', @_);
+ die "$ME: Cannot continue\n" if $? > 256; # e.g., Not a valid object
+
+ # Translate shell 0/256 status to logical 1/0
+ return !$rc;
+}
+
##################
# git_upstream # Name of true github upstream
##################
@@ -510,13 +748,16 @@ sub git {
return wantarray ? @results : join("\n", @results);
}
-###################################
-# head_is_buildah_vendor_commit # Returns 1 if HEAD is buildah vendor
-###################################
-sub head_is_buildah_vendor_commit {
- my $fatal = shift; # in: if true, die upon anything missing
+##################################
+# assert_buildah_vendor_commit # Fails if input arg is not a buildah vendor
+##################################
+sub assert_buildah_vendor_commit {
+ my $ref = shift; # in: probably HEAD or HEAD^
- my @deltas = git('diff', '--name-only', 'HEAD^', 'HEAD');
+ my @deltas = git('diff', '--name-only', "$ref^", $ref);
+
+ # It's OK if there are no deltas, e.g. immediately after a buildah vendor PR
+ return if !@deltas;
# It's OK if there are more modified files than just these.
# It's not OK if any of these are missing.
@@ -532,19 +773,11 @@ sub head_is_buildah_vendor_commit {
push @missing, "no changes under $Buildah";
}
- if (@missing) {
- if ($fatal || $verbose) {
- warn "$ME: HEAD does not look like a buildah vendor commit:\n";
- warn "$ME: - $_\n" for @missing;
- if ($fatal) {
- die "$ME: Cannot continue\n";
- }
- warn "$ME: ...this might be okay, continuing anyway...\n";
- }
- return;
- }
+ return if !@missing;
- return 1;
+ warn "$ME: $ref does not look like a buildah vendor commit:\n";
+ warn "$ME: - $_\n" for @missing;
+ die "$ME: Cannot continue\n";
}
######################
diff --git a/hack/podman-registry-go/registry.go b/hack/podman-registry-go/registry.go
index 095f6fb18..af8f3117c 100644
--- a/hack/podman-registry-go/registry.go
+++ b/hack/podman-registry-go/registry.go
@@ -31,10 +31,31 @@ type Registry struct {
running bool
}
+// Options allows for customizing a registry.
+type Options struct {
+ // Image - custom registry image.
+ Image string
+}
+
// Start a new registry and return it along with it's image, user, password, and port.
func Start() (*Registry, error) {
+ return StartWithOptions(nil)
+}
+
+// StartWithOptions starts a new registry and returns it along with its image, user, password, and port.
+func StartWithOptions(options *Options) (*Registry, error) {
+ if options == nil {
+ options = &Options{}
+ }
+
+ var args []string
+ if options.Image != "" {
+ args = append(args, "-i", options.Image)
+ }
+ args = append(args, "start")
+
// Start a registry.
- out, err := utils.ExecCmd(binary, "start")
+ out, err := utils.ExecCmd(binary, args...)
if err != nil {
return nil, errors.Wrapf(err, "error running %q: %s", binary, out)
}
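
A minimal usage sketch for the new StartWithOptions API above. The import path and the
"registry" alias are assumptions based on the helper's in-tree location, and the image
reference is purely illustrative; only Options, StartWithOptions, and the Registry return
type are taken from this diff.

package main

import (
	"fmt"
	"log"

	registry "github.com/containers/podman/v4/hack/podman-registry-go"
)

func main() {
	// Start a throwaway local registry from a custom image; leaving Image
	// empty falls back to the helper's default registry image.
	reg, err := registry.StartWithOptions(&registry.Options{
		Image: "quay.io/libpod/registry:2.8", // illustrative image reference
	})
	if err != nil {
		log.Fatalf("starting registry: %v", err)
	}
	fmt.Printf("registry running: %+v\n", reg)
}
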
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index e43226490..d6f035af9 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -542,8 +542,12 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
ctr.ID(), s.namespace, ctr.config.Namespace)
}
+ // Set the original networks to nil. We can save some space by not storing it in the config
+ // since we store it in a different mutable bucket anyway.
+ configNetworks := ctr.config.Networks
+ ctr.config.Networks = nil
+
// JSON container structs to insert into DB
- // TODO use a higher-performance struct encoding than JSON
configJSON, err := json.Marshal(ctr.config)
if err != nil {
return errors.Wrapf(err, "error marshalling container %s config to JSON", ctr.ID())
@@ -564,8 +568,8 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
// make sure to marshal the network options before we get the db lock
- networks := make(map[string][]byte, len(ctr.config.Networks))
- for net, opts := range ctr.config.Networks {
+ networks := make(map[string][]byte, len(configNetworks))
+ for net, opts := range configNetworks {
// Check that we don't have any empty network names
if net == "" {
return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string")
@@ -581,9 +585,6 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
}
networks[net] = optBytes
}
- // Set the original value to nil. We can safe some space by not storing it in the config
- // since we store it in a different mutable bucket anyway.
- ctr.config.Networks = nil
db, err := s.getDBCon()
if err != nil {
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index 91e712c74..7566fbb12 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -48,7 +48,11 @@ func (c *Container) copyFromArchive(path string, chown bool, rename map[string]s
if err != nil {
return nil, err
}
- unmount = func() { c.unmount(false) }
+ unmount = func() {
+ if err := c.unmount(false); err != nil {
+ logrus.Errorf("Failed to unmount container: %v", err)
+ }
+ }
}
if c.state.State == define.ContainerStateRunning {
@@ -117,7 +121,11 @@ func (c *Container) copyToArchive(path string, writer io.Writer) (func() error,
if err != nil {
return nil, err
}
- unmount = func() { c.unmount(false) }
+ unmount = func() {
+ if err := c.unmount(false); err != nil {
+ logrus.Errorf("Failed to unmount container: %v", err)
+ }
+ }
}
statInfo, resolvedRoot, resolvedPath, err := c.stat(mountPoint, path)
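
The wrapped defers above all follow one idiom: do not discard the error from a deferred
cleanup, log it instead. A self-contained sketch of that idiom with a stand-in unmount
function; nothing below is podman API.

package example

import "github.com/sirupsen/logrus"

// withLoggedUnmount runs work against a mounted filesystem and logs, rather
// than silently drops, any error from the deferred unmount.
func withLoggedUnmount(unmount func(force bool) error, work func()) {
	defer func() {
		if err := unmount(false); err != nil {
			logrus.Errorf("Failed to unmount container: %v", err)
		}
	}()
	work()
}
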
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 5c6719bdf..7494eb3ec 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1939,9 +1939,51 @@ func (c *Container) cleanup(ctx context.Context) error {
}
}
+ if err := c.stopPodIfNeeded(context.Background()); err != nil {
+ if lastError == nil {
+ lastError = err
+ } else {
+ logrus.Errorf("Stopping pod of container %s: %v", c.ID(), err)
+ }
+ }
+
return lastError
}
+// If the container is part of a pod where only the infra container remains
+// running, attempt to stop the pod.
+func (c *Container) stopPodIfNeeded(ctx context.Context) error {
+ if c.config.Pod == "" {
+ return nil
+ }
+
+ pod, err := c.runtime.state.Pod(c.config.Pod)
+ if err != nil {
+ return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), c.config.Pod, err)
+ }
+
+ switch pod.config.ExitPolicy {
+ case config.PodExitPolicyContinue:
+ return nil
+
+ case config.PodExitPolicyStop:
+ // Use the runtime's work queue to stop the pod. This resolves
+ // a number of scenarios where we'd otherwise run into
+ // deadlocks. For instance, during `pod stop`, the pod has
+ // already been locked.
+		// The work queue is a simple way to avoid these issues without having
+		// to worry about future changes that may introduce more deadlock scenarios.
+ c.runtime.queueWork(func() {
+ if err := pod.stopIfOnlyInfraRemains(ctx, c.ID()); err != nil {
+ if !errors.Is(err, define.ErrNoSuchPod) {
+ logrus.Errorf("Checking if infra needs to be stopped: %v", err)
+ }
+ }
+ })
+ }
+ return nil
+}
+
// delete deletes the container and runs any configured poststop
// hooks.
func (c *Container) delete(ctx context.Context) error {
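
stopPodIfNeeded hands the actual pod stop to the runtime's work queue instead of stopping
the pod inline, so it never has to take pod locks that may already be held (for example
during `pod stop`). A minimal sketch of such a queue, assuming only the behavior described
in the comment above; it is not the real runtime worker implementation.

package example

import "sync"

// workQueue runs queued functions on a single background goroutine, outside
// the caller's lock scope.
type workQueue struct {
	jobs chan func()
	wg   sync.WaitGroup
}

func newWorkQueue() *workQueue {
	q := &workQueue{jobs: make(chan func(), 10)}
	q.wg.Add(1)
	go func() {
		defer q.wg.Done()
		for job := range q.jobs {
			job()
		}
	}()
	return q
}

// queueWork enqueues a function and returns immediately.
func (q *workQueue) queueWork(job func()) { q.jobs <- job }

// shutdown stops accepting work and waits for queued jobs to finish.
func (q *workQueue) shutdown() {
	close(q.jobs)
	q.wg.Wait()
}
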
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 31edff762..4742b22ab 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1180,7 +1180,11 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
return err
}
// Clean-up buildah working container
- defer importBuilder.Delete()
+ defer func() {
+ if err := importBuilder.Delete(); err != nil {
+ logrus.Errorf("Image builder delete failed: %v", err)
+ }
+ }()
if err := c.prepareCheckpointExport(); err != nil {
return err
@@ -1201,7 +1205,9 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
// Copy checkpoint from temporary tar file in the image
addAndCopyOptions := buildah.AddAndCopyOptions{}
- importBuilder.Add("", true, addAndCopyOptions, options.TargetFile)
+ if err := importBuilder.Add("", true, addAndCopyOptions, options.TargetFile); err != nil {
+ return err
+ }
if err := c.addCheckpointImageMetadata(importBuilder); err != nil {
return err
@@ -1543,7 +1549,11 @@ func (c *Container) importCheckpointImage(ctx context.Context, imageID string) e
}
mountPoint, err := img.Mount(ctx, nil, "")
- defer img.Unmount(true)
+ defer func() {
+		if err := img.Unmount(true); err != nil {
+			logrus.Errorf("Failed to unmount checkpoint image: %v", err)
+ }
+ }()
if err != nil {
return err
}
@@ -2113,15 +2123,9 @@ func (c *Container) makeBindMounts() error {
}
} else {
if !c.config.UseImageResolvConf {
- newResolv, err := c.generateResolvConf()
- if err != nil {
+ if err := c.generateResolvConf(); err != nil {
return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
}
- err = c.mountIntoRootDirs("/etc/resolv.conf", newResolv)
-
- if err != nil {
- return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
- }
}
if !c.config.UseImageHosts {
@@ -2278,23 +2282,25 @@ rootless=%d
}
// generateResolvConf generates a containers resolv.conf
-func (c *Container) generateResolvConf() (string, error) {
+func (c *Container) generateResolvConf() error {
var (
nameservers []string
networkNameServers []string
networkSearchDomains []string
)
+ hostns := true
resolvConf := "/etc/resolv.conf"
for _, namespace := range c.config.Spec.Linux.Namespaces {
if namespace.Type == spec.NetworkNamespace {
+ hostns = false
if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
_, err := os.Stat(definedPath)
if err == nil {
resolvConf = definedPath
} else if !os.IsNotExist(err) {
- return "", err
+ return err
}
}
break
@@ -2304,17 +2310,17 @@ func (c *Container) generateResolvConf() (string, error) {
contents, err := ioutil.ReadFile(resolvConf)
// resolv.conf doesn't have to exists
if err != nil && !os.IsNotExist(err) {
- return "", err
+ return err
}
ns := resolvconf.GetNameservers(contents)
// check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver
- if len(ns) == 1 && ns[0] == "127.0.0.53" {
+ if !hostns && len(ns) == 1 && ns[0] == "127.0.0.53" {
// read the actual resolv.conf file for systemd-resolved
resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf")
if err != nil {
if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf")
+ return errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf")
}
} else {
contents = resolvedContents
@@ -2337,21 +2343,21 @@ func (c *Container) generateResolvConf() (string, error) {
ipv6, err := c.checkForIPv6(netStatus)
if err != nil {
- return "", err
+ return err
}
// Ensure that the container's /etc/resolv.conf is compatible with its
// network configuration.
- resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, c.config.CreateNetNS)
+ resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, !hostns)
if err != nil {
- return "", errors.Wrapf(err, "error parsing host resolv.conf")
+ return errors.Wrapf(err, "error parsing host resolv.conf")
}
dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers)+len(c.config.DNSServer))
for _, i := range c.runtime.config.Containers.DNSServers {
result := net.ParseIP(i)
if result == nil {
- return "", errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
+ return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
}
dns = append(dns, result)
}
@@ -2402,20 +2408,15 @@ func (c *Container) generateResolvConf() (string, error) {
destPath := filepath.Join(c.state.RunDir, "resolv.conf")
if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "container %s", c.ID())
+ return errors.Wrapf(err, "container %s", c.ID())
}
// Build resolv.conf
if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil {
- return "", errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
+ return errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
}
- // Relabel resolv.conf for the container
- if err := c.relabel(destPath, c.config.MountLabel, true); err != nil {
- return "", err
- }
-
- return destPath, nil
+ return c.bindMountRootFile(destPath, "/etc/resolv.conf")
}
// Check if a container uses IPv6.
@@ -2590,17 +2591,21 @@ func (c *Container) createHosts() error {
return err
}
- if err := os.Chown(targetFile, c.RootUID(), c.RootGID()); err != nil {
+ return c.bindMountRootFile(targetFile, config.DefaultHostsFile)
+}
+
+// bindMountRootFile will chown and relabel the source file to make it usable in the container.
+// It will also add the path to the container bind mount map.
+// source is the path on the host, dest is the path in the container.
+func (c *Container) bindMountRootFile(source, dest string) error {
+ if err := os.Chown(source, c.RootUID(), c.RootGID()); err != nil {
return err
}
- if err := label.Relabel(targetFile, c.MountLabel(), false); err != nil {
+ if err := label.Relabel(source, c.MountLabel(), false); err != nil {
return err
}
- if err = c.mountIntoRootDirs(config.DefaultHostsFile, targetFile); err != nil {
- return err
- }
- return nil
+ return c.mountIntoRootDirs(dest, source)
}
// generateGroupEntry generates an entry or entries into /etc/group as
diff --git a/libpod/define/info.go b/libpod/define/info.go
index 713129ada..911fa5c03 100644
--- a/libpod/define/info.go
+++ b/libpod/define/info.go
@@ -1,6 +1,8 @@
package define
-import "github.com/containers/storage/pkg/idtools"
+import (
+ "github.com/containers/storage/pkg/idtools"
+)
// Info is the overall struct that describes the host system
// running libpod/podman
@@ -31,6 +33,7 @@ type HostInfo struct {
CgroupControllers []string `json:"cgroupControllers"`
Conmon *ConmonInfo `json:"conmon"`
CPUs int `json:"cpus"`
+ CPUUtilization *CPUUsage `json:"cpuUtilization"`
Distribution DistributionInfo `json:"distribution"`
EventLogger string `json:"eventLogger"`
Hostname string `json:"hostname"`
@@ -108,11 +111,15 @@ type StoreInfo struct {
GraphDriverName string `json:"graphDriverName"`
GraphOptions map[string]interface{} `json:"graphOptions"`
GraphRoot string `json:"graphRoot"`
- GraphStatus map[string]string `json:"graphStatus"`
- ImageCopyTmpDir string `json:"imageCopyTmpDir"`
- ImageStore ImageStore `json:"imageStore"`
- RunRoot string `json:"runRoot"`
- VolumePath string `json:"volumePath"`
+ // GraphRootAllocated is how much space the graphroot has in bytes
+ GraphRootAllocated uint64 `json:"graphRootAllocated"`
+ // GraphRootUsed is how much of graphroot is used in bytes
+ GraphRootUsed uint64 `json:"graphRootUsed"`
+ GraphStatus map[string]string `json:"graphStatus"`
+ ImageCopyTmpDir string `json:"imageCopyTmpDir"`
+ ImageStore ImageStore `json:"imageStore"`
+ RunRoot string `json:"runRoot"`
+ VolumePath string `json:"volumePath"`
}
// ImageStore describes the image store. Right now only the number
@@ -137,3 +144,9 @@ type Plugins struct {
// FIXME what should we do with Authorization, docker seems to return nothing by default
// Authorization []string `json:"authorization"`
}
+
+type CPUUsage struct {
+ UserPercent float64 `json:"userPercent"`
+ SystemPercent float64 `json:"systemPercent"`
+ IdlePercent float64 `json:"idlePercent"`
+}
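
The new CPUUtilization, GraphRootAllocated, and GraphRootUsed fields serialize through the
JSON tags shown above. A small sketch of the resulting wire format for CPUUsage, reusing
the percentages from the test data later in this patch; the import path matches the one
used by libpod/info_test.go.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/podman/v4/libpod/define"
)

func main() {
	usage := define.CPUUsage{UserPercent: 2.48, SystemPercent: 0.71, IdlePercent: 96.81}
	out, err := json.MarshalIndent(usage, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints:
	// {
	//   "userPercent": 2.48,
	//   "systemPercent": 0.71,
	//   "idlePercent": 96.81
	// }
	fmt.Println(string(out))
}
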
diff --git a/libpod/define/pod_inspect.go b/libpod/define/pod_inspect.go
index e85a660a1..219ffade2 100644
--- a/libpod/define/pod_inspect.go
+++ b/libpod/define/pod_inspect.go
@@ -19,6 +19,8 @@ type InspectPodData struct {
// CreateCommand is the full command plus arguments of the process the
// container has been created with.
CreateCommand []string `json:"CreateCommand,omitempty"`
+ // ExitPolicy of the pod.
+ ExitPolicy string `json:"ExitPolicy,omitempty"`
// State represents the current state of the pod.
State string `json:"State"`
// Hostname is the hostname that the pod will set.
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 35680a275..00cdca007 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -17,6 +17,8 @@ const (
Journald EventerType = iota
// Null is a no-op events logger. It does not read or write events.
Null EventerType = iota
+ // Memory indicates the event logger will hold events in memory
+ Memory EventerType = iota
)
// Event describes the attributes of a libpod event
@@ -55,7 +57,7 @@ type Details struct {
// EventerOptions describe options that need to be passed to create
// an eventer
type EventerOptions struct {
- // EventerType describes whether to use journald or a file
+ // EventerType describes whether to use journald, file or memory
EventerType string
// LogFilePath is the path to where the log file should reside if using
// the file logger
@@ -110,6 +112,8 @@ const (
System Type = "system"
// Volume - event is related to volumes
Volume Type = "volume"
+	// Machine - event is related to machine VMs
+ Machine Type = "machine"
// Attach ...
Attach Status = "attach"
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 1745095fb..04417fd8d 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -20,6 +20,8 @@ func (et EventerType) String() string {
return "file"
case Journald:
return "journald"
+ case Memory:
+ return "memory"
case Null:
return "none"
default:
@@ -34,6 +36,8 @@ func IsValidEventer(eventer string) bool {
return true
case Journald.String():
return true
+ case Memory.String():
+ return true
case Null.String():
return true
default:
@@ -41,7 +45,7 @@ func IsValidEventer(eventer string) bool {
}
}
-// NewEvent creates a event struct and populates with
+// NewEvent creates an event struct and populates with
// the given status and time.
func NewEvent(status Status) Event {
return Event{
@@ -63,7 +67,7 @@ func (e *Event) ToJSONString() (string, error) {
return string(b), err
}
-// ToHumanReadable returns human readable event as a formatted string
+// ToHumanReadable returns human-readable event as a formatted string
func (e *Event) ToHumanReadable(truncate bool) string {
var humanFormat string
id := e.ID
@@ -90,7 +94,7 @@ func (e *Event) ToHumanReadable(truncate bool) string {
} else {
humanFormat = fmt.Sprintf("%s %s %s", e.Time, e.Type, e.Status)
}
- case Volume:
+ case Volume, Machine:
humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name)
}
return humanFormat
@@ -99,19 +103,19 @@ func (e *Event) ToHumanReadable(truncate bool) string {
// NewEventFromString takes stringified json and converts
// it to an event
func newEventFromJSONString(event string) (*Event, error) {
- e := Event{}
- if err := json.Unmarshal([]byte(event), &e); err != nil {
+ e := new(Event)
+ if err := json.Unmarshal([]byte(event), e); err != nil {
return nil, err
}
- return &e, nil
+ return e, nil
}
-// ToString converts a Type to a string
+// String converts a Type to a string
func (t Type) String() string {
return string(t)
}
-// ToString converts a status to a string
+// String converts a status to a string
func (s Status) String() string {
return string(s)
}
@@ -123,6 +127,8 @@ func StringToType(name string) (Type, error) {
return Container, nil
case Image.String():
return Image, nil
+ case Machine.String():
+ return Machine, nil
case Network.String():
return Network, nil
case Pod.String():
diff --git a/libpod/events/events_linux.go b/libpod/events/events_linux.go
index 482d7d6dd..4320f2190 100644
--- a/libpod/events/events_linux.go
+++ b/libpod/events/events_linux.go
@@ -21,6 +21,8 @@ func NewEventer(options EventerOptions) (Eventer, error) {
return EventLogFile{options}, nil
case strings.ToUpper(Null.String()):
return NewNullEventer(), nil
+ case strings.ToUpper(Memory.String()):
+ return NewMemoryEventer(), nil
default:
return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
}
diff --git a/libpod/events/memory.go b/libpod/events/memory.go
new file mode 100644
index 000000000..b3e03d86b
--- /dev/null
+++ b/libpod/events/memory.go
@@ -0,0 +1,49 @@
+package events
+
+import (
+ "context"
+)
+
+// EventMemory is the structure for writing events to an in-memory channel. It contains the
+// eventer options and the event queue itself. Methods for reading and writing are defined on it.
+type EventMemory struct {
+ options EventerOptions
+ elements chan *Event
+}
+
+// Write event to memory queue
+func (e EventMemory) Write(event Event) (err error) {
+ e.elements <- &event
+ return
+}
+
+// Read event(s) from memory queue
+func (e EventMemory) Read(ctx context.Context, options ReadOptions) (err error) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ select {
+ case event := <-e.elements:
+ options.EventChannel <- event
+ default:
+ }
+ return nil
+}
+
+// String returns eventer type
+func (e EventMemory) String() string {
+ return e.options.EventerType
+}
+
+// NewMemoryEventer returns configured MemoryEventer
+func NewMemoryEventer() Eventer {
+ return EventMemory{
+ options: EventerOptions{
+ EventerType: Memory.String(),
+ },
+ elements: make(chan *Event, 100),
+ }
+}
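
A short sketch of exercising the new in-memory eventer: write one event, then read it back
through the channel handed in via ReadOptions. The EventChannel field is taken from the
Read implementation above; the exact shape of ReadOptions beyond that field, and the use
of the Attach status, are assumptions made for illustration.

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/libpod/events"
)

func main() {
	eventer := events.NewMemoryEventer()

	// Queue one event in memory.
	if err := eventer.Write(events.NewEvent(events.Attach)); err != nil {
		panic(err)
	}

	// Drain it back out through the channel supplied in ReadOptions.
	ch := make(chan *events.Event, 1)
	if err := eventer.Read(context.Background(), events.ReadOptions{EventChannel: ch}); err != nil {
		panic(err)
	}
	fmt.Println((<-ch).ToHumanReadable(false))
}
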
diff --git a/libpod/info.go b/libpod/info.go
index e0b490768..321680a81 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -5,11 +5,13 @@ import (
"bytes"
"fmt"
"io/ioutil"
+ "math"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
+ "syscall"
"time"
"github.com/containers/buildah"
@@ -115,7 +117,10 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
if err != nil {
return nil, errors.Wrapf(err, "error getting available cgroup controllers")
}
-
+ cpuUtil, err := getCPUUtilization()
+ if err != nil {
+ return nil, err
+ }
info := define.HostInfo{
Arch: runtime.GOARCH,
BuildahVersion: buildah.Version,
@@ -123,6 +128,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
CgroupControllers: availableControllers,
Linkmode: linkmode.Linkmode(),
CPUs: runtime.NumCPU(),
+ CPUUtilization: cpuUtil,
Distribution: hostDistributionInfo,
LogDriver: r.config.Containers.LogDriver,
EventLogger: r.eventer.String(),
@@ -285,17 +291,25 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
}
imageInfo := define.ImageStore{Number: len(images)}
+ var grStats syscall.Statfs_t
+ if err := syscall.Statfs(r.store.GraphRoot(), &grStats); err != nil {
+		return nil, errors.Wrapf(err, "unable to collect graph root usage for %q", r.store.GraphRoot())
+ }
+ allocated := uint64(grStats.Bsize) * grStats.Blocks
info := define.StoreInfo{
- ImageStore: imageInfo,
- ImageCopyTmpDir: os.Getenv("TMPDIR"),
- ContainerStore: conInfo,
- GraphRoot: r.store.GraphRoot(),
- RunRoot: r.store.RunRoot(),
- GraphDriverName: r.store.GraphDriverName(),
- GraphOptions: nil,
- VolumePath: r.config.Engine.VolumePath,
- ConfigFile: configFile,
+ ImageStore: imageInfo,
+ ImageCopyTmpDir: os.Getenv("TMPDIR"),
+ ContainerStore: conInfo,
+ GraphRoot: r.store.GraphRoot(),
+ GraphRootAllocated: allocated,
+ GraphRootUsed: allocated - (uint64(grStats.Bsize) * grStats.Bfree),
+ RunRoot: r.store.RunRoot(),
+ GraphDriverName: r.store.GraphDriverName(),
+ GraphOptions: nil,
+ VolumePath: r.config.Engine.VolumePath,
+ ConfigFile: configFile,
}
+
graphOptions := map[string]interface{}{}
for _, o := range r.store.GraphOptions() {
split := strings.SplitN(o, "=", 2)
@@ -382,3 +396,44 @@ func (r *Runtime) GetHostDistributionInfo() define.DistributionInfo {
}
return dist
}
+
+// getCPUUtilization returns a CPUUsage object that summarizes CPU
+// usage for userspace, system, and idle time.
+func getCPUUtilization() (*define.CPUUsage, error) {
+ f, err := os.Open("/proc/stat")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+	// Read first line of /proc/stat
+ for scanner.Scan() {
+ break
+ }
+ // column 1 is user, column 3 is system, column 4 is idle
+ stats := strings.Split(scanner.Text(), " ")
+ return statToPercent(stats)
+}
+
+func statToPercent(stats []string) (*define.CPUUsage, error) {
+ // There is always an extra space between cpu and the first metric
+ userTotal, err := strconv.ParseFloat(stats[2], 64)
+ if err != nil {
+		return nil, errors.Wrapf(err, "unable to parse user value %q", stats[2])
+ }
+ systemTotal, err := strconv.ParseFloat(stats[4], 64)
+ if err != nil {
+		return nil, errors.Wrapf(err, "unable to parse system value %q", stats[4])
+ }
+ idleTotal, err := strconv.ParseFloat(stats[5], 64)
+ if err != nil {
+		return nil, errors.Wrapf(err, "unable to parse idle value %q", stats[5])
+ }
+ total := userTotal + systemTotal + idleTotal
+ s := define.CPUUsage{
+ UserPercent: math.Round((userTotal/total*100)*100) / 100,
+ SystemPercent: math.Round((systemTotal/total*100)*100) / 100,
+ IdlePercent: math.Round((idleTotal/total*100)*100) / 100,
+ }
+ return &s, nil
+}
diff --git a/libpod/info_test.go b/libpod/info_test.go
new file mode 100644
index 000000000..909b573c0
--- /dev/null
+++ b/libpod/info_test.go
@@ -0,0 +1,59 @@
+package libpod
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_statToPercent(t *testing.T) {
+ type args struct {
+ in0 []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want *define.CPUUsage
+ wantErr assert.ErrorAssertionFunc
+ }{
+ {
+ name: "GoodParse",
+ args: args{in0: []string{"cpu", " ", "33628064", "27537", "9696996", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ want: &define.CPUUsage{
+ UserPercent: 2.48,
+ SystemPercent: 0.71,
+ IdlePercent: 96.81,
+ },
+ wantErr: assert.NoError,
+ },
+ {
+ name: "BadUserValue",
+ args: args{in0: []string{"cpu", " ", "k", "27537", "9696996", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ want: nil,
+ wantErr: assert.Error,
+ },
+ {
+ name: "BadSystemValue",
+ args: args{in0: []string{"cpu", " ", "33628064", "27537", "k", "1314806705", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ want: nil,
+ wantErr: assert.Error,
+ },
+ {
+ name: "BadIdleValue",
+ args: args{in0: []string{"cpu", " ", "33628064", "27537", "9696996", "k", "588142", "4775073", "2789228", "0", "598711", "0"}},
+ want: nil,
+ wantErr: assert.Error,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := statToPercent(tt.args.in0)
+ if !tt.wantErr(t, err, fmt.Sprintf("statToPercent(%v)", tt.args.in0)) {
+ return
+ }
+ assert.Equalf(t, tt.want, got, "statToPercent(%v)", tt.args.in0)
+ })
+ }
+}
diff --git a/libpod/kube.go b/libpod/kube.go
index 8b75a0c44..5a5fe9d35 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -1034,7 +1034,11 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s mountpoint", c.ID())
}
- defer c.unmount(false)
+ defer func() {
+ if err := c.unmount(false); err != nil {
+ logrus.Errorf("Failed to unmount container: %v", err)
+ }
+ }()
}
logrus.Debugf("Looking in container for user: %s", c.User())
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 2770b040e..0c124cf0b 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -488,7 +488,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
pid := strconv.Itoa(cmd.Process.Pid)
err = ioutil.WriteFile(filepath.Join(rootlessNetNsDir, rootlessNetNsSilrp4netnsPidFile), []byte(pid), 0700)
if err != nil {
- errors.Wrap(err, "unable to write rootless-netns slirp4netns pid file")
+ return nil, errors.Wrap(err, "unable to write rootless-netns slirp4netns pid file")
}
defer func() {
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index b5eabec1f..06f8f8719 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -9,6 +9,7 @@ import (
"net"
"os"
"path/filepath"
+ "syscall"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod/define"
@@ -259,7 +260,7 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO
}
}
}
- if er == io.EOF {
+ if errors.Is(er, io.EOF) || errors.Is(er, syscall.ECONNRESET) {
break
}
if er != nil {
@@ -274,11 +275,15 @@ func readStdio(conn *net.UnixConn, streams *define.AttachStreams, receiveStdoutE
var err error
select {
case err = <-receiveStdoutError:
- conn.CloseWrite()
+ if err := conn.CloseWrite(); err != nil {
+ logrus.Errorf("Failed to close stdin: %v", err)
+ }
return err
case err = <-stdinDone:
if err == define.ErrDetach {
- conn.CloseWrite()
+ if err := conn.CloseWrite(); err != nil {
+ logrus.Errorf("Failed to close stdin: %v", err)
+ }
return err
}
if err == nil {
diff --git a/libpod/options.go b/libpod/options.go
index 98eb45e76..9b83cb76a 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -1843,6 +1843,24 @@ func WithPodName(name string) PodCreateOption {
}
}
+// WithPodExitPolicy sets the exit policy of the pod.
+func WithPodExitPolicy(policy string) PodCreateOption {
+ return func(pod *Pod) error {
+ if pod.valid {
+ return define.ErrPodFinalized
+ }
+
+ parsed, err := config.ParsePodExitPolicy(policy)
+ if err != nil {
+ return err
+ }
+
+ pod.config.ExitPolicy = parsed
+
+ return nil
+ }
+}
+
// WithPodHostname sets the hostname of the pod.
func WithPodHostname(hostname string) PodCreateOption {
return func(pod *Pod) error {
diff --git a/libpod/pod.go b/libpod/pod.go
index 237c42901..2211d5be7 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -6,6 +6,7 @@ import (
"strings"
"time"
+ "github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/lock"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -70,6 +71,9 @@ type PodConfig struct {
// container has been created with.
CreateCommand []string `json:"CreateCommand,omitempty"`
+ // The pod's exit policy.
+ ExitPolicy config.PodExitPolicy `json:"ExitPolicy,omitempty"`
+
// ID of the pod's lock
LockID uint32 `json:"lockID"`
}
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index ba30d878e..73b28822b 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -2,6 +2,7 @@ package libpod
import (
"context"
+ "fmt"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/podman/v4/libpod/define"
@@ -134,6 +135,10 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
p.lock.Lock()
defer p.lock.Unlock()
+ return p.stopWithTimeout(ctx, cleanup, timeout)
+}
+
+func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
if !p.valid {
return nil, define.ErrPodRemoved
}
@@ -195,6 +200,51 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
return nil, nil
}
+// stopIfOnlyInfraRemains stops the pod if only the infra container remains running.
+func (p *Pod) stopIfOnlyInfraRemains(ctx context.Context, ignoreID string) error {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ infraID := ""
+
+ if p.HasInfraContainer() {
+ infra, err := p.infraContainer()
+ if err != nil {
+ return err
+ }
+ infraID = infra.ID()
+ }
+
+ allCtrs, err := p.runtime.state.PodContainers(p)
+ if err != nil {
+ return err
+ }
+
+ for _, ctr := range allCtrs {
+ if ctr.ID() == infraID || ctr.ID() == ignoreID {
+ continue
+ }
+
+ state, err := ctr.State()
+ if err != nil {
+ return fmt.Errorf("getting state of container %s: %w", ctr.ID(), err)
+ }
+
+ switch state {
+ case define.ContainerStateExited,
+ define.ContainerStateRemoving,
+ define.ContainerStateStopping,
+ define.ContainerStateUnknown:
+ continue
+ default:
+ return nil
+ }
+ }
+
+ _, err = p.stopWithTimeout(ctx, true, -1)
+ return err
+}
+
// Cleanup cleans up all containers within a pod that have stopped.
// All containers are cleaned up independently. An error with one container will
// not prevent other containers being cleaned up.
@@ -661,6 +711,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
Namespace: p.Namespace(),
Created: p.CreatedTime(),
CreateCommand: p.config.CreateCommand,
+ ExitPolicy: string(p.config.ExitPolicy),
State: podState,
Hostname: p.config.Hostname,
Labels: p.Labels(),
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 6c2323d88..f4cd9cf00 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -86,6 +86,10 @@ type Runtime struct {
libimageEventsShutdown chan bool
lockManager lock.Manager
+ // Worker
+ workerShutdown chan bool
+ workerChannel chan func()
+
// syslog describes whenever logrus should log to the syslog as well.
// Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go
// This bool is just needed so that we can set it for netavark interface.
@@ -597,6 +601,8 @@ func makeRuntime(runtime *Runtime) (retErr error) {
}
}
+ runtime.startWorker()
+
// Mark the runtime as valid - ready to be used, cannot be modified
// further
runtime.valid = true
@@ -817,6 +823,14 @@ func (r *Runtime) Shutdown(force bool) error {
return define.ErrRuntimeStopped
}
+ if r.workerShutdown != nil {
+ // Signal the worker routine to shut down. The routine will
+ // process all pending work items and then read from the
+ // channel; we're blocked until all work items have been
+ // processed.
+ r.workerShutdown <- true
+ }
+
r.valid = false
// Shutdown all containers if --force is given
@@ -1144,7 +1158,7 @@ func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) {
return plugin.GetVolumePlugin(name, pluginPath)
}
-// GetSecretsStoreageDir returns the directory that the secrets manager should take
+// GetSecretsStorageDir returns the directory that the secrets manager should take
func (r *Runtime) GetSecretsStorageDir() string {
return filepath.Join(r.store.GraphRoot(), "secrets")
}
@@ -1192,7 +1206,17 @@ func (r *Runtime) Network() nettypes.ContainerNetwork {
return r.network
}
-// Network returns the network interface which is used by the runtime
+// GetDefaultNetworkName returns the name of the default network used by the runtime
func (r *Runtime) GetDefaultNetworkName() string {
return r.config.Network.DefaultNetwork
}
+
+// RemoteURI returns the API server URI
+func (r *Runtime) RemoteURI() string {
+ return r.config.Engine.RemoteURI
+}
+
+// SetRemoteURI records the API server URI
+func (r *Runtime) SetRemoteURI(uri string) {
+ r.config.Engine.RemoteURI = uri
+}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index fd3ffd199..df7174ac6 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -513,7 +513,9 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
case define.NoLogging, define.PassthroughLogging:
break
case define.JournaldLogging:
- ctr.initializeJournal(ctx)
+ if err := ctr.initializeJournal(ctx); err != nil {
+ return nil, fmt.Errorf("failed to initialize journal: %w", err)
+ }
default:
if ctr.config.LogPath == "" {
ctr.config.LogPath = filepath.Join(ctr.config.StaticDir, "ctr.log")
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 2bbccfdf6..62ec7df60 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -199,10 +199,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Go through and lock all containers so we can operate on them all at
// once.
// First loop also checks that we are ready to go ahead and remove.
+ containersLocked := true
for _, ctr := range ctrs {
ctrLock := ctr.lock
ctrLock.Lock()
- defer ctrLock.Unlock()
+ defer func() {
+ if containersLocked {
+ ctrLock.Unlock()
+ }
+ }()
// If we're force-removing, no need to check status.
if force {
@@ -304,6 +309,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
+ // Unlock the containers so that any in-flight cleanup process can finish.
+ for _, ctr := range ctrs {
+ ctr.lock.Unlock()
+ }
+ containersLocked = false
+
// Remove pod cgroup, if present
if p.state.CgroupPath != "" {
logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)
@@ -332,7 +343,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
if err == nil {
- if err := conmonCgroup.Delete(); err != nil {
+ if err = conmonCgroup.Delete(); err != nil {
if removalErr == nil {
removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
} else {
diff --git a/libpod/runtime_worker.go b/libpod/runtime_worker.go
new file mode 100644
index 000000000..ca44a27f7
--- /dev/null
+++ b/libpod/runtime_worker.go
@@ -0,0 +1,41 @@
+package libpod
+
+import (
+ "time"
+)
+
+func (r *Runtime) startWorker() {
+ if r.workerChannel == nil {
+ r.workerChannel = make(chan func(), 1)
+ r.workerShutdown = make(chan bool)
+ }
+ go func() {
+ for {
+ // Make sure to drain all queued work items before
+ // checking whether we're about to shut down.
+ for len(r.workerChannel) > 0 {
+ w := <-r.workerChannel
+ w()
+ }
+
+ select {
+ // We'll read from the shutdown channel only when all
+ // items above have been processed.
+ //
+ // (*Runtime).Shutdown() will block until the
+ // item is read.
+ case <-r.workerShutdown:
+ return
+
+ default:
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ }()
+}
+
+func (r *Runtime) queueWork(f func()) {
+ go func() {
+ r.workerChannel <- f
+ }()
+}
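
The worker drains its buffered channel before it ever polls the shutdown channel, which is what lets Shutdown's send double as a barrier: by the time the send completes, all queued work has run. A standalone sketch of the same drain-then-shutdown loop, with plain channels instead of the Runtime fields:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        work := make(chan func(), 1)
        shutdown := make(chan bool)

        go func() {
            for {
                // Drain every pending work item first.
                for len(work) > 0 {
                    (<-work)()
                }
                select {
                case <-shutdown:
                    return // unblocks the sender below
                default:
                    time.Sleep(10 * time.Millisecond)
                }
            }
        }()

        work <- func() { fmt.Println("queued work ran") }
        time.Sleep(50 * time.Millisecond) // give the worker a chance to pick it up
        shutdown <- true                  // blocks until the worker reads it
        fmt.Println("worker stopped")
    }
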
diff --git a/libpod/util.go b/libpod/util.go
index 51fe60427..1753b4f34 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -55,8 +55,11 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
if err := watcher.Add(filepath.Dir(path)); err == nil {
inotifyEvents = watcher.Events
}
- defer watcher.Close()
- defer watcher.Remove(filepath.Dir(path))
+ defer func() {
+ if err := watcher.Close(); err != nil {
+ logrus.Errorf("Failed to close fsnotify watcher: %v", err)
+ }
+ }()
}
var timeoutChan <-chan time.Time
diff --git a/pkg/api/handlers/decoder.go b/pkg/api/handlers/decoder.go
index 5e8f12960..fbe03d97b 100644
--- a/pkg/api/handlers/decoder.go
+++ b/pkg/api/handlers/decoder.go
@@ -21,6 +21,7 @@ func NewAPIDecoder() *schema.Decoder {
d.RegisterConverter(map[string][]string{}, convertURLValuesString)
d.RegisterConverter(time.Time{}, convertTimeString)
d.RegisterConverter(define.ContainerStatus(0), convertContainerStatusString)
+ d.RegisterConverter(map[string]string{}, convertStringMap)
var Signal syscall.Signal
d.RegisterConverter(Signal, convertSignal)
@@ -48,6 +49,15 @@ func convertURLValuesString(query string) reflect.Value {
return reflect.ValueOf(f)
}
+func convertStringMap(query string) reflect.Value {
+ res := make(map[string]string)
+ err := json.Unmarshal([]byte(query), &res)
+ if err != nil {
+ logrus.Infof("convertStringMap: Failed to Unmarshal %s: %s", query, err.Error())
+ }
+ return reflect.ValueOf(res)
+}
+
func convertContainerStatusString(query string) reflect.Value {
result, err := define.StringToContainerStatus(query)
if err != nil {
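
With the converter registered once on the shared decoder, any handler can declare a map[string]string query field and callers pass a JSON object in the URL (for example ?labels={"owner":"qa"}). A standalone sketch of the same idea against gorilla/schema, which the decoder above is built on:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/url"
        "reflect"

        "github.com/gorilla/schema"
    )

    func main() {
        d := schema.NewDecoder()
        // Same idea as convertStringMap: decode a JSON object into a string map.
        d.RegisterConverter(map[string]string{}, func(query string) reflect.Value {
            res := make(map[string]string)
            if err := json.Unmarshal([]byte(query), &res); err != nil {
                fmt.Println("ignoring bad map value:", err)
            }
            return reflect.ValueOf(res)
        })

        query := struct {
            Labels map[string]string `schema:"labels"`
        }{}
        values := url.Values{"labels": []string{`{"owner":"qa","tier":"1"}`}}
        if err := d.Decode(&query, values); err != nil {
            panic(err)
        }
        fmt.Println(query.Labels["owner"]) // qa
    }
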
diff --git a/pkg/api/handlers/libpod/play.go b/pkg/api/handlers/libpod/play.go
index ca9ada761..b71afc28c 100644
--- a/pkg/api/handlers/libpod/play.go
+++ b/pkg/api/handlers/libpod/play.go
@@ -70,6 +70,16 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
password = authConf.Password
}
+ logDriver := query.LogDriver
+ if logDriver == "" {
+ config, err := runtime.GetConfig()
+ if err != nil {
+ utils.Error(w, http.StatusInternalServerError, err)
+ return
+ }
+ query.LogDriver = config.Containers.LogDriver
+ }
+
containerEngine := abi.ContainerEngine{Libpod: runtime}
options := entities.PlayKubeOptions{
Annotations: query.Annotations,
diff --git a/pkg/api/handlers/libpod/secrets.go b/pkg/api/handlers/libpod/secrets.go
index 8708e630c..3ea2c2ea8 100644
--- a/pkg/api/handlers/libpod/secrets.go
+++ b/pkg/api/handlers/libpod/secrets.go
@@ -1,9 +1,7 @@
package libpod
import (
- "encoding/json"
"net/http"
- "reflect"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers/utils"
@@ -20,12 +18,6 @@ func CreateSecret(w http.ResponseWriter, r *http.Request) {
decoder = r.Context().Value(api.DecoderKey).(*schema.Decoder)
)
- decoder.RegisterConverter(map[string]string{}, func(str string) reflect.Value {
- res := make(map[string]string)
- json.Unmarshal([]byte(str), &res)
- return reflect.ValueOf(res)
- })
-
query := struct {
Name string `schema:"name"`
Driver string `schema:"driver"`
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index a906a01f1..7f5537fb4 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -20,7 +20,6 @@ import (
"github.com/containers/podman/v4/pkg/api/server/idle"
"github.com/containers/podman/v4/pkg/api/types"
"github.com/containers/podman/v4/pkg/domain/entities"
- "github.com/coreos/go-systemd/v22/activation"
"github.com/coreos/go-systemd/v22/daemon"
"github.com/gorilla/mux"
"github.com/gorilla/schema"
@@ -65,25 +64,7 @@ func NewServerWithSettings(runtime *libpod.Runtime, listener net.Listener, opts
}
func newServer(runtime *libpod.Runtime, listener net.Listener, opts entities.ServiceOptions) (*APIServer, error) {
- // If listener not provided try socket activation protocol
- if listener == nil {
- if _, found := os.LookupEnv("LISTEN_PID"); !found {
- return nil, fmt.Errorf("no service listener provided and socket activation protocol is not active")
- }
-
- listeners, err := activation.Listeners()
- if err != nil {
- return nil, fmt.Errorf("cannot retrieve file descriptors from systemd: %w", err)
- }
- if len(listeners) != 1 {
- return nil, fmt.Errorf("wrong number of file descriptors for socket activation protocol (%d != 1)", len(listeners))
- }
- listener = listeners[0]
- // note that activation.Listeners() return nil when it cannot listen on the fd (i.e. udp connection)
- if listener == nil {
- return nil, fmt.Errorf("unexpected fd received from systemd: cannot listen on it")
- }
- }
+ logrus.Infof("API service listening on %q. URI: %q", listener.Addr(), runtime.RemoteURI())
if opts.CorsHeaders == "" {
logrus.Debug("CORS Headers were not set")
} else {
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
index 1e25e0872..a19edcbf0 100644
--- a/pkg/domain/entities/pods.go
+++ b/pkg/domain/entities/pods.go
@@ -122,6 +122,7 @@ type PodCreateOptions struct {
CreateCommand []string `json:"create_command,omitempty"`
Devices []string `json:"devices,omitempty"`
DeviceReadBPs []string `json:"device_read_bps,omitempty"`
+ ExitPolicy string `json:"exit_policy,omitempty"`
Hostname string `json:"hostname,omitempty"`
Infra bool `json:"infra,omitempty"`
InfraImage string `json:"infra_image,omitempty"`
@@ -319,6 +320,7 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod
}
s.Pid = out
s.Hostname = p.Hostname
+ s.ExitPolicy = p.ExitPolicy
s.Labels = p.Labels
s.Devices = p.Devices
s.SecurityOpt = p.SecurityOpt
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index 92e29c822..5ca678d6f 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -641,7 +641,11 @@ func (ic *ContainerEngine) ContainerRestore(ctx context.Context, namesOrIds []st
}
restoreOptions.CheckpointImageID = img.ID()
mountPoint, err := img.Mount(ctx, nil, "")
- defer img.Unmount(true)
+ defer func() {
+ if err := img.Unmount(true); err != nil {
+ logrus.Errorf("Failed to unmount image: %v", err)
+ }
+ }()
if err != nil {
return nil, err
}
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 0da07bab8..b3ded7db6 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -197,7 +197,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
return nil, errors.Errorf("pod does not have a name")
}
- podOpt := entities.PodCreateOptions{Infra: true, Net: &entities.NetOptions{NoHosts: options.NoHosts}}
+ podOpt := entities.PodCreateOptions{
+ Infra: true,
+ Net: &entities.NetOptions{NoHosts: options.NoHosts},
+ ExitPolicy: string(config.PodExitPolicyStop),
+ }
podOpt, err = kube.ToPodOpt(ctx, podName, podOpt, podYAML)
if err != nil {
return nil, err
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 8e96e4154..17df0e3f8 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -6,6 +6,7 @@ import (
"net/url"
"os"
"os/exec"
+ "path/filepath"
"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
@@ -27,27 +28,40 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
if err != nil {
return nil, err
}
+ info.Host.RemoteSocket = &define.RemoteSocket{Path: ic.Libpod.RemoteURI()}
- socketPath, err := util.SocketPath()
+ // `podman system connection add` invokes podman via ssh to fill in the connection string. Here
+ // we report the default systemd activation socket path, since we cannot know whether a future
+ // service may be run with another URI.
+ if ic.Libpod.RemoteURI() == "" {
+ xdg := "/run"
+ if path, err := util.GetRuntimeDir(); err != nil {
+ // Info is as good as we can guess...
+ return info, err
+ } else if path != "" {
+ xdg = path
+ }
+
+ uri := url.URL{
+ Scheme: "unix",
+ Path: filepath.Join(xdg, "podman", "podman.sock"),
+ }
+ ic.Libpod.SetRemoteURI(uri.String())
+ info.Host.RemoteSocket.Path = uri.Path
+ }
+
+ uri, err := url.Parse(ic.Libpod.RemoteURI())
if err != nil {
return nil, err
}
- rs := define.RemoteSocket{
- Path: socketPath,
- Exists: false,
- }
- // Check if the socket exists
- if fi, err := os.Stat(socketPath); err == nil {
- if fi.Mode()&os.ModeSocket != 0 {
- rs.Exists = true
- }
+ if uri.Scheme == "unix" {
+ _, err := os.Stat(uri.Path)
+ info.Host.RemoteSocket.Exists = err == nil
+ } else {
+ info.Host.RemoteSocket.Exists = true
}
- // TODO
- // it was suggested future versions of this could perform
- // a ping on the socket for greater confidence the socket is
- // actually active.
- info.Host.RemoteSocket = &rs
+
return info, err
}
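
When no remote URI has been recorded, the fallback mirrors the path the systemd socket unit would activate: $XDG_RUNTIME_DIR/podman/podman.sock for rootless users and /run/podman/podman.sock for root. A quick sketch of the string that gets stored, assuming a rootless runtime dir of /run/user/1000:

    package main

    import (
        "fmt"
        "net/url"
        "path/filepath"
    )

    func main() {
        xdg := "/run/user/1000" // rootless; root falls back to plain /run
        uri := url.URL{
            Scheme: "unix",
            Path:   filepath.Join(xdg, "podman", "podman.sock"),
        }
        fmt.Println(uri.String()) // unix:///run/user/1000/podman/podman.sock
    }
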
diff --git a/pkg/domain/utils/utils_test.go b/pkg/domain/utils/utils_test.go
new file mode 100644
index 000000000..952a4b5be
--- /dev/null
+++ b/pkg/domain/utils/utils_test.go
@@ -0,0 +1,76 @@
+package utils
+
+import (
+ "net/url"
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestToLibpodFilters(t *testing.T) {
+ good := url.Values{}
+ good.Set("apple", "red")
+ good.Set("banana", "yellow")
+ good.Set("pear", "")
+ goodResult := []string{"apple=red", "banana=yellow", "pear="}
+ sort.Strings(goodResult)
+
+ empty := url.Values{}
+ type args struct {
+ f url.Values
+ }
+ tests := []struct {
+ name string
+ args args
+ wantFilters []string
+ }{
+ {
+ name: "GoodURLValue",
+ args: args{
+ f: good,
+ },
+ wantFilters: goodResult,
+ },
+ {
+ name: "Empty",
+ args: args{
+ f: empty,
+ },
+ wantFilters: nil,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.ElementsMatchf(t, ToLibpodFilters(tt.args.f), tt.wantFilters, "ToLibpodFilters() = %v, want %v", ToLibpodFilters(tt.args.f), tt.wantFilters)
+ })
+ }
+}
+
+func TestToURLValues(t *testing.T) {
+ good := url.Values{}
+ good.Set("apple", "red")
+ good.Set("banana", "yellow")
+ good.Set("pear", "")
+ goodResult := []string{"apple=red", "banana=yellow", "pear="}
+
+ type args struct {
+ f []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantFilters url.Values
+ }{
+ {
+ name: "Good",
+ args: args{goodResult},
+ wantFilters: good,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.EqualValuesf(t, ToURLValues(tt.args.f), tt.wantFilters, "ToURLValues() = %v, want %v", ToURLValues(tt.args.f), tt.wantFilters)
+ })
+ }
+}
diff --git a/pkg/env/env_test.go b/pkg/env/env_test.go
new file mode 100644
index 000000000..c77061ecf
--- /dev/null
+++ b/pkg/env/env_test.go
@@ -0,0 +1,162 @@
+package env
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSlice(t *testing.T) {
+ goodMap := make(map[string]string, 0)
+ goodMap["apple"] = "red"
+ goodMap["banana"] = "yellow"
+ goodMap["pear"] = ""
+ goodResult := []string{"apple=red", "banana=yellow", "pear"}
+ type args struct {
+ m map[string]string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []string
+ }{
+ {
+ name: "Good",
+ args: args{
+ m: goodMap,
+ },
+ want: goodResult,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.ElementsMatchf(t, Slice(tt.args.m), tt.want, "Slice() = %v, want %v", Slice(tt.args.m), tt.want)
+ })
+ }
+}
+
+func TestJoin(t *testing.T) {
+ firstMap := make(map[string]string, 0)
+ firstMap["apple"] = "red"
+ secondMap := make(map[string]string, 0)
+ secondMap["banana"] = "yellow"
+ goodResult := make(map[string]string, 0)
+ goodResult["apple"] = "red"
+ goodResult["banana"] = "yellow"
+ overrideResult := make(map[string]string, 0)
+ overrideResult["apple"] = "green"
+ overrideResult["banana"] = "yellow"
+ overrideMap := make(map[string]string, 0)
+ overrideMap["banana"] = "yellow"
+ overrideMap["apple"] = "green"
+ type args struct {
+ base map[string]string
+ override map[string]string
+ }
+ tests := []struct {
+ name string
+ args args
+ want map[string]string
+ }{
+ {
+ name: "GoodJoin",
+ args: args{
+ base: firstMap,
+ override: secondMap,
+ },
+ want: goodResult,
+ },
+ {
+ name: "GoodOverride",
+ args: args{
+ base: firstMap,
+ override: overrideMap,
+ },
+ want: overrideResult,
+ },
+ {
+ name: "EmptyOverride",
+ args: args{
+ base: firstMap,
+ override: nil,
+ },
+ want: firstMap,
+ },
+ {
+ name: "EmptyBase",
+ args: args{
+ base: nil,
+ override: firstMap,
+ },
+ want: firstMap,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := Join(tt.args.base, tt.args.override)
+ assert.EqualValuesf(t, got, tt.want, "Join() = %v, want %v", got, tt.want)
+ })
+ }
+}
+
+func Test_parseEnv(t *testing.T) {
+ good := make(map[string]string)
+
+ type args struct {
+ env map[string]string
+ line string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Good",
+ args: args{
+ env: good,
+ line: "apple=red",
+ },
+ wantErr: false,
+ },
+ {
+ name: "GoodNoValue",
+ args: args{
+ env: good,
+ line: "apple=",
+ },
+ wantErr: false,
+ },
+ {
+ name: "GoodNoKeyNoValue",
+ args: args{
+ env: good,
+ line: "=",
+ },
+ wantErr: true,
+ },
+ {
+ name: "BadNoKey",
+ args: args{
+ env: good,
+ line: "=foobar",
+ },
+ wantErr: true,
+ },
+ {
+ name: "BadOnlyDelim",
+ args: args{
+ env: good,
+ line: "=",
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := parseEnv(tt.args.env, tt.args.line); (err != nil) != tt.wantErr {
+ t.Errorf("parseEnv() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/pkg/machine/config.go b/pkg/machine/config.go
index 9a0ce757a..d34776714 100644
--- a/pkg/machine/config.go
+++ b/pkg/machine/config.go
@@ -52,6 +52,7 @@ type Provider interface {
List(opts ListOptions) ([]*ListResponse, error)
IsValidVMName(name string) (bool, error)
CheckExclusiveActiveVM() (bool, string, error)
+ RemoveAndCleanMachines() error
}
type RemoteConnectionType string
@@ -170,11 +171,11 @@ func (rc RemoteConnectionType) MakeSSHURL(host, path, port, userName string) url
// GetDataDir returns the filepath where vm images should
// live for podman-machine.
func GetDataDir(vmType string) (string, error) {
- data, err := homedir.GetDataHome()
+ dataDirPrefix, err := DataDirPrefix()
if err != nil {
return "", err
}
- dataDir := filepath.Join(data, "containers", "podman", "machine", vmType)
+ dataDir := filepath.Join(dataDirPrefix, vmType)
if _, err := os.Stat(dataDir); !os.IsNotExist(err) {
return dataDir, nil
}
@@ -182,14 +183,24 @@ func GetDataDir(vmType string) (string, error) {
return dataDir, mkdirErr
}
+// DataDirPrefix returns the path prefix for all machine data files
+func DataDirPrefix() (string, error) {
+ data, err := homedir.GetDataHome()
+ if err != nil {
+ return "", err
+ }
+ dataDir := filepath.Join(data, "containers", "podman", "machine")
+ return dataDir, nil
+}
+
// GetConfigDir returns the filepath to where configuration
// files for podman-machine should live
func GetConfDir(vmType string) (string, error) {
- conf, err := homedir.GetConfigHome()
+ confDirPrefix, err := ConfDirPrefix()
if err != nil {
return "", err
}
- confDir := filepath.Join(conf, "containers", "podman", "machine", vmType)
+ confDir := filepath.Join(confDirPrefix, vmType)
if _, err := os.Stat(confDir); !os.IsNotExist(err) {
return confDir, nil
}
@@ -197,6 +208,16 @@ func GetConfDir(vmType string) (string, error) {
return confDir, mkdirErr
}
+// ConfDirPrefix returns the path prefix for all machine config files
+func ConfDirPrefix() (string, error) {
+ conf, err := homedir.GetConfigHome()
+ if err != nil {
+ return "", err
+ }
+ confDir := filepath.Join(conf, "containers", "podman", "machine")
+ return confDir, nil
+}
+
// ResourceConfig describes physical attributes of the machine
type ResourceConfig struct {
// CPUs to be assigned to the VM
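
Factoring out DataDirPrefix and ConfDirPrefix gives RemoveAndCleanMachines a single directory per tree to wipe, regardless of VM type. Under default XDG settings the prefixes resolve roughly as in this sketch (the homedir helpers come from containers/storage, as already used above):

    package main

    import (
        "fmt"
        "path/filepath"

        "github.com/containers/storage/pkg/homedir"
    )

    func main() {
        data, _ := homedir.GetDataHome()   // typically ~/.local/share
        conf, _ := homedir.GetConfigHome() // typically ~/.config
        fmt.Println(filepath.Join(data, "containers", "podman", "machine")) // data prefix
        fmt.Println(filepath.Join(conf, "containers", "podman", "machine")) // config prefix
        // Per-provider subdirectories such as "qemu" or "wsl" hang off these prefixes.
    }
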
diff --git a/pkg/machine/e2e/inspect_test.go b/pkg/machine/e2e/inspect_test.go
index e282dd21d..b34285dd8 100644
--- a/pkg/machine/e2e/inspect_test.go
+++ b/pkg/machine/e2e/inspect_test.go
@@ -43,9 +43,11 @@ var _ = Describe("podman machine stop", func() {
Expect(foo2).To(Exit(0))
inspect := new(inspectMachine)
+ inspect = inspect.withFormat("{{.Name}}")
inspectSession, err := mb.setName("foo1").setCmd(inspect).run()
Expect(err).To(BeNil())
Expect(inspectSession).To(Exit(0))
+ Expect(inspectSession.Bytes()).To(ContainSubstring("foo1"))
type fakeInfos struct {
Status string
@@ -56,13 +58,13 @@ var _ = Describe("podman machine stop", func() {
Expect(err).ToNot(HaveOccurred())
Expect(len(infos)).To(Equal(2))
- //rm := new(rmMachine)
- //// Must manually clean up due to multiple names
- //for _, name := range []string{"foo1", "foo2"} {
+ // rm := new(rmMachine)
+ // // Must manually clean up due to multiple names
+ // for _, name := range []string{"foo1", "foo2"} {
// mb.setName(name).setCmd(rm.withForce()).run()
// mb.names = []string{}
- //}
- //mb.names = []string{}
+ // }
+ // mb.names = []string{}
})
})
diff --git a/pkg/machine/e2e/machine_test.go b/pkg/machine/e2e/machine_test.go
index 2b3b60b2b..657014b05 100644
--- a/pkg/machine/e2e/machine_test.go
+++ b/pkg/machine/e2e/machine_test.go
@@ -23,14 +23,20 @@ func TestMain(m *testing.M) {
const (
defaultStream string = "podman-testing"
- tmpDir string = "/var/tmp"
)
var (
+ tmpDir = "/var/tmp"
fqImageName string
suiteImageName string
)
+func init() {
+ if value, ok := os.LookupEnv("TMPDIR"); ok {
+ tmpDir = value
+ }
+}
+
// TestLibpod ginkgo master function
func TestMachine(t *testing.T) {
RegisterFailHandler(Fail)
@@ -70,7 +76,8 @@ var _ = SynchronizedAfterSuite(func() {},
})
func setup() (string, *machineTestBuilder) {
- homeDir, err := ioutil.TempDir("/var/tmp", "podman_test")
+ // Passing "" lets TempDir honor TMPDIR when it is set (see init above)
+ homeDir, err := ioutil.TempDir("", "podman_test")
if err != nil {
Fail(fmt.Sprintf("failed to create home directory: %q", err))
}
diff --git a/pkg/machine/qemu/config_test.go b/pkg/machine/qemu/config_test.go
index 0fbb5b3bf..3f92881fa 100644
--- a/pkg/machine/qemu/config_test.go
+++ b/pkg/machine/qemu/config_test.go
@@ -68,9 +68,15 @@ func TestNewMachineFile(t *testing.T) {
p := "/var/tmp/podman/my.sock"
longp := filepath.Join(longTemp, utils.RandomString(100), "my.sock")
- os.MkdirAll(filepath.Dir(longp), 0755)
- f, _ := os.Create(longp)
- f.Close()
+ err = os.MkdirAll(filepath.Dir(longp), 0755)
+ if err != nil {
+ panic(err)
+ }
+ f, err := os.Create(longp)
+ if err != nil {
+ panic(err)
+ }
+ _ = f.Close()
sym := "my.sock"
longSym := filepath.Join(homedir, ".podman", sym)
@@ -120,14 +126,15 @@ func TestNewMachineFile(t *testing.T) {
},
}
for _, tt := range tests {
+ tt := tt
t.Run(tt.name, func(t *testing.T) {
- got, err := machine.NewMachineFile(tt.args.path, tt.args.symlink) //nolint: scopelint
- if (err != nil) != tt.wantErr { //nolint: scopelint
- t.Errorf("NewMachineFile() error = %v, wantErr %v", err, tt.wantErr) //nolint: scopelint
+ got, err := machine.NewMachineFile(tt.args.path, tt.args.symlink)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("NewMachineFile() error = %v, wantErr %v", err, tt.wantErr)
return
}
- if !reflect.DeepEqual(got, tt.want) { //nolint: scopelint
- t.Errorf("NewMachineFile() got = %v, want %v", got, tt.want) //nolint: scopelint
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("NewMachineFile() got = %v, want %v", got, tt.want)
}
})
}
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index 35eea5fb4..6e36b0886 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -484,12 +484,11 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
if err := v.writeConfig(); err != nil {
return fmt.Errorf("writing JSON file: %w", err)
}
- defer func() error {
+ defer func() {
v.Starting = false
if err := v.writeConfig(); err != nil {
- return fmt.Errorf("writing JSON file: %w", err)
+ logrus.Errorf("Writing JSON file: %v", err)
}
- return nil
}()
if v.isIncompatible() {
logrus.Errorf("machine %q is incompatible with this release of podman and needs to be recreated, starting for recovery only", v.Name)
@@ -526,10 +525,11 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
time.Sleep(wait)
wait++
}
- defer qemuSocketConn.Close()
if err != nil {
return err
}
+ defer qemuSocketConn.Close()
+
fd, err := qemuSocketConn.(*net.UnixConn).File()
if err != nil {
return err
@@ -1544,3 +1544,79 @@ func (v *MachineVM) editCmdLine(flag string, value string) {
v.CmdLine = append(v.CmdLine, []string{flag, value}...)
}
}
+
+// RemoveAndCleanMachines removes all machines and cleans up any other files associated with podman machine
+func (p *Provider) RemoveAndCleanMachines() error {
+ var (
+ vm machine.VM
+ listResponse []*machine.ListResponse
+ opts machine.ListOptions
+ destroyOptions machine.RemoveOptions
+ )
+ destroyOptions.Force = true
+ var prevErr error
+
+ listResponse, err := p.List(opts)
+ if err != nil {
+ return err
+ }
+
+ for _, mach := range listResponse {
+ vm, err = p.LoadVMByName(mach.Name)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ _, remove, err := vm.Remove(mach.Name, destroyOptions)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ } else {
+ if err := remove(); err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+ }
+
+ // Clean leftover files in data dir
+ dataDir, err := machine.DataDirPrefix()
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ } else {
+ err := os.RemoveAll(dataDir)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+
+ // Clean leftover files in conf dir
+ confDir, err := machine.ConfDirPrefix()
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ } else {
+ err := os.RemoveAll(confDir)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+ return prevErr
+}
diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go
index addcb7c8a..57fb36fc9 100644
--- a/pkg/machine/wsl/machine.go
+++ b/pkg/machine/wsl/machine.go
@@ -18,7 +18,6 @@ import (
"strings"
"time"
- "github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/machine"
"github.com/containers/podman/v4/utils"
"github.com/containers/storage/pkg/homedir"
@@ -153,20 +152,22 @@ const (
type Provider struct{}
type MachineVM struct {
- // IdentityPath is the fq path to the ssh priv key
- IdentityPath string
+ // ConfigPath is the path to the configuration file
+ ConfigPath string
+ // Created contains the original created time instead of querying the file mod time
+ Created time.Time
// ImageStream is the version of fcos being used
ImageStream string
// ImagePath is the fq path to
ImagePath string
+ // LastUp contains the time the VM was last up
+ LastUp time.Time
// Name of the vm
Name string
- // SSH port for user networking
- Port int
- // RemoteUsername of the vm user
- RemoteUsername string
// Whether this machine should run in a rootful or rootless manner
Rootful bool
+ // SSH identity, username, etc
+ machine.SSHConfig
}
type ExitCodeError struct {
@@ -181,16 +182,22 @@ func GetWSLProvider() machine.Provider {
return wslProvider
}
-// NewMachine initializes an instance of a virtual machine based on the qemu
-// virtualization.
+// NewMachine initializes an instance of a wsl machine
func (p *Provider) NewMachine(opts machine.InitOptions) (machine.VM, error) {
vm := new(MachineVM)
if len(opts.Name) > 0 {
vm.Name = opts.Name
}
+ configPath, err := getConfigPath(opts.Name)
+ if err != nil {
+ return vm, err
+ }
+ vm.ConfigPath = configPath
vm.ImagePath = opts.ImagePath
vm.RemoteUsername = opts.Username
+ vm.Created = time.Now()
+ vm.LastUp = vm.Created
// Add a random port for ssh
port, err := utils.GetRandomPort()
@@ -202,25 +209,69 @@ func (p *Provider) NewMachine(opts machine.InitOptions) (machine.VM, error) {
return vm, nil
}
+func getConfigPath(name string) (string, error) {
+ vmConfigDir, err := machine.GetConfDir(vmtype)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(vmConfigDir, name+".json"), nil
+}
+
// LoadByName reads a json file that describes a known qemu vm
// and returns a vm instance
func (p *Provider) LoadVMByName(name string) (machine.VM, error) {
- vm := new(MachineVM)
- vmConfigDir, err := machine.GetConfDir(vmtype)
+ configPath, err := getConfigPath(name)
if err != nil {
return nil, err
}
- b, err := ioutil.ReadFile(filepath.Join(vmConfigDir, name+".json"))
- if os.IsNotExist(err) {
- return nil, errors.Wrap(machine.ErrNoSuchVM, name)
- }
+
+ vm, err := readAndMigrate(configPath, name)
+ return vm, err
+}
+
+// readAndMigrate reads the VM's JSON configuration file and migrates
+// older (pre-timestamp) configs in place via migrate40
+func readAndMigrate(configPath string, name string) (*MachineVM, error) {
+ vm := new(MachineVM)
+ b, err := os.ReadFile(configPath)
if err != nil {
- return nil, err
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, errors.Wrap(machine.ErrNoSuchVM, name)
+ }
+ return vm, err
}
err = json.Unmarshal(b, vm)
+ if err == nil && vm.Created.IsZero() {
+ err = vm.migrate40(configPath)
+ }
return vm, err
}
+func (v *MachineVM) migrate40(configPath string) error {
+ v.ConfigPath = configPath
+ fi, err := os.Stat(configPath)
+ if err != nil {
+ return err
+ }
+ v.Created = fi.ModTime()
+ v.LastUp = getLegacyLastStart(v)
+ return v.writeConfig()
+}
+
+func getLegacyLastStart(vm *MachineVM) time.Time {
+ vmDataDir, err := machine.GetDataDir(vmtype)
+ if err != nil {
+ return vm.Created
+ }
+ distDir := filepath.Join(vmDataDir, "wsldist")
+ start := filepath.Join(distDir, vm.Name, "laststart")
+ info, err := os.Stat(start)
+ if err != nil {
+ return vm.Created
+ }
+ return info.ModTime()
+}
+
// Init writes the json configuration file to the filesystem for
// other verbs (start, stop)
func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
@@ -289,12 +340,7 @@ func downloadDistro(v *MachineVM, opts machine.InitOptions) error {
}
func (v *MachineVM) writeConfig() error {
- vmConfigDir, err := machine.GetConfDir(vmtype)
- if err != nil {
- return err
- }
-
- jsonFile := filepath.Join(vmConfigDir, v.Name) + ".json"
+ jsonFile := v.ConfigPath
b, err := json.MarshalIndent(v, "", " ")
if err != nil {
@@ -810,7 +856,8 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
}
}
- return markStart(name)
+ _, _, err = v.updateTimeStamps(true)
+ return err
}
func launchWinProxy(v *MachineVM) (bool, string, error) {
@@ -1005,6 +1052,8 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
return errors.Errorf("%q is not running", v.Name)
}
+ _, _, _ = v.updateTimeStamps(true)
+
if err := stopWinProxy(v); err != nil {
fmt.Fprintf(os.Stderr, "Could not stop API forwarding service (win-sshproxy.exe): %s\n", err.Error())
}
@@ -1032,10 +1081,12 @@ func (v *MachineVM) Stop(name string, _ machine.StopOptions) error {
return nil
}
-// TODO: We need to rename isRunning to State(); I do not have a
-// windows system to test this on.
func (v *MachineVM) State(bypass bool) (machine.Status, error) {
- return "", define.ErrNotImplemented
+ if v.isRunning() {
+ return machine.Running, nil
+ }
+
+ return machine.Stopped, nil
}
func stopWinProxy(v *MachineVM) error {
@@ -1210,14 +1261,9 @@ func GetVMInfos() ([]*machine.ListResponse, error) {
var listed []*machine.ListResponse
if err = filepath.WalkDir(vmConfigDir, func(path string, d fs.DirEntry, err error) error {
- vm := new(MachineVM)
if strings.HasSuffix(d.Name(), ".json") {
- fullPath := filepath.Join(vmConfigDir, d.Name())
- b, err := ioutil.ReadFile(fullPath)
- if err != nil {
- return err
- }
- err = json.Unmarshal(b, vm)
+ path := filepath.Join(vmConfigDir, d.Name())
+ vm, err := readAndMigrate(path, strings.TrimSuffix(d.Name(), ".json"))
if err != nil {
return err
}
@@ -1229,15 +1275,13 @@ func GetVMInfos() ([]*machine.ListResponse, error) {
listEntry.CPUs, _ = getCPUs(vm)
listEntry.Memory, _ = getMem(vm)
listEntry.DiskSize = getDiskSize(vm)
- fi, err := os.Stat(fullPath)
- if err != nil {
- return err
- }
- listEntry.CreatedAt = fi.ModTime()
- listEntry.LastUp = getLastStart(vm, fi.ModTime())
- if vm.isRunning() {
- listEntry.Running = true
- }
+ listEntry.RemoteUsername = vm.RemoteUsername
+ listEntry.Port = vm.Port
+ listEntry.IdentityPath = vm.IdentityPath
+
+ running := vm.isRunning()
+ listEntry.CreatedAt, listEntry.LastUp, _ = vm.updateTimeStamps(running)
+ listEntry.Running = running
listed = append(listed, listEntry)
}
@@ -1248,6 +1292,16 @@ func GetVMInfos() ([]*machine.ListResponse, error) {
return listed, err
}
+func (vm *MachineVM) updateTimeStamps(updateLast bool) (time.Time, time.Time, error) {
+ var err error
+ if updateLast {
+ vm.LastUp = time.Now()
+ err = vm.writeConfig()
+ }
+
+ return vm.Created, vm.LastUp, err
+}
+
func getDiskSize(vm *MachineVM) uint64 {
vmDataDir, err := machine.GetDataDir(vmtype)
if err != nil {
@@ -1262,36 +1316,6 @@ func getDiskSize(vm *MachineVM) uint64 {
return uint64(info.Size())
}
-func markStart(name string) error {
- vmDataDir, err := machine.GetDataDir(vmtype)
- if err != nil {
- return err
- }
- distDir := filepath.Join(vmDataDir, "wsldist")
- start := filepath.Join(distDir, name, "laststart")
- file, err := os.Create(start)
- if err != nil {
- return err
- }
- file.Close()
-
- return nil
-}
-
-func getLastStart(vm *MachineVM, created time.Time) time.Time {
- vmDataDir, err := machine.GetDataDir(vmtype)
- if err != nil {
- return created
- }
- distDir := filepath.Join(vmDataDir, "wsldist")
- start := filepath.Join(distDir, vm.Name, "laststart")
- info, err := os.Stat(start)
- if err != nil {
- return created
- }
- return info.ModTime()
-}
-
func getCPUs(vm *MachineVM) (uint64, error) {
dist := toDist(vm.Name)
if run, _ := isWSLRunning(dist); !run {
@@ -1388,6 +1412,109 @@ func (v *MachineVM) setRootful(rootful bool) error {
return nil
}
+// Inspect returns verbose detail about the machine
func (v *MachineVM) Inspect() (*machine.InspectInfo, error) {
- return nil, define.ErrNotImplemented
+ state, err := v.State(false)
+ if err != nil {
+ return nil, err
+ }
+
+ created, lastUp, _ := v.updateTimeStamps(state == machine.Running)
+
+ return &machine.InspectInfo{
+ ConfigPath: machine.VMFile{Path: v.ConfigPath},
+ Created: created,
+ Image: machine.ImageConfig{
+ ImagePath: machine.VMFile{Path: v.ImagePath},
+ ImageStream: v.ImageStream,
+ },
+ LastUp: lastUp,
+ Name: v.Name,
+ Resources: v.getResources(),
+ SSHConfig: v.SSHConfig,
+ State: state,
+ }, nil
+}
+
+func (v *MachineVM) getResources() (resources machine.ResourceConfig) {
+ resources.CPUs, _ = getCPUs(v)
+ resources.Memory, _ = getMem(v)
+ resources.DiskSize = getDiskSize(v)
+ return
+}
+
+// RemoveAndCleanMachines removes all machines and cleans up any other files associated with podman machine
+func (p *Provider) RemoveAndCleanMachines() error {
+ var (
+ vm machine.VM
+ listResponse []*machine.ListResponse
+ opts machine.ListOptions
+ destroyOptions machine.RemoveOptions
+ )
+ destroyOptions.Force = true
+ var prevErr error
+
+ listResponse, err := p.List(opts)
+ if err != nil {
+ return err
+ }
+
+ for _, mach := range listResponse {
+ vm, err = p.LoadVMByName(mach.Name)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ _, remove, err := vm.Remove(mach.Name, destroyOptions)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ } else {
+ if err := remove(); err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+ }
+
+ // Clean leftover files in data dir
+ dataDir, err := machine.DataDirPrefix()
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ } else {
+ err := os.RemoveAll(dataDir)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+
+ // Clean leftover files in conf dir
+ confDir, err := machine.ConfDirPrefix()
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ } else {
+ err := os.RemoveAll(confDir)
+ if err != nil {
+ if prevErr != nil {
+ logrus.Error(prevErr)
+ }
+ prevErr = err
+ }
+ }
+ return prevErr
}
diff --git a/pkg/specgen/generate/pod_create.go b/pkg/specgen/generate/pod_create.go
index a3408b402..fce32d688 100644
--- a/pkg/specgen/generate/pod_create.go
+++ b/pkg/specgen/generate/pod_create.go
@@ -197,6 +197,8 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
options = append(options, libpod.WithPodHostname(p.Hostname))
}
+ options = append(options, libpod.WithPodExitPolicy(p.ExitPolicy))
+
return options, nil
}
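
createPodOptions is the last hop before libpod: the exit-policy string from the spec generator lands in WithPodExitPolicy, which delegates validation to containers/common via ParsePodExitPolicy. A small sketch of that parse step in isolation, using the "stop" value exercised by this change:

    package main

    import (
        "fmt"

        "github.com/containers/common/pkg/config"
    )

    func main() {
        // WithPodExitPolicy simply propagates any error ParsePodExitPolicy returns.
        policy, err := config.ParsePodExitPolicy("stop")
        if err != nil {
            panic(err)
        }
        fmt.Println(policy == config.PodExitPolicyStop) // true
    }
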
diff --git a/pkg/specgen/podspecgen.go b/pkg/specgen/podspecgen.go
index 759caa0c0..1bb64448f 100644
--- a/pkg/specgen/podspecgen.go
+++ b/pkg/specgen/podspecgen.go
@@ -19,6 +19,8 @@ type PodBasicConfig struct {
// all containers in the pod as long as the UTS namespace is shared.
// Optional.
Hostname string `json:"hostname,omitempty"`
+ // ExitPolicy determines the pod's exit and stop behaviour.
+ ExitPolicy string `json:"exit_policy,omitempty"`
// Labels are key-value pairs that are used to add metadata to pods.
// Optional.
Labels map[string]string `json:"labels,omitempty"`
diff --git a/pkg/systemd/generate/pods.go b/pkg/systemd/generate/pods.go
index cd1486a82..4043fbfcb 100644
--- a/pkg/systemd/generate/pods.go
+++ b/pkg/systemd/generate/pods.go
@@ -256,6 +256,16 @@ func generatePodInfo(pod *libpod.Pod, options entities.GenerateSystemdOptions) (
return &info, nil
}
+// setPodExitPolicy sets the pod's exit policy to "stop" unless it has already been specified.
+func setPodExitPolicy(cmd []string) []string {
+ for _, arg := range cmd {
+ if strings.HasPrefix(arg, "--exit-policy=") || arg == "--exit-policy" {
+ return cmd
+ }
+ }
+ return append(cmd, "--exit-policy=stop")
+}
+
// executePodTemplate executes the pod template on the specified podInfo. Note
// that the podInfo is also post processed and completed, which allows for an
// easier unit testing.
@@ -355,6 +365,7 @@ func executePodTemplate(info *podInfo, options entities.GenerateSystemdOptions)
}
startCommand = append(startCommand, podCreateArgs...)
+ startCommand = setPodExitPolicy(startCommand)
startCommand = escapeSystemdArguments(startCommand)
info.ExecStartPre1 = "/bin/rm -f {{{{.PIDFile}}}} {{{{.PodIDFile}}}}"
diff --git a/pkg/systemd/generate/pods_test.go b/pkg/systemd/generate/pods_test.go
index dcb18780c..59f217256 100644
--- a/pkg/systemd/generate/pods_test.go
+++ b/pkg/systemd/generate/pods_test.go
@@ -7,6 +7,28 @@ import (
"github.com/stretchr/testify/assert"
)
+func TestSetPodExitPolicy(t *testing.T) {
+ tests := []struct {
+ input, expected []string
+ }{
+ {
+ []string{"podman", "pod", "create"},
+ []string{"podman", "pod", "create", "--exit-policy=stop"},
+ },
+ {
+ []string{"podman", "pod", "create", "--exit-policy=continue"},
+ []string{"podman", "pod", "create", "--exit-policy=continue"},
+ },
+ {
+ []string{"podman", "pod", "create", "--exit-policy", "continue"},
+ []string{"podman", "pod", "create", "--exit-policy", "continue"},
+ },
+ }
+ for _, test := range tests {
+ assert.Equalf(t, test.expected, setPodExitPolicy(test.input), "%v", test.input)
+ }
+}
+
func TestValidateRestartPolicyPod(t *testing.T) {
type podInfo struct {
restart string
@@ -252,7 +274,7 @@ Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
ExecStartPre=/bin/rm -f %t/pod-123abc.pid %t/pod-123abc.pod-id
-ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo "bar=arg with space" --replace
+ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo "bar=arg with space" --replace --exit-policy=stop
ExecStart=/usr/bin/podman pod start --pod-id-file %t/pod-123abc.pod-id
ExecStop=/usr/bin/podman pod stop --ignore --pod-id-file %t/pod-123abc.pod-id -t 10
ExecStopPost=/usr/bin/podman pod rm --ignore -f --pod-id-file %t/pod-123abc.pod-id
@@ -280,7 +302,7 @@ Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
ExecStartPre=/bin/rm -f %t/pod-123abc.pid %t/pod-123abc.pod-id
-ExecStartPre=/usr/bin/podman --events-backend none --runroot /root pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo "bar=arg with space" --replace
+ExecStartPre=/usr/bin/podman --events-backend none --runroot /root pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo "bar=arg with space" --replace --exit-policy=stop
ExecStart=/usr/bin/podman --events-backend none --runroot /root pod start --pod-id-file %t/pod-123abc.pod-id
ExecStop=/usr/bin/podman --events-backend none --runroot /root pod stop --ignore --pod-id-file %t/pod-123abc.pod-id -t 10
ExecStopPost=/usr/bin/podman --events-backend none --runroot /root pod rm --ignore -f --pod-id-file %t/pod-123abc.pod-id
@@ -308,7 +330,7 @@ Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
ExecStartPre=/bin/rm -f %t/pod-123abc.pid %t/pod-123abc.pod-id
-ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo --replace
+ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo --replace --exit-policy=stop
ExecStart=/usr/bin/podman pod start --pod-id-file %t/pod-123abc.pod-id
ExecStop=/usr/bin/podman pod stop --ignore --pod-id-file %t/pod-123abc.pod-id -t 10
ExecStopPost=/usr/bin/podman pod rm --ignore -f --pod-id-file %t/pod-123abc.pod-id
@@ -336,7 +358,7 @@ Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
ExecStartPre=/bin/rm -f %t/pod-123abc.pid %t/pod-123abc.pod-id
-ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo --label key={{someval}} --replace
+ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/pod-123abc.pid --pod-id-file %t/pod-123abc.pod-id --name foo --label key={{someval}} --exit-policy=continue --replace
ExecStart=/usr/bin/podman pod start --pod-id-file %t/pod-123abc.pod-id
ExecStop=/usr/bin/podman pod stop --ignore --pod-id-file %t/pod-123abc.pod-id -t 10
ExecStopPost=/usr/bin/podman pod rm --ignore -f --pod-id-file %t/pod-123abc.pod-id
@@ -581,7 +603,7 @@ WantedBy=default.target
GraphRoot: "/var/lib/containers/storage",
RunRoot: "/var/run/containers/storage",
RequiredServices: []string{"container-1", "container-2"},
- CreateCommand: []string{"podman", "pod", "create", "--name", "foo", "--label", "key={{someval}}"},
+ CreateCommand: []string{"podman", "pod", "create", "--name", "foo", "--label", "key={{someval}}", "--exit-policy=continue"},
},
podNewLabelWithCurlyBraces,
true,
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 9842a0f73..a0bf8b50d 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -731,29 +731,6 @@ func IDtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxI
return convertedIDMap
}
-var socketPath string
-
-func SetSocketPath(path string) {
- socketPath = path
-}
-
-func SocketPath() (string, error) {
- if socketPath != "" {
- return socketPath, nil
- }
- xdg, err := GetRuntimeDir()
- if err != nil {
- return "", err
- }
- if len(xdg) == 0 {
- // If no xdg is returned, assume root socket
- xdg = "/run"
- }
-
- // Glue the socket path together
- return filepath.Join(xdg, "podman", "podman.sock"), nil
-}
-
func LookupUser(name string) (*user.User, error) {
// Assume UID look up first, if it fails lookup by username
if u, err := user.LookupId(name); err == nil {
diff --git a/test/e2e/attach_test.go b/test/e2e/attach_test.go
index 74e3a619a..b110f7c5b 100644
--- a/test/e2e/attach_test.go
+++ b/test/e2e/attach_test.go
@@ -22,8 +22,6 @@ var _ = Describe("Podman attach", func() {
Expect(err).To(BeNil())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- err = podmanTest.SeedImages()
- Expect(err).To(BeNil())
})
AfterEach(func() {
diff --git a/test/e2e/benchmarks_test.go b/test/e2e/benchmarks_test.go
index 9653cee3b..ef4d51893 100644
--- a/test/e2e/benchmarks_test.go
+++ b/test/e2e/benchmarks_test.go
@@ -36,11 +36,14 @@ type benchmark struct {
options newBenchmarkOptions
}
+var benchmarkRegistry *podmanRegistry.Registry
+
// Allows for customizing the benchmark in an easy-to-extend way.
type newBenchmarkOptions struct {
// Sets the benchmark's init function.
init func()
- // Run a local registry for this benchmark.
+ // Run a local registry for this benchmark. Use `getPortUserPass()` in
+ // the benchmark to get the port, user and password.
needsRegistry bool
}
@@ -53,6 +56,15 @@ func newBenchmark(name string, main func(), options *newBenchmarkOptions) {
allBenchmarks = append(allBenchmarks, bm)
}
+// getPortUserPass returns the port, user and password of the currently running
+// registry.
+func getPortUserPass() (string, string, string) {
+ if benchmarkRegistry == nil {
+ return "", "", ""
+ }
+ return benchmarkRegistry.Port, benchmarkRegistry.User, benchmarkRegistry.Password
+}
+
var _ = Describe("Podman Benchmark Suite", func() {
var (
timedir string
@@ -66,7 +78,6 @@ var _ = Describe("Podman Benchmark Suite", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
-
timedir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
@@ -76,6 +87,15 @@ var _ = Describe("Podman Benchmark Suite", func() {
cleanup := func() {
podmanTest.Cleanup()
os.RemoveAll(timedir)
+
+ // Stop the local registry.
+ if benchmarkRegistry != nil {
+ if err := benchmarkRegistry.Stop(); err != nil {
+ logrus.Errorf("Error stopping registry: %v", err)
+ os.Exit(1)
+ }
+ benchmarkRegistry = nil
+ }
}
totalMemoryInKb := func() (total uint64) {
@@ -110,22 +130,23 @@ var _ = Describe("Podman Benchmark Suite", func() {
// All benchmarks are executed here to have *one* table listing all data.
Measure("Podman Benchmark Suite", func(b Benchmarker) {
+
+ registryOptions := &podmanRegistry.Options{
+ Image: "docker-archive:" + imageTarPath(registry),
+ }
+
for i := range allBenchmarks {
setup()
bm := allBenchmarks[i]
// Start a local registry if requested.
- var registry *podmanRegistry.Registry
if bm.options.needsRegistry {
- reg, err := podmanRegistry.Start()
+ reg, err := podmanRegistry.StartWithOptions(registryOptions)
if err != nil {
logrus.Errorf("Error starting registry: %v", err)
os.Exit(1)
}
- registry = reg
- os.Setenv(podmanRegistry.UserKey, reg.User)
- os.Setenv(podmanRegistry.PassKey, reg.Password)
- os.Setenv(podmanRegistry.PortKey, reg.Port)
+ benchmarkRegistry = reg
}
if bm.options.init != nil {
@@ -140,17 +161,6 @@ var _ = Describe("Podman Benchmark Suite", func() {
mem := totalMemoryInKb()
b.RecordValueWithPrecision("[MEM] "+bm.name, float64(mem), "KB", 1)
- // Stop the local registry.
- if bm.options.needsRegistry {
- os.Unsetenv(podmanRegistry.UserKey)
- os.Unsetenv(podmanRegistry.PassKey)
- os.Unsetenv(podmanRegistry.PortKey)
- if err := registry.Stop(); err != nil {
- logrus.Errorf("Error stopping registry: %v", err)
- os.Exit(1)
- }
- }
-
cleanup()
}
}, numBenchmarkSamples)
@@ -167,11 +177,27 @@ var _ = Describe("Podman Benchmark Suite", func() {
Expect(session).Should(Exit(0))
}, nil)
+ newBenchmark("podman push", func() {
+ port, user, pass := getPortUserPass()
+ session := podmanTest.Podman([]string{"push", "--tls-verify=false", "--creds", user + ":" + pass, UBI_INIT, "localhost:" + port + "/repo/image:tag"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ }, &newBenchmarkOptions{needsRegistry: true})
+
newBenchmark("podman pull", func() {
- session := podmanTest.Podman([]string{"pull", "quay.io/libpod/cirros"})
+ port, user, pass := getPortUserPass()
+ session := podmanTest.Podman([]string{"pull", "--tls-verify=false", "--creds", user + ":" + pass, "localhost:" + port + "/repo/image:tag"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- }, nil)
+ }, &newBenchmarkOptions{
+ needsRegistry: true,
+ init: func() {
+ port, user, pass := getPortUserPass()
+ session := podmanTest.Podman([]string{"push", "--tls-verify=false", "--creds", user + ":" + pass, UBI_INIT, "localhost:" + port + "/repo/image:tag"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ },
+ })
newBenchmark("podman load [docker]", func() {
session := podmanTest.Podman([]string{"load", "-i", "./testdata/docker-two-images.tar.xz"})
@@ -198,9 +224,7 @@ var _ = Describe("Podman Benchmark Suite", func() {
}, nil)
newBenchmark("podman login + logout", func() {
- user := os.Getenv(podmanRegistry.UserKey)
- pass := os.Getenv(podmanRegistry.PassKey)
- port := os.Getenv(podmanRegistry.PortKey)
+ port, user, pass := getPortUserPass()
session := podmanTest.Podman([]string{"login", "-u", user, "-p", pass, "--tls-verify=false", "localhost:" + port})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/build_test.go b/test/e2e/build_test.go
index b8aad1084..b5cec5fff 100644
--- a/test/e2e/build_test.go
+++ b/test/e2e/build_test.go
@@ -533,7 +533,10 @@ subdir**`
// make cwd as context root path
Expect(os.Chdir(contextDir)).ToNot(HaveOccurred())
- defer os.Chdir(cwd)
+ defer func() {
+ err := os.Chdir(cwd)
+ Expect(err).ToNot(HaveOccurred())
+ }()
By("Test .containerignore filtering subdirectory")
err = ioutil.WriteFile(filepath.Join(contextDir, ".containerignore"), []byte(`subdir/`), 0644)
diff --git a/test/e2e/checkpoint_image_test.go b/test/e2e/checkpoint_image_test.go
index 6c2a000e8..94320a70e 100644
--- a/test/e2e/checkpoint_image_test.go
+++ b/test/e2e/checkpoint_image_test.go
@@ -28,7 +28,6 @@ var _ = Describe("Podman checkpoint", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
// Check if the runtime implements checkpointing. Currently only
// runc's checkpoint/restore implementation is supported.
cmd := exec.Command(podmanTest.OCIRuntime, "checkpoint", "--help")
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index ac1677539..787178cd3 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -41,8 +41,6 @@ var _ = Describe("Podman checkpoint", func() {
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- err = podmanTest.SeedImages()
- Expect(err).To(BeNil())
// Check if the runtime implements checkpointing. Currently only
// runc's checkpoint/restore implementation is supported.
cmd := exec.Command(podmanTest.OCIRuntime, "checkpoint", "--help")
diff --git a/test/e2e/commit_test.go b/test/e2e/commit_test.go
index 1de30e423..c82e5e471 100644
--- a/test/e2e/commit_test.go
+++ b/test/e2e/commit_test.go
@@ -24,8 +24,6 @@ var _ = Describe("Podman commit", func() {
Expect(err).To(BeNil())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- err = podmanTest.SeedImages()
- Expect(err).To(BeNil())
})
AfterEach(func() {
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 59252fcb0..28991af7f 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -6,6 +6,7 @@ import (
"io/ioutil"
"math/rand"
"net"
+ "net/url"
"os"
"os/exec"
"path/filepath"
@@ -861,9 +862,6 @@ func (p *PodmanTestIntegration) makeOptions(args []string, noEvents, noCache boo
podmanOptions := strings.Split(fmt.Sprintf("%s--root %s --runroot %s --runtime %s --conmon %s --network-config-dir %s --cgroup-manager %s --tmpdir %s --events-backend %s",
debug, p.Root, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.NetworkConfigDir, p.CgroupManager, p.TmpDir, eventsType), " ")
- if os.Getenv("HOOK_OPTION") != "" {
- podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
- }
if !p.RemoteTest {
podmanOptions = append(podmanOptions, "--network-backend", p.NetworkBackend.ToString())
@@ -1066,3 +1064,36 @@ func digShort(container, lookupName string, matchNames []string, p *PodmanTestIn
}
Fail("dns is not responding")
}
+
+// WaitForFile waits up to defaultWaitTimeout seconds for the file at path to be created; it returns an error if the file never appears
+func WaitForFile(path string) (err error) {
+ until := time.Now().Add(time.Duration(defaultWaitTimeout) * time.Second)
+ for i := 1; time.Now().Before(until); i++ {
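+ // Poll for the file, sleeping a little longer after each attempt.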
+ _, err = os.Stat(path)
+ switch {
+ case err == nil:
+ return nil
+ case errors.Is(err, os.ErrNotExist):
+ time.Sleep(time.Duration(i) * time.Second)
+ default:
+ return err
+ }
+ }
+ return err
+}
+
+// WaitForService blocks until a service is listening on the given host:port
+func WaitForService(address url.URL) {
+ // Wait for podman to be ready
+ var conn net.Conn
+ var err error
+ for i := 1; i <= 5; i++ {
+ conn, err = net.Dial("tcp", address.Host)
+ if err == nil {
+ break
+ }
+ // Podman not available yet; retry with a slightly longer delay.
+ time.Sleep(time.Duration(i) * time.Second)
+ }
+ Expect(err).ShouldNot(HaveOccurred())
+ conn.Close()
+}
diff --git a/test/e2e/container_clone_test.go b/test/e2e/container_clone_test.go
index 075ed8264..94ccd6ffe 100644
--- a/test/e2e/container_clone_test.go
+++ b/test/e2e/container_clone_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman container clone", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/container_create_volume_test.go b/test/e2e/container_create_volume_test.go
index 015b1742a..6d9f13694 100644
--- a/test/e2e/container_create_volume_test.go
+++ b/test/e2e/container_create_volume_test.go
@@ -83,7 +83,6 @@ var _ = Describe("Podman create data volume", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/container_inspect_test.go b/test/e2e/container_inspect_test.go
index f58f2de29..5aed943da 100644
--- a/test/e2e/container_inspect_test.go
+++ b/test/e2e/container_inspect_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman container inspect", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/containers_conf_test.go b/test/e2e/containers_conf_test.go
index 09cd68042..b48e1ed62 100644
--- a/test/e2e/containers_conf_test.go
+++ b/test/e2e/containers_conf_test.go
@@ -29,7 +29,6 @@ var _ = Describe("Verify podman containers.conf usage", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
os.Setenv("CONTAINERS_CONF", "config/containers.conf")
if IsRemote() {
podmanTest.RestartRemoteService()
diff --git a/test/e2e/cp_test.go b/test/e2e/cp_test.go
index ede6036b9..8a65b85d3 100644
--- a/test/e2e/cp_test.go
+++ b/test/e2e/cp_test.go
@@ -31,7 +31,6 @@ var _ = Describe("Podman cp", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/create_staticip_test.go b/test/e2e/create_staticip_test.go
index 4a1d926e0..6fd88753b 100644
--- a/test/e2e/create_staticip_test.go
+++ b/test/e2e/create_staticip_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman create with --ip flag", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
// Cleanup the CNI networks used by the tests
os.RemoveAll("/var/lib/cni/networks/podman")
})
diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go
index 5fd8e3bd6..f02d9c88b 100644
--- a/test/e2e/create_staticmac_test.go
+++ b/test/e2e/create_staticmac_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman run with --mac-address flag", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
// Cleanup the CNI networks used by the tests
os.RemoveAll("/var/lib/cni/networks/podman")
})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index d0813459d..63544d579 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -27,8 +27,6 @@ var _ = Describe("Podman create", func() {
Expect(err).To(BeNil())
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- err = podmanTest.SeedImages()
- Expect(err).To(BeNil())
})
AfterEach(func() {
@@ -176,7 +174,8 @@ var _ = Describe("Podman create", func() {
// tests are passing inside a container.
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
session := podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test", "--mount", fmt.Sprintf("type=bind,src=%s,target=/create/test", mountPath), ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
@@ -212,7 +211,8 @@ var _ = Describe("Podman create", func() {
Expect(session.OutputToString()).To(ContainSubstring("shared"))
mountPath = filepath.Join(podmanTest.TempDir, "scratchpad")
- os.Mkdir(mountPath, 0755)
+ err = os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
session = podmanTest.Podman([]string{"create", "--log-driver", "k8s-file", "--name", "test_tmpfs", "--mount", "type=tmpfs,target=/create/test", ALPINE, "grep", "/create/test", "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
diff --git a/test/e2e/diff_test.go b/test/e2e/diff_test.go
index fb6df8f45..a1f57f41b 100644
--- a/test/e2e/diff_test.go
+++ b/test/e2e/diff_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman diff", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index 1d4560e8e..725118ab0 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -29,7 +29,6 @@ var _ = Describe("Podman events", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index 3987746d0..f4ee688b7 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -27,7 +27,6 @@ var _ = Describe("Podman exec", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/export_test.go b/test/e2e/export_test.go
index 78811f1b5..59cbe06ef 100644
--- a/test/e2e/export_test.go
+++ b/test/e2e/export_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman export", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index c92c1519f..2ca774a03 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -32,7 +32,6 @@ var _ = Describe("Podman generate kube", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index e4b854332..08e8fbc8c 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman generate systemd", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
@@ -581,6 +580,7 @@ var _ = Describe("Podman generate systemd", func() {
})
It("podman generate systemd --new create command with double curly braces", func() {
+ SkipIfInContainer("journald inside a container doesn't work")
// Regression test for #9034
session := podmanTest.Podman([]string{"create", "--name", "foo", "--log-driver=journald", "--log-opt=tag={{.Name}}", ALPINE})
session.WaitWithDefaultTimeout()
diff --git a/test/e2e/healthcheck_run_test.go b/test/e2e/healthcheck_run_test.go
index a41c10162..add739988 100644
--- a/test/e2e/healthcheck_run_test.go
+++ b/test/e2e/healthcheck_run_test.go
@@ -28,14 +28,13 @@ var _ = Describe("Podman healthcheck run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
- GinkgoWriter.Write([]byte(timedResult))
+ _, _ = GinkgoWriter.Write([]byte(timedResult))
})
diff --git a/test/e2e/history_test.go b/test/e2e/history_test.go
index 92e0e1b08..d637fd9af 100644
--- a/test/e2e/history_test.go
+++ b/test/e2e/history_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman history", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/hooks/checkhook.json b/test/e2e/hooks/checkhook.json
deleted file mode 100644
index 5a9bc86d1..000000000
--- a/test/e2e/hooks/checkhook.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "cmd" : [".*"],
- "hook" : "/tmp/checkhook.sh",
- "stage" : [ "prestart" ]
-}
diff --git a/test/e2e/hooks/checkhook.sh b/test/e2e/hooks/checkhook.sh
deleted file mode 100755
index 8b755cb40..000000000
--- a/test/e2e/hooks/checkhook.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-echo $@ >> /run/hookscheck
-read line
-echo $line >> /run/hookscheck
diff --git a/test/e2e/image_sign_test.go b/test/e2e/image_sign_test.go
index dbf697bb0..3c819a7d2 100644
--- a/test/e2e/image_sign_test.go
+++ b/test/e2e/image_sign_test.go
@@ -28,8 +28,6 @@ var _ = Describe("Podman image sign", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
-
tempGNUPGHOME := filepath.Join(podmanTest.TempDir, "tmpGPG")
err := os.Mkdir(tempGNUPGHOME, os.ModePerm)
Expect(err).To(BeNil())
diff --git a/test/e2e/images_test.go b/test/e2e/images_test.go
index fc1c48c15..2473ec59e 100644
--- a/test/e2e/images_test.go
+++ b/test/e2e/images_test.go
@@ -27,7 +27,6 @@ var _ = Describe("Podman images", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/import_test.go b/test/e2e/import_test.go
index f62df23d9..e6995f0e6 100644
--- a/test/e2e/import_test.go
+++ b/test/e2e/import_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman import", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/info_test.go b/test/e2e/info_test.go
index f989a9d29..2c2c82cb6 100644
--- a/test/e2e/info_test.go
+++ b/test/e2e/info_test.go
@@ -119,33 +119,31 @@ var _ = Describe("Podman Info", func() {
Expect(string(out)).To(Equal(expect))
})
- It("podman info check RemoteSocket", func() {
+ It("check RemoteSocket", func() {
session := podmanTest.Podman([]string{"info", "--format", "{{.Host.RemoteSocket.Path}}"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session.OutputToString()).To(MatchRegexp("/run/.*podman.*sock"))
- if IsRemote() {
- session = podmanTest.Podman([]string{"info", "--format", "{{.Host.RemoteSocket.Exists}}"})
- session.WaitWithDefaultTimeout()
- Expect(session).Should(Exit(0))
- Expect(session.OutputToString()).To(ContainSubstring("true"))
+ session = podmanTest.Podman([]string{"info", "--format", "{{.Host.ServiceIsRemote}}"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ if podmanTest.RemoteTest {
+ Expect(session.OutputToString()).To(Equal("true"))
+ } else {
+ Expect(session.OutputToString()).To(Equal("false"))
}
- })
- It("verify ServiceIsRemote", func() {
- session := podmanTest.Podman([]string{"info", "--format", "{{.Host.ServiceIsRemote}}"})
+ session = podmanTest.Podman([]string{"info", "--format", "{{.Host.RemoteSocket.Exists}}"})
session.WaitWithDefaultTimeout()
- Expect(session).To(Exit(0))
-
- if podmanTest.RemoteTest {
+ Expect(session).Should(Exit(0))
+ if IsRemote() {
Expect(session.OutputToString()).To(ContainSubstring("true"))
- } else {
- Expect(session.OutputToString()).To(ContainSubstring("false"))
}
+
})
- It("Podman info must contain cgroupControllers with ReleventControllers", func() {
+ It("Podman info must contain cgroupControllers with RelevantControllers", func() {
SkipIfRootless("Hard to tell which controllers are going to be enabled for rootless")
SkipIfRootlessCgroupsV1("Disable cgroups not supported on cgroupv1 for rootless users")
session := podmanTest.Podman([]string{"info", "--format", "{{.Host.CgroupControllers}}"})
diff --git a/test/e2e/init_test.go b/test/e2e/init_test.go
index fdb2b41a1..ccc102fa3 100644
--- a/test/e2e/init_test.go
+++ b/test/e2e/init_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman init", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index 25b938d07..6fe850f0b 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman inspect", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/libpod_suite_remote_test.go b/test/e2e/libpod_suite_remote_test.go
index 9ad2bf7b9..19affbc6d 100644
--- a/test/e2e/libpod_suite_remote_test.go
+++ b/test/e2e/libpod_suite_remote_test.go
@@ -16,6 +16,7 @@ import (
"time"
"github.com/containers/podman/v4/pkg/rootless"
+ . "github.com/onsi/gomega"
)
func IsRemote() bool {
@@ -57,7 +58,8 @@ func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
outfile := filepath.Join(p.TempDir, "registries.conf")
os.Setenv("CONTAINERS_REGISTRIES_CONF", outfile)
- ioutil.WriteFile(outfile, b, 0644)
+ err := ioutil.WriteFile(outfile, b, 0644)
+ Expect(err).ToNot(HaveOccurred())
}
func resetRegistriesConfigEnv() {
@@ -71,7 +73,8 @@ func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
func (p *PodmanTestIntegration) StartRemoteService() {
if os.Geteuid() == 0 {
- os.MkdirAll("/run/podman", 0755)
+ err := os.MkdirAll("/run/podman", 0755)
+ Expect(err).ToNot(HaveOccurred())
}
args := []string{}
@@ -88,7 +91,8 @@ func (p *PodmanTestIntegration) StartRemoteService() {
command.Stdout = os.Stdout
command.Stderr = os.Stderr
fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
- command.Start()
+ err := command.Start()
+ Expect(err).ToNot(HaveOccurred())
command.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
p.RemoteCommand = command
p.RemoteSession = command.Process
@@ -134,9 +138,6 @@ func getRemoteOptions(p *PodmanTestIntegration, args []string) []string {
networkDir := p.NetworkConfigDir
podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --network-config-dir %s --cgroup-manager %s",
p.Root, p.RunRoot, p.OCIRuntime, p.ConmonBinary, networkDir, p.CgroupManager), " ")
- if os.Getenv("HOOK_OPTION") != "" {
- podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
- }
if p.NetworkBackend.ToString() == "netavark" {
podmanOptions = append(podmanOptions, "--network-backend", "netavark")
}
@@ -145,11 +146,6 @@ func getRemoteOptions(p *PodmanTestIntegration, args []string) []string {
return podmanOptions
}
-// SeedImages restores all the artifacts into the main store for remote tests
-func (p *PodmanTestIntegration) SeedImages() error {
- return nil
-}
-
// RestoreArtifact puts the cached image into our test store
func (p *PodmanTestIntegration) RestoreArtifact(image string) error {
tarball := imageTarPath(image)
@@ -159,8 +155,12 @@ func (p *PodmanTestIntegration) RestoreArtifact(image string) error {
podmanOptions := getRemoteOptions(p, args)
command := exec.Command(p.PodmanBinary, podmanOptions...)
fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
- command.Start()
- command.Wait()
+ if err := command.Start(); err != nil {
+ return err
+ }
+ if err := command.Wait(); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/test/e2e/libpod_suite_test.go b/test/e2e/libpod_suite_test.go
index cf81a0348..a633bd3d7 100644
--- a/test/e2e/libpod_suite_test.go
+++ b/test/e2e/libpod_suite_test.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"github.com/containers/podman/v4/pkg/rootless"
+ . "github.com/onsi/gomega"
)
func IsRemote() bool {
@@ -40,13 +41,15 @@ func (p *PodmanTestIntegration) PodmanExtraFiles(args []string, extraFiles []*os
func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
defaultFile := filepath.Join(INTEGRATION_ROOT, "test/registries.conf")
- os.Setenv("CONTAINERS_REGISTRIES_CONF", defaultFile)
+ err := os.Setenv("CONTAINERS_REGISTRIES_CONF", defaultFile)
+ Expect(err).ToNot(HaveOccurred())
}
func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
outfile := filepath.Join(p.TempDir, "registries.conf")
os.Setenv("CONTAINERS_REGISTRIES_CONF", outfile)
- ioutil.WriteFile(outfile, b, 0644)
+ err := ioutil.WriteFile(outfile, b, 0644)
+ Expect(err).ToNot(HaveOccurred())
}
func resetRegistriesConfigEnv() {
@@ -70,11 +73,6 @@ func (p *PodmanTestIntegration) RestoreArtifact(image string) error {
func (p *PodmanTestIntegration) StopRemoteService() {}
-// SeedImages is a no-op for localized testing
-func (p *PodmanTestIntegration) SeedImages() error {
- return nil
-}
-
// We don't support running API service when local
func (p *PodmanTestIntegration) StartRemoteService() {
}
diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go
index 001779cdf..bce8b78c6 100644
--- a/test/e2e/login_logout_test.go
+++ b/test/e2e/login_logout_test.go
@@ -36,7 +36,8 @@ var _ = Describe("Podman login and logout", func() {
podmanTest = PodmanTestCreate(tempdir)
authPath = filepath.Join(podmanTest.TempDir, "auth")
- os.Mkdir(authPath, os.ModePerm)
+ err := os.Mkdir(authPath, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
if IsCommandAvailable("getenforce") {
ge := SystemExec("getenforce", []string{})
@@ -55,11 +56,14 @@ var _ = Describe("Podman login and logout", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- f, _ := os.Create(filepath.Join(authPath, "htpasswd"))
+ f, err := os.Create(filepath.Join(authPath, "htpasswd"))
+ Expect(err).ToNot(HaveOccurred())
defer f.Close()
- f.WriteString(session.OutputToString())
- f.Sync()
+ _, err = f.WriteString(session.OutputToString())
+ Expect(err).ToNot(HaveOccurred())
+ err = f.Sync()
+ Expect(err).ToNot(HaveOccurred())
port := GetPort()
server = strings.Join([]string{"localhost", strconv.Itoa(port)}, ":")
@@ -68,7 +72,8 @@ var _ = Describe("Podman login and logout", func() {
testImg = strings.Join([]string{server, "test-alpine"}, "/")
certDirPath = filepath.Join(os.Getenv("HOME"), ".config/containers/certs.d", server)
- os.MkdirAll(certDirPath, os.ModePerm)
+ err = os.MkdirAll(certDirPath, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
cwd, _ := os.Getwd()
certPath = filepath.Join(cwd, "../", "certs")
@@ -207,7 +212,8 @@ var _ = Describe("Podman login and logout", func() {
})
It("podman login and logout with --cert-dir", func() {
certDir := filepath.Join(podmanTest.TempDir, "certs")
- os.MkdirAll(certDir, os.ModePerm)
+ err := os.MkdirAll(certDir, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
setup := SystemExec("cp", []string{filepath.Join(certPath, "domain.crt"), filepath.Join(certDir, "ca.crt")})
setup.WaitWithDefaultTimeout()
@@ -226,7 +232,8 @@ var _ = Describe("Podman login and logout", func() {
})
It("podman login and logout with multi registry", func() {
certDir := filepath.Join(os.Getenv("HOME"), ".config/containers/certs.d", "localhost:9001")
- os.MkdirAll(certDir, os.ModePerm)
+ err = os.MkdirAll(certDir, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
cwd, _ := os.Getwd()
certPath = filepath.Join(cwd, "../", "certs")
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 934a306ce..4e6dcb8af 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -37,9 +37,6 @@ var _ = Describe("Podman logs", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- if err := podmanTest.SeedImages(); err != nil {
- os.Exit(1)
- }
})
AfterEach(func() {
diff --git a/test/e2e/manifest_test.go b/test/e2e/manifest_test.go
index 230864891..92b8bc2e1 100644
--- a/test/e2e/manifest_test.go
+++ b/test/e2e/manifest_test.go
@@ -36,7 +36,6 @@ var _ = Describe("Podman manifest", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/mount_rootless_test.go b/test/e2e/mount_rootless_test.go
index 30d7ce8a9..994a5899b 100644
--- a/test/e2e/mount_rootless_test.go
+++ b/test/e2e/mount_rootless_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman mount", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/namespace_test.go b/test/e2e/namespace_test.go
index bc9db4cd9..0000a2327 100644
--- a/test/e2e/namespace_test.go
+++ b/test/e2e/namespace_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman namespaces", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/network_create_test.go b/test/e2e/network_create_test.go
index a6e927ca2..69966b07e 100644
--- a/test/e2e/network_create_test.go
+++ b/test/e2e/network_create_test.go
@@ -32,7 +32,6 @@ var _ = Describe("Podman network create", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pause_test.go b/test/e2e/pause_test.go
index 66638d71c..402719de2 100644
--- a/test/e2e/pause_test.go
+++ b/test/e2e/pause_test.go
@@ -45,7 +45,6 @@ var _ = Describe("Podman pause", func() {
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/play_build_test.go b/test/e2e/play_build_test.go
index 96785c569..914144ae6 100644
--- a/test/e2e/play_build_test.go
+++ b/test/e2e/play_build_test.go
@@ -29,7 +29,6 @@ var _ = Describe("Podman play kube with build", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index aaefa4625..45414ec04 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -1206,8 +1206,6 @@ var _ = Describe("Podman play kube", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
-
kubeYaml = filepath.Join(podmanTest.TempDir, "kube.yaml")
})
@@ -2744,6 +2742,7 @@ MemoryReservation: {{ .HostConfig.MemoryReservation }}`})
})
It("podman play kube applies log driver to containers", func() {
+ SkipIfInContainer("journald inside a container doesn't work")
pod := getPod()
err := generateKubeYaml("pod", pod, kubeYaml)
Expect(err).To(BeNil())
@@ -3181,8 +3180,10 @@ invalid kube kind
Expect(ls).Should(Exit(0))
Expect(ls.OutputToStringArray()).To(HaveLen(1))
- containerLen := podmanTest.Podman([]string{"pod", "inspect", pod.Name, "--format", "'{{len .Containers}}'"})
-
+ containerLen := podmanTest.Podman([]string{"pod", "inspect", pod.Name, "--format", "{{len .Containers}}"})
+ containerLen.WaitWithDefaultTimeout()
+ Expect(containerLen).Should(Exit(0))
+ Expect(containerLen.OutputToString()).To(Equal("2"))
ctr01Name := "ctr01"
ctr02Name := "ctr02"
@@ -3200,7 +3201,7 @@ invalid kube kind
replace.WaitWithDefaultTimeout()
Expect(replace).Should(Exit(0))
- newContainerLen := podmanTest.Podman([]string{"pod", "inspect", newPod.Name, "--format", "'{{len .Containers}}'"})
+ newContainerLen := podmanTest.Podman([]string{"pod", "inspect", newPod.Name, "--format", "{{len .Containers}}"})
newContainerLen.WaitWithDefaultTimeout()
Expect(newContainerLen).Should(Exit(0))
Expect(newContainerLen.OutputToString()).NotTo(Equal(containerLen.OutputToString()))
diff --git a/test/e2e/pod_create_test.go b/test/e2e/pod_create_test.go
index 0c7886a93..dedb1caeb 100644
--- a/test/e2e/pod_create_test.go
+++ b/test/e2e/pod_create_test.go
@@ -35,7 +35,6 @@ var _ = Describe("Podman pod create", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_infra_container_test.go b/test/e2e/pod_infra_container_test.go
index 2b56502b0..ab204992c 100644
--- a/test/e2e/pod_infra_container_test.go
+++ b/test/e2e/pod_infra_container_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman pod create", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_initcontainers_test.go b/test/e2e/pod_initcontainers_test.go
index 3c660c5bf..ec429f568 100644
--- a/test/e2e/pod_initcontainers_test.go
+++ b/test/e2e/pod_initcontainers_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman init containers", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_inspect_test.go b/test/e2e/pod_inspect_test.go
index dcc70e4a5..351317cc5 100644
--- a/test/e2e/pod_inspect_test.go
+++ b/test/e2e/pod_inspect_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman pod inspect", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_kill_test.go b/test/e2e/pod_kill_test.go
index 18f9769a1..0612200d4 100644
--- a/test/e2e/pod_kill_test.go
+++ b/test/e2e/pod_kill_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman pod kill", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_pause_test.go b/test/e2e/pod_pause_test.go
index 57ae75926..d78890347 100644
--- a/test/e2e/pod_pause_test.go
+++ b/test/e2e/pod_pause_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman pod pause", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_pod_namespaces_test.go b/test/e2e/pod_pod_namespaces_test.go
index 667a54861..5b288898f 100644
--- a/test/e2e/pod_pod_namespaces_test.go
+++ b/test/e2e/pod_pod_namespaces_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman pod create", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_prune_test.go b/test/e2e/pod_prune_test.go
index 55ecc1593..dce3e34e4 100644
--- a/test/e2e/pod_prune_test.go
+++ b/test/e2e/pod_prune_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman pod prune", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_ps_test.go b/test/e2e/pod_ps_test.go
index a0a1e1438..97ca5ff94 100644
--- a/test/e2e/pod_ps_test.go
+++ b/test/e2e/pod_ps_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman ps", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_restart_test.go b/test/e2e/pod_restart_test.go
index 1897104cc..fab448f92 100644
--- a/test/e2e/pod_restart_test.go
+++ b/test/e2e/pod_restart_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman pod restart", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_rm_test.go b/test/e2e/pod_rm_test.go
index dbb2d6d13..a5eab7eed 100644
--- a/test/e2e/pod_rm_test.go
+++ b/test/e2e/pod_rm_test.go
@@ -28,7 +28,6 @@ var _ = Describe("Podman pod rm", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_start_test.go b/test/e2e/pod_start_test.go
index 2f3ef3a11..084a48636 100644
--- a/test/e2e/pod_start_test.go
+++ b/test/e2e/pod_start_test.go
@@ -27,7 +27,6 @@ var _ = Describe("Podman pod start", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_stats_test.go b/test/e2e/pod_stats_test.go
index 0e94406f8..8f76e6e5a 100644
--- a/test/e2e/pod_stats_test.go
+++ b/test/e2e/pod_stats_test.go
@@ -28,7 +28,6 @@ var _ = Describe("Podman pod stats", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_stop_test.go b/test/e2e/pod_stop_test.go
index fc78f9ed6..2fe0aa486 100644
--- a/test/e2e/pod_stop_test.go
+++ b/test/e2e/pod_stop_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman pod stop", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pod_top_test.go b/test/e2e/pod_top_test.go
index 564170412..07028da45 100644
--- a/test/e2e/pod_top_test.go
+++ b/test/e2e/pod_top_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman top", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/port_test.go b/test/e2e/port_test.go
index d81c03e5b..03263a198 100644
--- a/test/e2e/port_test.go
+++ b/test/e2e/port_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman port", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/prune_test.go b/test/e2e/prune_test.go
index 01f987b92..75adf1724 100644
--- a/test/e2e/prune_test.go
+++ b/test/e2e/prune_test.go
@@ -35,7 +35,6 @@ var _ = Describe("Podman prune", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index 021ebc30b..1100a5d90 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -29,7 +29,6 @@ var _ = Describe("Podman ps", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index d13334651..41eb8b449 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -345,7 +345,8 @@ var _ = Describe("Podman pull", func() {
podmanTest.AddImageToRWStore(cirros)
dirpath := filepath.Join(podmanTest.TempDir, "cirros")
- os.MkdirAll(dirpath, os.ModePerm)
+ err = os.MkdirAll(dirpath, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
imgPath := fmt.Sprintf("dir:%s", dirpath)
session := podmanTest.Podman([]string{"push", "cirros", imgPath})
@@ -368,7 +369,8 @@ var _ = Describe("Podman pull", func() {
podmanTest.AddImageToRWStore(cirros)
dirpath := filepath.Join(podmanTest.TempDir, "cirros")
- os.MkdirAll(dirpath, os.ModePerm)
+ err = os.MkdirAll(dirpath, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
imgPath := fmt.Sprintf("oci:%s", dirpath)
session := podmanTest.Podman([]string{"push", "cirros", imgPath})
@@ -387,7 +389,8 @@ var _ = Describe("Podman pull", func() {
})
It("podman pull check quiet", func() {
- podmanTest.RestoreArtifact(ALPINE)
+ err := podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
setup := podmanTest.Podman([]string{"images", ALPINE, "-q", "--no-trunc"})
setup.WaitWithDefaultTimeout()
Expect(setup).Should(Exit(0))
@@ -428,8 +431,10 @@ var _ = Describe("Podman pull", func() {
// We already tested pulling, so we can save some energy and
// just restore local artifacts and tag them.
- podmanTest.RestoreArtifact(ALPINE)
- podmanTest.RestoreArtifact(BB)
+ err := podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
+ err = podmanTest.RestoreArtifact(BB)
+ Expect(err).ToNot(HaveOccurred())
// What we want is at least two images which have the same name
// and are prefixed with two different unqualified-search
diff --git a/test/e2e/push_test.go b/test/e2e/push_test.go
index 3b571ab20..0288bf915 100644
--- a/test/e2e/push_test.go
+++ b/test/e2e/push_test.go
@@ -101,7 +101,8 @@ var _ = Describe("Podman push", func() {
Skip("No registry image for ppc64le")
}
if rootless.IsRootless() {
- podmanTest.RestoreArtifact(registry)
+ err := podmanTest.RestoreArtifact(registry)
+ Expect(err).ToNot(HaveOccurred())
}
lock := GetPortLock("5000")
defer lock.Unlock()
@@ -132,8 +133,10 @@ var _ = Describe("Podman push", func() {
Skip("No registry image for ppc64le")
}
authPath := filepath.Join(podmanTest.TempDir, "auth")
- os.Mkdir(authPath, os.ModePerm)
- os.MkdirAll("/etc/containers/certs.d/localhost:5000", os.ModePerm)
+ err = os.Mkdir(authPath, os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
+ err = os.MkdirAll("/etc/containers/certs.d/localhost:5000", os.ModePerm)
+ Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll("/etc/containers/certs.d/localhost:5000")
cwd, _ := os.Getwd()
@@ -157,11 +160,14 @@ var _ = Describe("Podman push", func() {
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
- f, _ := os.Create(filepath.Join(authPath, "htpasswd"))
+ f, err := os.Create(filepath.Join(authPath, "htpasswd"))
+ Expect(err).ToNot(HaveOccurred())
defer f.Close()
- f.WriteString(session.OutputToString())
- f.Sync()
+ _, err = f.WriteString(session.OutputToString())
+ Expect(err).ToNot(HaveOccurred())
+ err = f.Sync()
+ Expect(err).ToNot(HaveOccurred())
session = podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry", "-v",
strings.Join([]string{authPath, "/auth"}, ":"), "-e", "REGISTRY_AUTH=htpasswd", "-e",
diff --git a/test/e2e/rename_test.go b/test/e2e/rename_test.go
index ef90c3f22..341490d9c 100644
--- a/test/e2e/rename_test.go
+++ b/test/e2e/rename_test.go
@@ -24,7 +24,6 @@ var _ = Describe("podman rename", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/restart_test.go b/test/e2e/restart_test.go
index b8c74d395..b3052623b 100644
--- a/test/e2e/restart_test.go
+++ b/test/e2e/restart_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman restart", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/rm_test.go b/test/e2e/rm_test.go
index f836540b4..7dbe5fed8 100644
--- a/test/e2e/rm_test.go
+++ b/test/e2e/rm_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman rm", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_aardvark_test.go b/test/e2e/run_aardvark_test.go
index 7b4598423..25eb8b538 100644
--- a/test/e2e/run_aardvark_test.go
+++ b/test/e2e/run_aardvark_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman run networking", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
SkipIfCNI(podmanTest)
})
diff --git a/test/e2e/run_apparmor_test.go b/test/e2e/run_apparmor_test.go
index ed88ab7a0..18d011e6d 100644
--- a/test/e2e/run_apparmor_test.go
+++ b/test/e2e/run_apparmor_test.go
@@ -42,7 +42,6 @@ var _ = Describe("Podman run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_cgroup_parent_test.go b/test/e2e/run_cgroup_parent_test.go
index 34715be22..24cae43b1 100644
--- a/test/e2e/run_cgroup_parent_test.go
+++ b/test/e2e/run_cgroup_parent_test.go
@@ -30,7 +30,6 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_cleanup_test.go b/test/e2e/run_cleanup_test.go
index 2282ef913..ea2caf907 100644
--- a/test/e2e/run_cleanup_test.go
+++ b/test/e2e/run_cleanup_test.go
@@ -23,7 +23,8 @@ var _ = Describe("Podman run exit", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.RestoreArtifact(ALPINE)
+ err = podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
diff --git a/test/e2e/run_cpu_test.go b/test/e2e/run_cpu_test.go
index fda0a7c24..b21be5729 100644
--- a/test/e2e/run_cpu_test.go
+++ b/test/e2e/run_cpu_test.go
@@ -33,7 +33,6 @@ var _ = Describe("Podman run cpu", func() {
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_device_test.go b/test/e2e/run_device_test.go
index 479837dda..c46afdaca 100644
--- a/test/e2e/run_device_test.go
+++ b/test/e2e/run_device_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman run device", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_dns_test.go b/test/e2e/run_dns_test.go
index 7561a2e85..61177b4c7 100644
--- a/test/e2e/run_dns_test.go
+++ b/test/e2e/run_dns_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman run dns", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_entrypoint_test.go b/test/e2e/run_entrypoint_test.go
index fde43dfec..9f35b9e7e 100644
--- a/test/e2e/run_entrypoint_test.go
+++ b/test/e2e/run_entrypoint_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman run entrypoint", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_env_test.go b/test/e2e/run_env_test.go
index f4c44c23b..bab52efc5 100644
--- a/test/e2e/run_env_test.go
+++ b/test/e2e/run_env_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_exit_test.go b/test/e2e/run_exit_test.go
index aa9a4295c..0663e4d9a 100644
--- a/test/e2e/run_exit_test.go
+++ b/test/e2e/run_exit_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman run exit", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_memory_test.go b/test/e2e/run_memory_test.go
index d6a67da57..083020f08 100644
--- a/test/e2e/run_memory_test.go
+++ b/test/e2e/run_memory_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman run memory", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index 53a91b2b3..c9990b70f 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -32,7 +32,6 @@ var _ = Describe("Podman run networking", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
@@ -747,11 +746,12 @@ EXPOSE 2004-2005/tcp`, ALPINE)
routeAdd := func(gateway string) {
gw := net.ParseIP(gateway)
route := &netlink.Route{Dst: nil, Gw: gw}
- netlink.RouteAdd(route)
+ err = netlink.RouteAdd(route)
+ Expect(err).ToNot(HaveOccurred())
}
setupNetworkNs := func(networkNSName string) {
- ns.WithNetNSPath("/run/netns/"+networkNSName, func(_ ns.NetNS) error {
+ _ = ns.WithNetNSPath("/run/netns/"+networkNSName, func(_ ns.NetNS) error {
loopbackup()
linkup("eth0", "46:7f:45:6e:4f:c8", []string{"10.25.40.0/24", "fd04:3e42:4a4e:3381::/64"})
linkup("eth1", "56:6e:35:5d:3e:a8", []string{"10.88.0.0/16"})
diff --git a/test/e2e/run_ns_test.go b/test/e2e/run_ns_test.go
index 23fd298d7..f99d6cf3f 100644
--- a/test/e2e/run_ns_test.go
+++ b/test/e2e/run_ns_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman run ns", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_passwd_test.go b/test/e2e/run_passwd_test.go
index ce6c6ffda..411e12218 100644
--- a/test/e2e/run_passwd_test.go
+++ b/test/e2e/run_passwd_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman run passwd", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_privileged_test.go b/test/e2e/run_privileged_test.go
index 59223c589..4f0b512c6 100644
--- a/test/e2e/run_privileged_test.go
+++ b/test/e2e/run_privileged_test.go
@@ -49,7 +49,6 @@ var _ = Describe("Podman privileged container tests", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_restart_test.go b/test/e2e/run_restart_test.go
index ec8fbfe98..b1002ece4 100644
--- a/test/e2e/run_restart_test.go
+++ b/test/e2e/run_restart_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman run restart containers", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_seccomp_test.go b/test/e2e/run_seccomp_test.go
index 03212b6dc..bd44a3ef1 100644
--- a/test/e2e/run_seccomp_test.go
+++ b/test/e2e/run_seccomp_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_security_labels_test.go b/test/e2e/run_security_labels_test.go
index 8aebeaebb..915566a2c 100644
--- a/test/e2e/run_security_labels_test.go
+++ b/test/e2e/run_security_labels_test.go
@@ -25,8 +25,6 @@ var _ = Describe("Podman generate kube", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
-
})
AfterEach(func() {
diff --git a/test/e2e/run_selinux_test.go b/test/e2e/run_selinux_test.go
index b71c68baf..4a433f308 100644
--- a/test/e2e/run_selinux_test.go
+++ b/test/e2e/run_selinux_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
if !selinux.GetEnabled() {
Skip("SELinux not enabled")
}
diff --git a/test/e2e/run_signal_test.go b/test/e2e/run_signal_test.go
index d40a5a1b4..e5d9b6c7b 100644
--- a/test/e2e/run_signal_test.go
+++ b/test/e2e/run_signal_test.go
@@ -34,7 +34,6 @@ var _ = Describe("Podman run with --sig-proxy", func() {
}
podmanTest = PodmanTestCreate(tmpdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
@@ -51,11 +50,14 @@ var _ = Describe("Podman run with --sig-proxy", func() {
signal := syscall.SIGFPE
// Set up a socket for communication
udsDir := filepath.Join(tmpdir, "socket")
- os.Mkdir(udsDir, 0700)
+ err := os.Mkdir(udsDir, 0700)
+ Expect(err).ToNot(HaveOccurred())
udsPath := filepath.Join(udsDir, "fifo")
- syscall.Mkfifo(udsPath, 0600)
+ err = syscall.Mkfifo(udsPath, 0600)
+ Expect(err).ToNot(HaveOccurred())
if rootless.IsRootless() {
- podmanTest.RestoreArtifact(fedoraMinimal)
+ err = podmanTest.RestoreArtifact(fedoraMinimal)
+ Expect(err).ToNot(HaveOccurred())
}
_, pid := podmanTest.PodmanPID([]string{"run", "-it", "-v", fmt.Sprintf("%s:/h:Z", udsDir), fedoraMinimal, "bash", "-c", sigCatch})
@@ -112,7 +114,8 @@ var _ = Describe("Podman run with --sig-proxy", func() {
Specify("signals are not forwarded to container with sig-proxy false", func() {
signal := syscall.SIGFPE
if rootless.IsRootless() {
- podmanTest.RestoreArtifact(fedoraMinimal)
+ err = podmanTest.RestoreArtifact(fedoraMinimal)
+ Expect(err).ToNot(HaveOccurred())
}
session, pid := podmanTest.PodmanPID([]string{"run", "--name", "test2", "--sig-proxy=false", fedoraMinimal, "bash", "-c", sigCatch2})
diff --git a/test/e2e/run_staticip_test.go b/test/e2e/run_staticip_test.go
index 7e61e7c5e..af3f98d4b 100644
--- a/test/e2e/run_staticip_test.go
+++ b/test/e2e/run_staticip_test.go
@@ -28,7 +28,6 @@ var _ = Describe("Podman run with --ip flag", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
// Cleanup the CNI networks used by the tests
os.RemoveAll("/var/lib/cni/networks/podman")
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 386a27a2f..182ae1888 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -16,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
. "github.com/containers/podman/v4/test/utils"
"github.com/containers/storage/pkg/stringid"
- "github.com/mrunalp/fileutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
@@ -36,7 +35,6 @@ var _ = Describe("Podman run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
@@ -812,16 +810,38 @@ USER bin`, BB)
})
It("podman test hooks", func() {
- hcheck := "/run/hookscheck"
+ SkipIfRemote("--hooks-dir does not work with remote")
hooksDir := tempdir + "/hooks"
- os.Mkdir(hooksDir, 0755)
- fileutils.CopyFile("hooks/hooks.json", hooksDir)
- os.Setenv("HOOK_OPTION", fmt.Sprintf("--hooks-dir=%s", hooksDir))
- os.Remove(hcheck)
- session := podmanTest.Podman([]string{"run", ALPINE, "ls"})
+ err := os.Mkdir(hooksDir, 0755)
+ Expect(err).ToNot(HaveOccurred())
+ hookJSONPath := filepath.Join(hooksDir, "checkhooks.json")
+ hookScriptPath := filepath.Join(hooksDir, "checkhooks.sh")
+ targetFile := filepath.Join(hooksDir, "target")
+
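+ // Minimal OCI hooks config: run the script below at the prestart stage for every container command.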
+ hookJSON := fmt.Sprintf(`{
+ "cmd" : [".*"],
+ "hook" : "%s",
+ "stage" : [ "prestart" ]
+}
+`, hookScriptPath)
+ err = ioutil.WriteFile(hookJSONPath, []byte(hookJSON), 0644)
+ Expect(err).ToNot(HaveOccurred())
+
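+ // The hook script writes a random token to targetFile; reading that token back proves the hook actually ran.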
+ random := stringid.GenerateNonCryptoID()
+
+ hookScript := fmt.Sprintf(`#!/bin/sh
+echo -n %s >%s
+`, random, targetFile)
+ err = ioutil.WriteFile(hookScriptPath, []byte(hookScript), 0755)
+ Expect(err).ToNot(HaveOccurred())
+
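+ // Pass --hooks-dir as a global flag instead of the old HOOK_OPTION environment variable.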
+ session := podmanTest.Podman([]string{"--hooks-dir", hooksDir, "run", ALPINE, "ls"})
session.Wait(10)
- os.Unsetenv("HOOK_OPTION")
Expect(session).Should(Exit(0))
+
+ b, err := ioutil.ReadFile(targetFile)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(string(b)).To(Equal(random))
})
It("podman run with subscription secrets", func() {
diff --git a/test/e2e/run_userns_test.go b/test/e2e/run_userns_test.go
index 092621c27..613727118 100644
--- a/test/e2e/run_userns_test.go
+++ b/test/e2e/run_userns_test.go
@@ -33,7 +33,6 @@ var _ = Describe("Podman UserNS support", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 4f1013f8d..3bef889b7 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -34,7 +34,6 @@ var _ = Describe("Podman run with volumes", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
@@ -55,7 +54,8 @@ var _ = Describe("Podman run with volumes", func() {
It("podman run with volume flag", func() {
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err = os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
vol := mountPath + ":" + dest
// [5] is flags
@@ -82,7 +82,8 @@ var _ = Describe("Podman run with volumes", func() {
Skip("skip failing test on ppc64le")
}
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err = os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
mount := "type=bind,src=" + mountPath + ",target=" + dest
session := podmanTest.Podman([]string{"run", "--rm", "--mount", mount, ALPINE, "grep", dest, "/proc/self/mountinfo"})
@@ -141,14 +142,16 @@ var _ = Describe("Podman run with volumes", func() {
It("podman run with conflicting volumes errors", func() {
mountPath := filepath.Join(podmanTest.TmpDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
session := podmanTest.Podman([]string{"run", "-v", mountPath + ":" + dest, "-v", "/tmp" + ":" + dest, ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(125))
})
It("podman run with conflict between image volume and user mount succeeds", func() {
- podmanTest.RestoreArtifact(redis)
+ err = podmanTest.RestoreArtifact(redis)
+ Expect(err).ToNot(HaveOccurred())
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
err := os.Mkdir(mountPath, 0755)
Expect(err).To(BeNil())
@@ -164,7 +167,8 @@ var _ = Describe("Podman run with volumes", func() {
It("podman run with mount flag and boolean options", func() {
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
mount := "type=bind,src=" + mountPath + ",target=" + dest
session := podmanTest.Podman([]string{"run", "--rm", "--mount", mount + ",ro=false", ALPINE, "grep", dest, "/proc/self/mountinfo"})
@@ -193,7 +197,8 @@ var _ = Describe("Podman run with volumes", func() {
It("podman run with volumes and suid/dev/exec options", func() {
SkipIfRemote("podman-remote does not support --volumes")
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
session := podmanTest.Podman([]string{"run", "--rm", "-v", mountPath + ":" + dest + ":suid,dev,exec", ALPINE, "grep", dest, "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
@@ -224,7 +229,8 @@ var _ = Describe("Podman run with volumes", func() {
}
}
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
// Container should be able to start with custom overlay volume
session := podmanTest.Podman([]string{"run", "--rm", "-v", mountPath + ":/data:O", "--workdir=/data", ALPINE, "echo", "hello"})
@@ -603,7 +609,8 @@ VOLUME /test/`, ALPINE)
}
}
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
testFile := filepath.Join(mountPath, "test1")
f, err := os.Create(testFile)
Expect(err).To(BeNil(), "os.Create "+testFile)
@@ -651,7 +658,8 @@ VOLUME /test/`, ALPINE)
It("overlay volume conflicts with named volume and mounts", func() {
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err := os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
testFile := filepath.Join(mountPath, "test1")
f, err := os.Create(testFile)
Expect(err).To(BeNil())
@@ -716,7 +724,8 @@ VOLUME /test/`, ALPINE)
}
mountPath := filepath.Join(podmanTest.TempDir, "secrets")
- os.Mkdir(mountPath, 0755)
+ err = os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
vol := mountPath + ":" + dest + ":U"
session := podmanTest.Podman([]string{"run", "--rm", "--user", "888:888", "-v", vol, ALPINE, "stat", "-c", "%u:%g", dest})
@@ -754,7 +763,8 @@ VOLUME /test/`, ALPINE)
}
mountPath := filepath.Join(podmanTest.TempDir, "foo")
- os.Mkdir(mountPath, 0755)
+ err = os.Mkdir(mountPath, 0755)
+ Expect(err).ToNot(HaveOccurred())
// false bind mount
vol := "type=bind,src=" + mountPath + ",dst=" + dest + ",U=false"
diff --git a/test/e2e/run_working_dir_test.go b/test/e2e/run_working_dir_test.go
index 50d0a2194..ff91a420f 100644
--- a/test/e2e/run_working_dir_test.go
+++ b/test/e2e/run_working_dir_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman run", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/runlabel_test.go b/test/e2e/runlabel_test.go
index d1e11dd9a..018ed37c2 100644
--- a/test/e2e/runlabel_test.go
+++ b/test/e2e/runlabel_test.go
@@ -37,7 +37,6 @@ var _ = Describe("podman container runlabel", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/save_test.go b/test/e2e/save_test.go
index 39295608e..536eefda7 100644
--- a/test/e2e/save_test.go
+++ b/test/e2e/save_test.go
@@ -170,7 +170,8 @@ var _ = Describe("Podman save", func() {
}
defer func() {
cmd = exec.Command("cp", "default.yaml", "/etc/containers/registries.d/default.yaml")
- cmd.Run()
+ err := cmd.Run()
+ Expect(err).ToNot(HaveOccurred())
}()
cmd = exec.Command("cp", "sign/key.gpg", "/tmp/key.gpg")
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 07198d799..8237f6433 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -64,8 +64,6 @@ registries = ['{{.Host}}:{{.Port}}']`
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
-
})
AfterEach(func() {
@@ -242,7 +240,8 @@ registries = ['{{.Host}}:{{.Port}}']`
Fail("Cannot start docker registry on port %s", port)
}
ep := endpoint{Port: fmt.Sprintf("%d", port), Host: "localhost"}
- podmanTest.RestoreArtifact(ALPINE)
+ err = podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
image := fmt.Sprintf("%s/my-alpine", ep.Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
@@ -277,7 +276,8 @@ registries = ['{{.Host}}:{{.Port}}']`
Fail("unable to start registry on port %s", port)
}
- podmanTest.RestoreArtifact(ALPINE)
+ err = podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
image := fmt.Sprintf("%s/my-alpine", ep.Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
@@ -285,9 +285,11 @@ registries = ['{{.Host}}:{{.Port}}']`
// registries.conf set up
var buffer bytes.Buffer
- registryFileTmpl.Execute(&buffer, ep)
+ err = registryFileTmpl.Execute(&buffer, ep)
+ Expect(err).ToNot(HaveOccurred())
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
- ioutil.WriteFile(fmt.Sprintf("%s/registry4.conf", tempdir), buffer.Bytes(), 0644)
+ err = ioutil.WriteFile(fmt.Sprintf("%s/registry4.conf", tempdir), buffer.Bytes(), 0644)
+ Expect(err).ToNot(HaveOccurred())
if IsRemote() {
podmanTest.RestartRemoteService()
defer podmanTest.RestartRemoteService()
@@ -319,16 +321,19 @@ registries = ['{{.Host}}:{{.Port}}']`
Fail("Cannot start docker registry on port %s", port)
}
- podmanTest.RestoreArtifact(ALPINE)
+ err = podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
image := fmt.Sprintf("%s/my-alpine", ep.Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push).Should(Exit(0))
var buffer bytes.Buffer
- registryFileTmpl.Execute(&buffer, ep)
+ err = registryFileTmpl.Execute(&buffer, ep)
+ Expect(err).ToNot(HaveOccurred())
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
- ioutil.WriteFile(fmt.Sprintf("%s/registry5.conf", tempdir), buffer.Bytes(), 0644)
+ err = ioutil.WriteFile(fmt.Sprintf("%s/registry5.conf", tempdir), buffer.Bytes(), 0644)
+ Expect(err).ToNot(HaveOccurred())
search := podmanTest.Podman([]string{"search", image, "--tls-verify=true"})
search.WaitWithDefaultTimeout()
@@ -356,16 +361,19 @@ registries = ['{{.Host}}:{{.Port}}']`
Fail("Cannot start docker registry on port %s", port)
}
- podmanTest.RestoreArtifact(ALPINE)
+ err = podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
image := fmt.Sprintf("%s/my-alpine", ep.Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push).Should(Exit(0))
var buffer bytes.Buffer
- registryFileBadTmpl.Execute(&buffer, ep)
+ err = registryFileBadTmpl.Execute(&buffer, ep)
+ Expect(err).ToNot(HaveOccurred())
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
- ioutil.WriteFile(fmt.Sprintf("%s/registry6.conf", tempdir), buffer.Bytes(), 0644)
+ err = ioutil.WriteFile(fmt.Sprintf("%s/registry6.conf", tempdir), buffer.Bytes(), 0644)
+ Expect(err).ToNot(HaveOccurred())
if IsRemote() {
podmanTest.RestartRemoteService()
@@ -409,16 +417,19 @@ registries = ['{{.Host}}:{{.Port}}']`
Fail("Cannot start docker registry on port %s", port2)
}
- podmanTest.RestoreArtifact(ALPINE)
+ err = podmanTest.RestoreArtifact(ALPINE)
+ Expect(err).ToNot(HaveOccurred())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, fmt.Sprintf("localhost:%d/my-alpine", port2)})
push.WaitWithDefaultTimeout()
Expect(push).Should(Exit(0))
// registries.conf set up
var buffer bytes.Buffer
- registryFileTwoTmpl.Execute(&buffer, ep3)
+ err = registryFileTwoTmpl.Execute(&buffer, ep3)
+ Expect(err).ToNot(HaveOccurred())
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
- ioutil.WriteFile(fmt.Sprintf("%s/registry8.conf", tempdir), buffer.Bytes(), 0644)
+ err = ioutil.WriteFile(fmt.Sprintf("%s/registry8.conf", tempdir), buffer.Bytes(), 0644)
+ Expect(err).ToNot(HaveOccurred())
if IsRemote() {
podmanTest.RestartRemoteService()
diff --git a/test/e2e/secret_test.go b/test/e2e/secret_test.go
index 90d760c81..ed328d84a 100644
--- a/test/e2e/secret_test.go
+++ b/test/e2e/secret_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman secret", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go
index 98943c6fc..73af9d12c 100644
--- a/test/e2e/start_test.go
+++ b/test/e2e/start_test.go
@@ -26,7 +26,6 @@ var _ = Describe("Podman start", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/stats_test.go b/test/e2e/stats_test.go
index 7435a0e3b..b43a81cd3 100644
--- a/test/e2e/stats_test.go
+++ b/test/e2e/stats_test.go
@@ -32,7 +32,6 @@ var _ = Describe("Podman stats", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/stop_test.go b/test/e2e/stop_test.go
index 99d7f278c..8864ba5fd 100644
--- a/test/e2e/stop_test.go
+++ b/test/e2e/stop_test.go
@@ -25,7 +25,6 @@ var _ = Describe("Podman stop", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/system_connection_test.go b/test/e2e/system_connection_test.go
index ac4c5e5ea..2228c23b2 100644
--- a/test/e2e/system_connection_test.go
+++ b/test/e2e/system_connection_test.go
@@ -47,7 +47,7 @@ var _ = Describe("podman system connection", func() {
}
f := CurrentGinkgoTestDescription()
- GinkgoWriter.Write(
+ _, _ = GinkgoWriter.Write(
[]byte(
fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())))
})
@@ -247,7 +247,7 @@ var _ = Describe("podman system connection", func() {
// podman-remote commands will be executed by ginkgo directly.
SkipIfContainerized("sshd is not available when running in a container")
SkipIfRemote("connection heuristic requires both podman and podman-remote binaries")
- SkipIfNotRootless("FIXME: setup ssh keys when root")
+ SkipIfNotRootless(fmt.Sprintf("FIXME: setup ssh keys when root. uid(%d) euid(%d)", os.Getuid(), os.Geteuid()))
SkipIfSystemdNotRunning("cannot test connection heuristic if systemd is not running")
SkipIfNotActive("sshd", "cannot test connection heuristic if sshd is not running")
})
diff --git a/test/e2e/system_df_test.go b/test/e2e/system_df_test.go
index a9fa5f4ac..ba4a40ab4 100644
--- a/test/e2e/system_df_test.go
+++ b/test/e2e/system_df_test.go
@@ -26,14 +26,13 @@ var _ = Describe("podman system df", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
- GinkgoWriter.Write([]byte(timedResult))
+ _, _ = GinkgoWriter.Write([]byte(timedResult))
})
It("podman system df", func() {
diff --git a/test/e2e/system_dial_stdio_test.go b/test/e2e/system_dial_stdio_test.go
index 5fcb20cb8..4e4c99bfe 100644
--- a/test/e2e/system_dial_stdio_test.go
+++ b/test/e2e/system_dial_stdio_test.go
@@ -24,14 +24,13 @@ var _ = Describe("podman system dial-stdio", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
- GinkgoWriter.Write([]byte(timedResult))
+ _, _ = GinkgoWriter.Write([]byte(timedResult))
})
It("podman system dial-stdio help", func() {
diff --git a/test/e2e/system_reset_test.go b/test/e2e/system_reset_test.go
index f413ce147..28f2e25ca 100644
--- a/test/e2e/system_reset_test.go
+++ b/test/e2e/system_reset_test.go
@@ -24,14 +24,13 @@ var _ = Describe("podman system reset", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
- GinkgoWriter.Write([]byte(timedResult))
+ _, _ = GinkgoWriter.Write([]byte(timedResult))
})
It("podman system reset", func() {
@@ -90,5 +89,12 @@ var _ = Describe("podman system reset", func() {
Expect(session).Should(Exit(0))
// default network should exist
Expect(session.OutputToStringArray()).To(HaveLen(1))
+
+ // TODO: machine tests currently don't run outside of the machine test pkg
+ // so no machines are created here to clean up
+ session = podmanTest.Podman([]string{"machine", "list", "-q"})
+ session.WaitWithDefaultTimeout()
+ Expect(session).Should(Exit(0))
+ Expect(session.OutputToStringArray()).To(BeEmpty())
})
})
diff --git a/test/e2e/system_service_test.go b/test/e2e/system_service_test.go
index 2bc7756d6..398290426 100644
--- a/test/e2e/system_service_test.go
+++ b/test/e2e/system_service_test.go
@@ -20,7 +20,7 @@ var _ = Describe("podman system service", func() {
// The timeout used to wait for the service to respond. As shown in #12167,
// this may take some time on machines under high load.
- var timeout = 20
+ var timeout = 30
BeforeEach(func() {
tempdir, err := CreateTempDirInTempDir()
@@ -122,22 +122,6 @@ var _ = Describe("podman system service", func() {
})
})
-// WaitForService blocks, waiting for some service listening on given host:port
-func WaitForService(address url.URL) {
- // Wait for podman to be ready
- var conn net.Conn
- var err error
- for i := 1; i <= 5; i++ {
- conn, err = net.Dial("tcp", address.Host)
- if err != nil {
- // Podman not available yet...
- time.Sleep(time.Duration(i) * time.Second)
- }
- }
- Expect(err).ShouldNot(HaveOccurred())
- conn.Close()
-}
-
// randomPort leans on the go net library to find an available port...
func randomPort() string {
port, err := utils.GetRandomPort()
diff --git a/test/e2e/systemd_activate_test.go b/test/e2e/systemd_activate_test.go
index 04acafe1b..aeea4f932 100644
--- a/test/e2e/systemd_activate_test.go
+++ b/test/e2e/systemd_activate_test.go
@@ -31,7 +31,6 @@ var _ = Describe("Systemd activate", func() {
podmanTest = PodmanTestCreate(tempDir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/systemd_test.go b/test/e2e/systemd_test.go
index 57fc323ce..a1a080904 100644
--- a/test/e2e/systemd_test.go
+++ b/test/e2e/systemd_test.go
@@ -28,7 +28,6 @@ var _ = Describe("Podman systemd", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
systemdUnitFile = `[Unit]
Description=redis container
[Service]
diff --git a/test/e2e/toolbox_test.go b/test/e2e/toolbox_test.go
index 1fc28a06d..1e9a6da1f 100644
--- a/test/e2e/toolbox_test.go
+++ b/test/e2e/toolbox_test.go
@@ -56,7 +56,6 @@ var _ = Describe("Toolbox-specific testing", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/top_test.go b/test/e2e/top_test.go
index 344568da5..66bb887dc 100644
--- a/test/e2e/top_test.go
+++ b/test/e2e/top_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman top", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/tree_test.go b/test/e2e/tree_test.go
index ab6e49e88..e1282d2b4 100644
--- a/test/e2e/tree_test.go
+++ b/test/e2e/tree_test.go
@@ -31,7 +31,7 @@ var _ = Describe("Podman image tree", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
- GinkgoWriter.Write([]byte(timedResult))
+ _, _ = GinkgoWriter.Write([]byte(timedResult))
})
It("podman image tree", func() {
diff --git a/test/e2e/trust_test.go b/test/e2e/trust_test.go
index d17e34e9c..eee802e43 100644
--- a/test/e2e/trust_test.go
+++ b/test/e2e/trust_test.go
@@ -28,7 +28,6 @@ var _ = Describe("Podman trust", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
@@ -75,7 +74,8 @@ var _ = Describe("Podman trust", func() {
Expect(session).Should(Exit(0))
Expect(session.OutputToString()).To(BeValidJSON())
var teststruct []map[string]string
- json.Unmarshal(session.Out.Contents(), &teststruct)
+ err = json.Unmarshal(session.Out.Contents(), &teststruct)
+ Expect(err).ToNot(HaveOccurred())
Expect(teststruct).To(HaveLen(3))
// To ease comparison, group the unordered array of repos by repo (and we expect only one entry by repo, so order within groups doesn’t matter)
repoMap := map[string][]map[string]string{}
diff --git a/test/e2e/unshare_test.go b/test/e2e/unshare_test.go
index 8b06dd4f5..520a2f884 100644
--- a/test/e2e/unshare_test.go
+++ b/test/e2e/unshare_test.go
@@ -32,7 +32,6 @@ var _ = Describe("Podman unshare", func() {
podmanTest.CgroupManager = "cgroupfs"
podmanTest.StorageOptions = ROOTLESS_STORAGE_OPTIONS
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/version_test.go b/test/e2e/version_test.go
index a30db80eb..052b9aa39 100644
--- a/test/e2e/version_test.go
+++ b/test/e2e/version_test.go
@@ -30,7 +30,7 @@ var _ = Describe("Podman version", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
- podmanTest.SeedImages()
+
})
It("podman version", func() {
diff --git a/test/e2e/volume_create_test.go b/test/e2e/volume_create_test.go
index 0ac91abd3..09e5da8a0 100644
--- a/test/e2e/volume_create_test.go
+++ b/test/e2e/volume_create_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman volume create", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/volume_exists_test.go b/test/e2e/volume_exists_test.go
index fdadbda27..0de574968 100644
--- a/test/e2e/volume_exists_test.go
+++ b/test/e2e/volume_exists_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman volume exists", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/volume_inspect_test.go b/test/e2e/volume_inspect_test.go
index 5e3edfe24..344fe8b05 100644
--- a/test/e2e/volume_inspect_test.go
+++ b/test/e2e/volume_inspect_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman volume inspect", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/volume_ls_test.go b/test/e2e/volume_ls_test.go
index ce4cfc77d..19f87fb8a 100644
--- a/test/e2e/volume_ls_test.go
+++ b/test/e2e/volume_ls_test.go
@@ -24,7 +24,6 @@ var _ = Describe("Podman volume ls", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/volume_plugin_test.go b/test/e2e/volume_plugin_test.go
index fd205805d..4700afdb5 100644
--- a/test/e2e/volume_plugin_test.go
+++ b/test/e2e/volume_plugin_test.go
@@ -25,11 +25,11 @@ var _ = Describe("Podman volume plugins", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
os.Setenv("CONTAINERS_CONF", "config/containers.conf")
SkipIfRemote("Volume plugins only supported as local")
SkipIfRootless("Root is required for volume plugin testing")
- os.MkdirAll("/run/docker/plugins", 0755)
+ err = os.MkdirAll("/run/docker/plugins", 0755)
+ Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
@@ -55,7 +55,8 @@ var _ = Describe("Podman volume plugins", func() {
podmanTest.AddImageToRWStore(volumeTest)
pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
- os.Mkdir(pluginStatePath, 0755)
+ err := os.Mkdir(pluginStatePath, 0755)
+ Expect(err).ToNot(HaveOccurred())
// Keep this distinct within tests to avoid multiple tests using the same plugin.
pluginName := "testvol1"
@@ -89,7 +90,8 @@ var _ = Describe("Podman volume plugins", func() {
podmanTest.AddImageToRWStore(volumeTest)
pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
- os.Mkdir(pluginStatePath, 0755)
+ err := os.Mkdir(pluginStatePath, 0755)
+ Expect(err).ToNot(HaveOccurred())
// Keep this distinct within tests to avoid multiple tests using the same plugin.
pluginName := "testvol2"
@@ -112,7 +114,8 @@ var _ = Describe("Podman volume plugins", func() {
podmanTest.AddImageToRWStore(volumeTest)
pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
- os.Mkdir(pluginStatePath, 0755)
+ err := os.Mkdir(pluginStatePath, 0755)
+ Expect(err).ToNot(HaveOccurred())
// Keep this distinct within tests to avoid multiple tests using the same plugin.
pluginName := "testvol3"
@@ -153,7 +156,8 @@ var _ = Describe("Podman volume plugins", func() {
podmanTest.AddImageToRWStore(volumeTest)
pluginStatePath := filepath.Join(podmanTest.TempDir, "volumes")
- os.Mkdir(pluginStatePath, 0755)
+ err := os.Mkdir(pluginStatePath, 0755)
+ Expect(err).ToNot(HaveOccurred())
// Keep this distinct within tests to avoid multiple tests using the same plugin.
pluginName := "testvol4"
diff --git a/test/e2e/volume_prune_test.go b/test/e2e/volume_prune_test.go
index 0b4c30a48..600f1b887 100644
--- a/test/e2e/volume_prune_test.go
+++ b/test/e2e/volume_prune_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman volume prune", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/volume_rm_test.go b/test/e2e/volume_rm_test.go
index 2a2de0920..0180b7a46 100644
--- a/test/e2e/volume_rm_test.go
+++ b/test/e2e/volume_rm_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman volume rm", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/e2e/wait_test.go b/test/e2e/wait_test.go
index 098780c70..16e876af9 100644
--- a/test/e2e/wait_test.go
+++ b/test/e2e/wait_test.go
@@ -23,7 +23,6 @@ var _ = Describe("Podman wait", func() {
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
- podmanTest.SeedImages()
})
AfterEach(func() {
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index 64f95f723..39982848f 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -406,7 +406,76 @@ EOF
run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
is "$output" "true" "Host network sharing with only ipc should be true"
run_podman pod rm test
+}
+
+# Wait for the pod (1st arg) to transition into the state (2nd arg)
+function _ensure_pod_state() {
+ for i in {0..5}; do
+ run_podman pod inspect $1 --format "{{.State}}"
+ if [[ $output == "$2" ]]; then
+ break
+ fi
+ sleep 0.5
+ done
+
+ is "$output" "$2" "unexpected pod state"
+}
+
+@test "pod exit policies" {
+ # Test setting exit policies
+ run_podman pod create
+ podID="$output"
+ run_podman pod inspect $podID --format "{{.ExitPolicy}}"
+ is "$output" "continue" "default exit policy"
+ run_podman pod rm $podID
+
+ run_podman pod create --exit-policy stop
+ podID="$output"
+ run_podman pod inspect $podID --format "{{.ExitPolicy}}"
+ is "$output" "stop" "custom exit policy"
+ run_podman pod rm $podID
+
+ run_podman 125 pod create --exit-policy invalid
+ is "$output" "Error: .*error running pod create option: invalid pod exit policy: \"invalid\"" "invalid exit policy"
+
+ # Test exit-policy behaviour
+ run_podman pod create --exit-policy continue
+ podID="$output"
+ run_podman run --pod $podID $IMAGE true
+ run_podman pod inspect $podID --format "{{.State}}"
+ _ensure_pod_state $podID Degraded
+ run_podman pod rm $podID
+
+ run_podman pod create --exit-policy stop
+ podID="$output"
+ run_podman run --pod $podID $IMAGE true
+ run_podman pod inspect $podID --format "{{.State}}"
+ _ensure_pod_state $podID Exited
+ run_podman pod rm $podID
+}
+@test "pod exit policies - play kube" {
+ # play-kube sets the exit policy to "stop"
+ local name="$(random_string 10 | tr A-Z a-z)"
+
+ kubeFile="apiVersion: v1
+kind: Pod
+metadata:
+ name: $name-pod
+spec:
+ containers:
+ - command:
+ - \"true\"
+ image: $IMAGE
+ name: ctr
+ restartPolicy: OnFailure"
+
+ echo "$kubeFile" > $PODMAN_TMPDIR/test.yaml
+ run_podman play kube $PODMAN_TMPDIR/test.yaml
+ run_podman pod inspect $name-pod --format "{{.ExitPolicy}}"
+ is "$output" "stop" "custom exit policy"
+ _ensure_pod_state $name-pod Exited
+ run_podman pod rm $name-pod
}
# vim: filetype=sh
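The `_ensure_pod_state` helper above polls `podman pod inspect --format '{{.State}}'` until the pod reaches the expected state (Degraded under the default "continue" exit policy, Exited under "stop"). Below is a minimal stand-alone sketch, not part of the patch, of the same polling loop written in Go against the podman CLI; the pod name, retry count, and sleep interval are illustrative assumptions.

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForPodState polls `podman pod inspect --format {{.State}}` until the pod
// reports the wanted state or the retries are exhausted.
func waitForPodState(pod, want string) error {
	var state string
	for i := 0; i < 6; i++ {
		out, err := exec.Command("podman", "pod", "inspect", pod, "--format", "{{.State}}").Output()
		if err != nil {
			return err
		}
		state = strings.TrimSpace(string(out))
		if state == want {
			return nil
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("pod %s is %q, expected %q", pod, state, want)
}

func main() {
	// With --exit-policy=stop the pod should end up Exited once its last
	// container exits; with the default "continue" policy it stays Degraded.
	if err := waitForPodState("mypod", "Exited"); err != nil {
		fmt.Println(err)
	}
}
```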
diff --git a/test/system/272-system-connection.bats b/test/system/272-system-connection.bats
index 7b70f60f4..e9e9a01ea 100644
--- a/test/system/272-system-connection.bats
+++ b/test/system/272-system-connection.bats
@@ -99,10 +99,9 @@ $c2[ ]\+tcp://localhost:54321[ ]\+true" \
_SERVICE_PID=$!
wait_for_port localhost $_SERVICE_PORT
- # FIXME: #12023, RemoteSocket is always /run/something
-# run_podman info --format '{{.Host.RemoteSocket.Path}}'
-# is "$output" "tcp:localhost:$_SERVICE_PORT" \
-# "podman info works, and talks to the correct server"
+ _run_podman_remote info --format '{{.Host.RemoteSocket.Path}}'
+ is "$output" "tcp:localhost:$_SERVICE_PORT" \
+ "podman info works, and talks to the correct server"
_run_podman_remote info --format '{{.Store.GraphRoot}}'
is "$output" "${PODMAN_TMPDIR}/root" \
diff --git a/test/system/500-networking.bats b/test/system/500-networking.bats
index 01571d176..3db0804d1 100644
--- a/test/system/500-networking.bats
+++ b/test/system/500-networking.bats
@@ -656,6 +656,15 @@ EOF
run_podman run --network $netname --rm $IMAGE cat /etc/resolv.conf
is "$output" "search dns.podman.*" "correct search domain"
is "$output" ".*nameserver $subnet.1.*" "integrated dns nameserver is set"
+
+ # host network should keep localhost nameservers
+ if grep 127.0.0. /etc/resolv.conf >/dev/null; then
+ run_podman run --network host --rm $IMAGE cat /etc/resolv.conf
+ is "$output" ".*nameserver 127\.0\.0.*" "resolv.conf contains localhost nameserver"
+ fi
+ # host net + dns still works
+ run_podman run --network host --dns 1.1.1.1 --rm $IMAGE cat /etc/resolv.conf
+ is "$output" ".*nameserver 1\.1\.1\.1.*" "resolv.conf contains 1.1.1.1 nameserver"
}
@test "podman run port forward range" {
@@ -723,4 +732,19 @@ EOF
is "${#lines[@]}" "5" "expect 5 host entries in /etc/hosts"
}
+@test "podman run /etc/* permissions" {
+ userns="--userns=keep-id"
+ if ! is_rootless; then
+ userns="--uidmap=0:1111111:65536 --gidmap=0:1111111:65536"
+ fi
+ # check with and without userns
+ for userns in "" "$userns"; do
+ # check that /etc/hosts, /etc/resolv.conf, and /etc/hostname are owned by root
+ run_podman run $userns --rm $IMAGE stat -c %u:%g /etc/hosts /etc/resolv.conf /etc/hostname
+ is "${lines[0]}" "0\:0" "/etc/hosts owned by root"
+ is "${lines[1]}" "0\:0" "/etc/resolv.conf owned by root"
+ is "${lines[2]}" "0\:0" "/etc/hostname owned by root"
+ done
+}
+
# vim: filetype=sh
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index b0624cbf2..7988b26a4 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -278,3 +278,20 @@ status: {}
run_podman 125 play kube - < $PODMAN_TMPDIR/test.yaml
assert "$output" =~ "invalid annotation \"test\"=\"$RANDOMSTRING\"" "Expected to fail with annotation length greater than 63"
}
+
+@test "podman play kube - default log driver" {
+ TESTDIR=$PODMAN_TMPDIR/testdir
+ mkdir -p $TESTDIR
+ echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
+ # Get the default log driver
+ run_podman info --format "{{.Host.LogDriver}}"
+ default_driver=$output
+
+ # Make sure that the default log driver is used
+ run_podman play kube $PODMAN_TMPDIR/test.yaml
+ run_podman inspect --format "{{.HostConfig.LogConfig.Type}}" test_pod-test
+ is "$output" "$default_driver" "play kube uses default log driver"
+
+ run_podman stop -a -t 0
+ run_podman pod rm -t 0 -f test_pod
+}
diff --git a/test/utils/common_function_test.go b/test/utils/common_function_test.go
index a73d75490..7092e40a1 100644
--- a/test/utils/common_function_test.go
+++ b/test/utils/common_function_test.go
@@ -113,8 +113,10 @@ var _ = Describe("Common functions test", func() {
Expect(err).To(BeNil(), "Can not find the JSON file after we write it.")
defer read.Close()
- bytes, _ := ioutil.ReadAll(read)
- json.Unmarshal(bytes, compareData)
+ bytes, err := ioutil.ReadAll(read)
+ Expect(err).ToNot(HaveOccurred())
+ err = json.Unmarshal(bytes, compareData)
+ Expect(err).ToNot(HaveOccurred())
Expect(reflect.DeepEqual(testData, compareData)).To(BeTrue(), "Data changed after we store it to file.")
})
diff --git a/test/utils/podmantest_test.go b/test/utils/podmantest_test.go
index a9b5aef1f..26d359d38 100644
--- a/test/utils/podmantest_test.go
+++ b/test/utils/podmantest_test.go
@@ -1,8 +1,6 @@
package utils_test
import (
- "os"
-
. "github.com/containers/podman/v4/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -19,16 +17,6 @@ var _ = Describe("PodmanTest test", func() {
FakeOutputs = make(map[string][]string)
})
- It("Test PodmanAsUserBase", func() {
- FakeOutputs["check"] = []string{"check"}
- os.Setenv("HOOK_OPTION", "hook_option")
- env := os.Environ()
- session := podmanTest.PodmanAsUserBase([]string{"check"}, 1000, 1000, "", env, true, false, nil, nil)
- os.Unsetenv("HOOK_OPTION")
- session.WaitWithDefaultTimeout()
- Expect(session.Command.Process).ShouldNot(BeNil())
- })
-
It("Test NumberOfContainersRunning", func() {
FakeOutputs["ps -q"] = []string{"one", "two"}
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(2))
diff --git a/test/utils/utils.go b/test/utils/utils.go
index f3e14c784..36f5a9414 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -489,7 +489,9 @@ func IsCommandAvailable(command string) bool {
// WriteJSONFile write json format data to a json file
func WriteJSONFile(data []byte, filePath string) error {
var jsonData map[string]interface{}
- json.Unmarshal(data, &jsonData)
+ if err := json.Unmarshal(data, &jsonData); err != nil {
+ return err
+ }
formatJSON, err := json.MarshalIndent(jsonData, "", " ")
if err != nil {
return err
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index d362495e3..a86eca88e 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -349,6 +349,9 @@ type EngineConfig struct {
// OCIRuntimes are the set of configured OCI runtimes (default is runc).
OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
+ // PodExitPolicy determines the behaviour when the last container of a pod exits.
+ PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"`
+
// PullPolicy determines whether to pull image before creating or running a container
// default is "missing"
PullPolicy string `toml:"pull_policy,omitempty"`
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 2b250753e..a4e755a66 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -506,6 +506,9 @@ default_sysctls = [
#
#num_locks = 2048
+# Set the exit policy of the pod when the last container exits.
+#pod_exit_policy = "continue"
+
# Whether to pull new image before running a container
#
#pull_policy = "missing"
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 62b348d6e..8979a406b 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -388,6 +388,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.MachineEnabled = false
c.ChownCopiedFiles = true
+ c.PodExitPolicy = defaultPodExitPolicy
+
return c, nil
}
diff --git a/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go
new file mode 100644
index 000000000..f0f983077
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go
@@ -0,0 +1,36 @@
+package config
+
+import "fmt"
+
+// PodExitPolicies includes the supported pod exit policies.
+var PodExitPolicies = []string{string(PodExitPolicyContinue), string(PodExitPolicyStop)}
+
+// PodExitPolicy determines a pod's exit and stop behaviour.
+type PodExitPolicy string
+
+const (
+ // PodExitPolicyContinue instructs the pod to continue running when the
+ // last container has exited.
+ PodExitPolicyContinue PodExitPolicy = "continue"
+ // PodExitPolicyStop instructs the pod to stop when the last container
+ // has exited.
+ PodExitPolicyStop = "stop"
+ // PodExitPolicyUnsupported implies an internal error.
+ // Negative for backwards compat.
+ PodExitPolicyUnsupported = "invalid"
+
+ defaultPodExitPolicy = PodExitPolicyContinue
+)
+
+// ParsePodExitPolicy parses the specified policy and returns an error if it is
+// invalid.
+func ParsePodExitPolicy(policy string) (PodExitPolicy, error) {
+ switch policy {
+ case "", string(PodExitPolicyContinue):
+ return PodExitPolicyContinue, nil
+ case string(PodExitPolicyStop):
+ return PodExitPolicyStop, nil
+ default:
+ return PodExitPolicyUnsupported, fmt.Errorf("invalid pod exit policy: %q", policy)
+ }
+}
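ParsePodExitPolicy above accepts the empty string and "continue" as the continue policy, "stop" as the stop policy, and rejects anything else. A minimal sketch of calling it from application code, assuming the github.com/containers/common/pkg/config import path shown in this vendored file; the sample inputs are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	for _, raw := range []string{"", "continue", "stop", "bogus"} {
		policy, err := config.ParsePodExitPolicy(raw)
		if err != nil {
			// "bogus" falls through to the default branch and is rejected.
			fmt.Printf("%q -> error: %v\n", raw, err)
			continue
		}
		// "" and "continue" map to PodExitPolicyContinue, "stop" to PodExitPolicyStop.
		fmt.Printf("%q -> %s\n", raw, policy)
	}
}
```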
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index c1ae6c9dd..61fce9d22 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.47.4+dev"
+const Version = "0.48.0"
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 644f82615..d28cc4a3f 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -32,7 +32,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v7"
- "github.com/vbauerster/mpb/v7/decor"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)
@@ -712,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
- // If encrypted and decryption keys provided, we should try to decrypt
- ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if options.OptimizeDestinationImageAlreadyExists {
@@ -1069,85 +1066,6 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
return man, manifestDigest, nil
}
-// newProgressPool creates a *mpb.Progress.
-// The caller must eventually call pool.Wait() after the pool will no longer be updated.
-// NOTE: Every progress bar created within the progress pool must either successfully
-// complete or be aborted, or pool.Wait() will hang. That is typically done
-// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
-func (c *copier) newProgressPool() *mpb.Progress {
- return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
-}
-
-// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
-func customPartialBlobDecorFunc(s decor.Statistics) string {
- if s.Total == 0 {
- pairFmt := "%.1f / %.1f (skipped: %.1f)"
- return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
- }
- pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
- percentage := 100.0 * float64(s.Refill) / float64(s.Total)
- return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
-}
-
-// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
-// is io.Discard, the progress bar's output will be discarded
-// NOTE: Every progress bar created within a progress pool must either successfully
-// complete or be aborted, or pool.Wait() will hang. That is typically done
-// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
-func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
- // shortDigestLen is the length of the digest used for blobs.
- const shortDigestLen = 12
-
- prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
- // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
- maxPrefixLen := len("Copying blob ") + shortDigestLen
- if len(prefix) > maxPrefixLen {
- prefix = prefix[:maxPrefixLen]
- }
-
- // onComplete will replace prefix once the bar/spinner has completed
- onComplete = prefix + " " + onComplete
-
- // Use a normal progress bar when we know the size (i.e., size > 0).
- // Otherwise, use a spinner to indicate that something's happening.
- var bar *mpb.Bar
- if info.Size > 0 {
- if partial {
- bar = pool.AddBar(info.Size,
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- mpb.AppendDecorators(
- decor.Any(customPartialBlobDecorFunc),
- ),
- )
- } else {
- bar = pool.AddBar(info.Size,
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- mpb.AppendDecorators(
- decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
- ),
- )
- }
- } else {
- bar = pool.New(0,
- mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
- mpb.BarFillerClearOnComplete(),
- mpb.PrependDecorators(
- decor.OnComplete(decor.Name(prefix), onComplete),
- ),
- )
- }
- if c.progressOutput == io.Discard {
- c.Printf("Copying %s %s\n", kind, info.Digest)
- }
- return bar
-}
-
// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
@@ -1158,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
}
defer c.concurrentBlobCopiesSemaphore.Release(1)
- configBlob, err := src.ConfigBlob(ctx)
- if err != nil {
- return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
- }
-
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool := c.newProgressPool()
defer progressPool.Wait()
bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
+ configBlob, err := src.ConfigBlob(ctx)
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
+ }
+
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
- bar.SetTotal(int64(len(configBlob)), true)
+
+ bar.mark100PercentComplete()
return destInfo, nil
}()
if err != nil {
@@ -1218,11 +1137,15 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
- // Diffs are needed if we are encrypting an image or trying to decrypt an image
- diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
-
- // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
- if !diffIDIsNeeded {
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+ // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+ // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+ // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+ encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
+ canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+ // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+ if canAvoidProcessingCompleteLayer {
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
@@ -1242,9 +1165,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
func() { // A scope for defer
- bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
+ bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
defer bar.Abort(false)
- bar.SetTotal(0, true)
+ bar.mark100PercentComplete()
}()
// Throw an event that the layer has been skipped
@@ -1275,7 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// of the source file are not known yet and must be fetched.
// Attempt a partial only when the source allows to retrieve a blob partially and
// the destination has support for it.
- if ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() && !diffIDIsNeeded {
+ if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
hideProgressBar := true
@@ -1287,12 +1210,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
wrapped: ic.c.rawSource,
bar: bar,
}
- bar.SetTotal(srcInfo.Size, false)
info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
- bar.SetRefill(srcInfo.Size - bar.Current())
- bar.SetCurrent(srcInfo.Size)
- bar.SetTotal(srcInfo.Size, true)
+ if srcInfo.Size != -1 {
+ bar.SetRefill(srcInfo.Size - bar.Current())
+ }
+ bar.mark100PercentComplete()
hideProgressBar = false
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
return true, info
@@ -1305,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
// Fallback: copy the layer, computing the diffID if we need to do so
- srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
- if err != nil {
- return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
- }
- defer srcStream.Close()
-
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
defer bar.Abort(false)
+ srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
+ if err != nil {
+ return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
+ }
+ defer srcStream.Close()
+
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
@@ -1330,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
- // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
- // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
- ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encryptingOrDecrypting {
+ // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+ // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+ }
diffID = diffIDResult.digest
}
}
- bar.SetTotal(srcInfo.Size, true)
+ bar.mark100PercentComplete()
return blobInfo, diffID, nil
}()
}
@@ -1347,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
- diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
@@ -1424,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
- canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
+ canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
}
@@ -1444,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
var destStream io.Reader = digestingReader
+ // === Update progress bars
+ destStream = bar.ProxyReader(destStream)
+
// === Decrypt the stream, if required.
var decrypted bool
if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
@@ -1478,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
}
- // === Update progress bars
- destStream = bar.ProxyReader(destStream)
-
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
if getOriginalLayerCopyWriter != nil {
@@ -1681,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
- // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
- // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
- // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
- // (because inputInfo.Digest == "", this must have been computed afresh).
- switch compressionOperation {
- case types.PreserveOriginal:
- break // Do nothing, we have only one digest and we might not have even verified it.
- case types.Compress:
- c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
- case types.Decompress:
- c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
- default:
- return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+ // Don’t record any associations that involve encrypted data. This is a bit crude,
+ // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
+ // might be safe, but it’s not trivially obvious, so let’s be conservative for now.
+ // This crude approach also means we don’t need to record whether a blob is encrypted
+ // in the blob info cache (which would probably be necessary for any more complex logic),
+ // and the simplicity is attractive.
+ if !encrypted && !decrypted {
+ // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+ // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+ // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+ // (because inputInfo.Digest == "", this must have been computed afresh).
+ switch compressionOperation {
+ case types.PreserveOriginal:
+ break // Do nothing, we have only one digest and we might not have even verified it.
+ case types.Compress:
+ c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+ case types.Decompress:
+ c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+ default:
+ return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+ }
}
if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go
new file mode 100644
index 000000000..585d86057
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go
@@ -0,0 +1,148 @@
+package copy
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/containers/image/v5/internal/private"
+ "github.com/containers/image/v5/types"
+ "github.com/vbauerster/mpb/v7"
+ "github.com/vbauerster/mpb/v7/decor"
+)
+
+// newProgressPool creates a *mpb.Progress.
+// The caller must eventually call pool.Wait() after the pool will no longer be updated.
+// NOTE: Every progress bar created within the progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
+func (c *copier) newProgressPool() *mpb.Progress {
+ return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+ if s.Total == 0 {
+ pairFmt := "%.1f / %.1f (skipped: %.1f)"
+ return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+ }
+ pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
+ percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+ return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+ *mpb.Bar
+ originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// by convention, we don't leave progress bars in partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+ // shortDigestLen is the length of the digest used for blobs.
+ const shortDigestLen = 12
+
+ prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+ // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
+ maxPrefixLen := len("Copying blob ") + shortDigestLen
+ if len(prefix) > maxPrefixLen {
+ prefix = prefix[:maxPrefixLen]
+ }
+
+ // onComplete will replace prefix once the bar/spinner has completed
+ onComplete = prefix + " " + onComplete
+
+ // Use a normal progress bar when we know the size (i.e., size > 0).
+ // Otherwise, use a spinner to indicate that something's happening.
+ var bar *mpb.Bar
+ if info.Size > 0 {
+ if partial {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.Any(customPartialBlobDecorFunc),
+ ),
+ )
+ } else {
+ bar = pool.AddBar(info.Size,
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ mpb.AppendDecorators(
+ decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+ ),
+ )
+ }
+ } else {
+ bar = pool.New(0,
+ mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+ mpb.BarFillerClearOnComplete(),
+ mpb.PrependDecorators(
+ decor.OnComplete(decor.Name(prefix), onComplete),
+ ),
+ )
+ }
+ if c.progressOutput == io.Discard {
+ c.Printf("Copying %s %s\n", kind, info.Digest)
+ }
+ return &progressBar{
+ Bar: bar,
+ originalSize: info.Size,
+ }
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+ if bar.originalSize > 0 {
+ // We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+ // after a progress bar is created with a definite total.
+ bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+ } else {
+ // -1 = unknown size
+ // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+ // size (possible at least in theory), in mpb, zero-sized progress bars are treated
+ // as unknown size, in particular they are not configured to be marked as
+ // complete on bar.Current() reaching bar.total (because that would happen already
+ // when creating the progress bar).
+ // That means that we are both _allowed_ to call SetTotal, and we _have to_.
+ bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+ }
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+ wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+ bar *progressBar // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+ rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
+ if err == nil {
+ total := int64(0)
+ for _, c := range chunks {
+ total += int64(c.Length)
+ }
+ s.bar.IncrInt64(total)
+ }
+ return rc, errs, err
+}
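mark100PercentComplete above distinguishes two cases: a bar created with a known size completes once its current count reaches that total (so SetCurrent is enough), while a zero-sized spinner never auto-completes and has to be forced with SetTotal(-1, true). Below is a stand-alone sketch, not part of the patch, of those two paths using the same mpb/v7 calls as this new file; the sizes and labels are illustrative.

```go
package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40))

	// Known size: the bar completes when the current count reaches the total,
	// so SetCurrent(originalSize) is enough.
	known := pool.AddBar(1024, mpb.PrependDecorators(decor.Name("known   ")))
	known.SetCurrent(1024)

	// Unknown size (spinner created with total 0): completion has to be forced.
	// SetTotal(-1, true) adopts the current count as the total and marks the
	// bar complete.
	unknown := pool.New(0,
		mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
		mpb.PrependDecorators(decor.Name("unknown ")),
	)
	unknown.SetTotal(-1, true)

	pool.Wait()
}
```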
diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go
index de23cec1b..d5e9e09bd 100644
--- a/vendor/github.com/containers/image/v5/copy/progress_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go
@@ -1,16 +1,13 @@
package copy
import (
- "context"
"io"
"time"
- "github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
- "github.com/vbauerster/mpb/v7"
)
-// progressReader is a reader that reports its progress on an interval.
+// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
type progressReader struct {
source io.Reader
channel chan<- types.ProgressProperties
@@ -80,27 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) {
}
return n, err
}
-
-// blobChunkAccessorProxy wraps a BlobChunkAccessor and keeps track of how many bytes
-// are received.
-type blobChunkAccessorProxy struct {
- wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
- bar *mpb.Bar // A progress bar updated with the number of bytes read so far
-}
-
-// GetBlobAt returns a sequential channel of readers that contain data for the requested
-// blob chunks, and a channel that might get a single error value.
-// The specified chunks must be not overlapping and sorted by their offset.
-// The readers must be fully consumed, in the order they are returned, before blocking
-// to read the next chunk.
-func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
- if err == nil {
- total := int64(0)
- for _, c := range chunks {
- total += int64(c.Length)
- }
- s.bar.IncrInt64(total)
- }
- return rc, errs, err
-}
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 9447d53c4..6295b5493 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -11,7 +11,7 @@ const (
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = "-dev"
+ VersionDev = ""
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS
index e6a7d1f0a..af38d03bf 100644
--- a/vendor/github.com/containers/ocicrypt/MAINTAINERS
+++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS
@@ -3,3 +3,4 @@
# Github ID, Name, Email Address
lumjjb, Brandon Lum, lumjjb@gmail.com
stefanberger, Stefan Berger, stefanb@linux.ibm.com
+arronwy, Arron Wang, arron.wang@intel.com
diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
index 448e88c7c..7d80f5f84 100644
--- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
+++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go
@@ -40,8 +40,6 @@ import (
var (
// OAEPLabel defines the label we use for OAEP encryption; this cannot be changed
OAEPLabel = []byte("")
- // OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed
- OAEPDefaultHash = "sha1"
// OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM
OAEPSha1Params = &pkcs11.OAEPParams{
@@ -69,12 +67,12 @@ func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, stri
)
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
- // The default is 'sha1'
+ // The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
- case "sha1", "":
+ case "sha1":
hashfunc = sha1.New()
hashalg = "sha1"
- case "sha256":
+ case "sha256", "":
hashfunc = sha256.New()
hashalg = "sha256"
default:
@@ -283,12 +281,12 @@ func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, s
var oaep *pkcs11.OAEPParams
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
- // the default is sha1
+ // The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
- case "sha1", "":
+ case "sha1":
oaep = OAEPSha1Params
hashalg = "sha1"
- case "sha256":
+ case "sha256", "":
oaep = OAEPSha256Params
hashalg = "sha256"
default:
@@ -333,7 +331,7 @@ func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hash
var oaep *pkcs11.OAEPParams
- // the default is sha1
+ // An empty string from the Hash in the JSON historically defaults to sha1.
switch hashalg {
case "sha1", "":
oaep = OAEPSha1Params
@@ -410,9 +408,6 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
return nil, err
}
- if hashalg == OAEPDefaultHash {
- hashalg = ""
- }
recipient := Pkcs11Recipient{
Version: 0,
Blob: base64.StdEncoding.EncodeToString(ciphertext),
@@ -431,15 +426,18 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
// {
// "version": 0,
// "blob": <base64 encoded RSA OAEP encrypted blob>,
-// "hash": <hash used for OAEP other than 'sha256'>
+// "hash": <hash used for OAEP other than 'sha1'>
// } ,
// {
// "version": 0,
// "blob": <base64 encoded RSA OAEP encrypted blob>,
-// "hash": <hash used for OAEP other than 'sha256'>
+// "hash": <hash used for OAEP other than 'sha1'>
// } ,
// [...]
// }
+// Note: More recent versions of this code explicitly write 'sha1'
+// while older versions left it empty when 'sha1' was used.
+//
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
pkcs11blob := Pkcs11Blob{}
err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
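The ocicrypt change above flips the default OAEP hash from sha1 to sha256 while keeping OCICRYPT_OAEP_HASHALG as the override. A small sketch mirroring that selection logic outside the library; the helper name pickOAEPHash is made up for illustration.

```go
package main

import (
	"crypto/sha1"
	"crypto/sha256"
	"fmt"
	"hash"
	"os"
	"strings"
)

// pickOAEPHash mirrors the logic above: sha256 is now the default, and sha1
// must be requested explicitly via OCICRYPT_OAEP_HASHALG.
func pickOAEPHash() (hash.Hash, string, error) {
	oaephash := strings.ToLower(os.Getenv("OCICRYPT_OAEP_HASHALG"))
	switch oaephash {
	case "sha1":
		return sha1.New(), "sha1", nil
	case "sha256", "":
		return sha256.New(), "sha256", nil
	default:
		return nil, "", fmt.Errorf("unsupported OAEP hash %q", oaephash)
	}
}

func main() {
	_, name, err := pickOAEPHash()
	if err != nil {
		panic(err)
	}
	fmt.Println("OAEP hash:", name) // "sha256" unless the env var overrides it
}
```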
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 79833f2ce..148dabb45 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.39.0+dev
+1.40.2
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 3c508b66b..c598b936d 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -76,7 +76,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
UID: uid,
GID: gid,
}
- mappedPair, err := toHost.ToHost(pair)
+ mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
index 79a591288..143cccf92 100644
--- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -1,7 +1,6 @@
package graphdriver
import (
- "fmt"
"golang.org/x/sys/unix"
"github.com/containers/storage/pkg/mount"
diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
index 34fdc5790..2af33a6fc 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
@@ -133,7 +133,7 @@ func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
// just wait for the SIGKILL
for {
- syscall.Syscall6(uintptr(unix.SYS_PAUSE), 0, 0, 0, 0, 0, 0)
+ syscall.Pause()
}
}
cleanupFunc := func() {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 550dc7d39..09d24ae8b 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -939,6 +939,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
rootUID = int(st.UID())
rootGID = int(st.GID())
}
+
+ if _, err := system.Lstat(dir); err == nil {
+ logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
+ // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
+ // so that we can’t end up with two symlinks in linkDir pointing to the same layer.
+ if err := d.Remove(id); err != nil {
+ return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir)
+ }
+ }
+
if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil {
return err
}
@@ -1389,7 +1399,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
- relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
+ relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN)))
diffN++
st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil && !permsKnown {
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
index fd98ad305..61a2ed871 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -2,7 +2,6 @@ package zfs
import (
"fmt"
- "strings"
"github.com/containers/storage/drivers"
"github.com/pkg/errors"
@@ -26,19 +25,10 @@ func checkRootdirFs(rootdir string) error {
}
func getMountpoint(id string) string {
- maxlen := 12
-
- // we need to preserve filesystem suffix
- suffix := strings.SplitN(id, "-", 2)
-
- if len(suffix) > 1 {
- return id[:maxlen] + "-" + suffix[1]
- }
-
- return id[:maxlen]
+ return id
}
func detachUnmount(mountpoint string) error {
- // FreeBSD doesn't have an equivalent to MNT_DETACH
- return unix.Unmount(mountpoint, 0)
+ // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH
+ return unix.Unmount(mountpoint, unix.MNT_FORCE)
}
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 1915ea65d..5d8c59960 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -12,7 +12,7 @@ require (
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.15.1
+ github.com/klauspost/compress v1.15.2
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index cd5bf3b97..97a0d167d 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -424,8 +424,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
+github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 34d27ffa3..bba8d7588 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -692,11 +692,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
return copyLayer(layer), nil
}
-func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
+func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
if !r.IsReadWrite() {
return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
}
- size = -1
if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err
}
@@ -762,6 +761,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if mountLabel != "" {
label.ReserveLabel(mountLabel)
}
+
+ // Before actually creating the layer, make a persistent record of it with incompleteFlag,
+ // so that future processes have a chance to delete it.
+ layer := &Layer{
+ ID: id,
+ Parent: parent,
+ Names: names,
+ MountLabel: mountLabel,
+ Metadata: templateMetadata,
+ Created: time.Now().UTC(),
+ CompressedDigest: templateCompressedDigest,
+ CompressedSize: templateCompressedSize,
+ UncompressedDigest: templateUncompressedDigest,
+ UncompressedSize: templateUncompressedSize,
+ CompressionType: templateCompressionType,
+ UIDs: templateUIDs,
+ GIDs: templateGIDs,
+ Flags: make(map[string]interface{}),
+ UIDMap: copyIDMap(moreOptions.UIDMap),
+ GIDMap: copyIDMap(moreOptions.GIDMap),
+ BigDataNames: []string{},
+ }
+ r.layers = append(r.layers, layer)
+ r.idindex.Add(id)
+ r.byid[id] = layer
+ for _, name := range names {
+ r.byname[name] = layer
+ }
+ for flag, value := range flags {
+ layer.Flags[flag] = value
+ }
+ layer.Flags[incompleteFlag] = true
+
+ succeeded := false
+ cleanupFailureContext := ""
+ defer func() {
+ if !succeeded {
+ // On any error, try both removing the driver's data as well
+ // as the in-memory layer record.
+ if err2 := r.Delete(layer.ID); err2 != nil {
+ if cleanupFailureContext == "" {
+ cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
+ }
+ logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
+ }
+ }
+ }()
+
+ err := r.Save()
+ if err != nil {
+ cleanupFailureContext = "saving incomplete layer metadata"
+ return nil, -1, err
+ }
+
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
@@ -769,132 +822,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
IDMappings: idMappings,
}
if moreOptions.TemplateLayer != "" {
- if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
+ if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
+ cleanupFailureContext = "creating a layer from template"
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
}
oldMappings = templateIDMappings
} else {
if writeable {
- if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
+ if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
+ cleanupFailureContext = "creating a read-write layer"
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
}
} else {
- if err = r.driver.Create(id, parent, &opts); err != nil {
+ if err := r.driver.Create(id, parent, &opts); err != nil {
+ cleanupFailureContext = "creating a read-only layer"
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
}
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
- if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- if err2 := r.driver.Remove(id); err2 != nil {
- logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
- }
+ if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
+ cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err
}
}
if len(templateTSdata) > 0 {
if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- if err2 := r.driver.Remove(id); err2 != nil {
- logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
- }
+ cleanupFailureContext = "creating tar-split parent directory for a copy from template"
return nil, -1, err
}
- if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- if err2 := r.driver.Remove(id); err2 != nil {
- logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
- }
+ if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
+ cleanupFailureContext = "creating a tar-split copy from template"
return nil, -1, err
}
}
- if err == nil {
- layer = &Layer{
- ID: id,
- Parent: parent,
- Names: names,
- MountLabel: mountLabel,
- Metadata: templateMetadata,
- Created: time.Now().UTC(),
- CompressedDigest: templateCompressedDigest,
- CompressedSize: templateCompressedSize,
- UncompressedDigest: templateUncompressedDigest,
- UncompressedSize: templateUncompressedSize,
- CompressionType: templateCompressionType,
- UIDs: templateUIDs,
- GIDs: templateGIDs,
- Flags: make(map[string]interface{}),
- UIDMap: copyIDMap(moreOptions.UIDMap),
- GIDMap: copyIDMap(moreOptions.GIDMap),
- BigDataNames: []string{},
- }
- r.layers = append(r.layers, layer)
- r.idindex.Add(id)
- r.byid[id] = layer
- for _, name := range names {
- r.byname[name] = layer
- }
- for flag, value := range flags {
- layer.Flags[flag] = value
- }
- savedIncompleteLayer := false
- if diff != nil {
- layer.Flags[incompleteFlag] = true
- err = r.Save()
- if err != nil {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- if err2 := r.driver.Remove(id); err2 != nil {
- logrus.Errorf("While recovering from a failure saving incomplete layer metadata, error deleting layer %#v: %v", id, err2)
- }
- return nil, -1, err
- }
- savedIncompleteLayer = true
- size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
- if err != nil {
- if err2 := r.Delete(layer.ID); err2 != nil {
- // Either a driver error or an error saving.
- // We now have a layer that's been marked for
- // deletion but which we failed to remove.
- logrus.Errorf("While recovering from a failure applying layer diff, error deleting layer %#v: %v", layer.ID, err2)
- }
- return nil, -1, err
- }
- delete(layer.Flags, incompleteFlag)
- } else {
- // applyDiffWithOptions in the `diff != nil` case handles this bit for us
- if layer.CompressedDigest != "" {
- r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
- }
- if layer.UncompressedDigest != "" {
- r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
- }
- }
- err = r.Save()
+
+ var size int64 = -1
+ if diff != nil {
+ size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
- if savedIncompleteLayer {
- if err2 := r.Delete(layer.ID); err2 != nil {
- // Either a driver error or an error saving.
- // We now have a layer that's been marked for
- // deletion but which we failed to remove.
- logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v: %v", layer.ID, err2)
- }
- } else {
- // We don't have a record of this layer, but at least
- // try to clean it up underneath us.
- if err2 := r.driver.Remove(id); err2 != nil {
- logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v in graph driver: %v", id, err2)
- }
- }
+ cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
- layer = copyLayer(layer)
+ } else {
+ // applyDiffWithOptions in the `diff != nil` case handles this bit for us
+ if layer.CompressedDigest != "" {
+ r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+ }
+ if layer.UncompressedDigest != "" {
+ r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+ }
}
+ delete(layer.Flags, incompleteFlag)
+ err = r.Save()
+ if err != nil {
+ cleanupFailureContext = "saving finished layer metadata"
+ return nil, -1, err
+ }
+
+ layer = copyLayer(layer)
+ succeeded = true
return layer, size, err
}
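The rewritten Put above persists the layer record with incompleteFlag before any driver work happens, defers a cleanup that deletes the record on failure, and clears the flag only after the final Save succeeds, so a crash mid-creation leaves behind a record that a later process can garbage-collect. A schematic sketch of that persist-incomplete/finalize pattern, using placeholder types rather than the containers/storage API:

```go
package main

import (
	"fmt"
	"log"
)

// record stands in for a layer entry persisted to disk.
type record struct {
	ID         string
	Incomplete bool
}

type store struct{ records map[string]*record }

// save and remove stand in for persisting metadata and deleting the layer.
func (s *store) save(r *record) error   { s.records[r.ID] = r; return nil }
func (s *store) remove(id string) error { delete(s.records, id); return nil }

// create shows the pattern: persist an incomplete record first, defer cleanup,
// and mark the record complete only after every fallible step has succeeded.
func (s *store) create(id string, work func() error) error {
	rec := &record{ID: id, Incomplete: true}
	if err := s.save(rec); err != nil {
		return err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			if err := s.remove(id); err != nil {
				log.Printf("cleanup of incomplete %q failed: %v", id, err)
			}
		}
	}()
	if err := work(); err != nil { // driver create, applying the diff, ...
		return err
	}
	rec.Incomplete = false
	if err := s.save(rec); err != nil {
		return err
	}
	succeeded = true
	return nil
}

func main() {
	s := &store{records: map[string]*record{}}
	if err := s.create("layer1", func() error { return nil }); err != nil {
		log.Fatal(err)
	}
	fmt.Println("incomplete after create:", s.records["layer1"].Incomplete)
}
```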
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 7c8f4d10c..7a8fec0ce 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -3,15 +3,18 @@ package idtools
import (
"bufio"
"fmt"
+ "io/ioutil"
"os"
"os/user"
"sort"
"strconv"
"strings"
+ "sync"
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// IDMap contains a single entry for user namespace range remapping. An array
@@ -203,6 +206,67 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
return target, err
}
+var (
+ overflowUIDOnce sync.Once
+ overflowGIDOnce sync.Once
+ overflowUID int
+ overflowGID int
+)
+
+// getOverflowUID returns the UID mapped to the overflow user
+func getOverflowUID() int {
+ overflowUIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
+ overflowUID = 65534
+ if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowUID = tmp
+ }
+ }
+ })
+ return overflowUID
+}
+
+// getOverflowGID returns the GID mapped to the overflow user
+func getOverflowGID() int {
+ overflowGIDOnce.Do(func() {
+ // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
+ overflowGID = 65534
+ if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+ if tmp, err := strconv.Atoi(string(content)); err == nil {
+ overflowGID = tmp
+ }
+ }
+ })
+ return overflowGID
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+// If the mapping is not possible because the target ID is not mapped into
+// the namespace, then the overflow ID is used.
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
+ var err error
+ target := i.RootPair()
+
+ if pair.UID != target.UID {
+ target.UID, err = RawToHost(pair.UID, i.uids)
+ if err != nil {
+ target.UID = getOverflowUID()
+ logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
+ }
+ }
+
+ if pair.GID != target.GID {
+ target.GID, err = RawToHost(pair.GID, i.gids)
+ if err != nil {
+ target.GID = getOverflowGID()
+ logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
+ }
+ }
+ return target, nil
+}
+
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
uid, err := RawToContainer(pair.UID, i.uids)
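ToHostOverflow above falls back to the kernel overflow IDs when a container UID or GID has no mapping on the host, reading /proc/sys/kernel/overflowuid and /proc/sys/kernel/overflowgid and defaulting to 65534. A small standalone sketch of that fallback behavior, with an illustrative mapToHost helper instead of the idtools API:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// overflowID reads one of the kernel overflow sysctls, falling back to 65534,
// the historical value on kernels without the proc file.
func overflowID(path string) int {
	id := 65534
	if content, err := os.ReadFile(path); err == nil {
		if v, err := strconv.Atoi(strings.TrimSpace(string(content))); err == nil {
			id = v
		}
	}
	return id
}

// mapToHost is an illustrative mapping function: it fails when the container
// ID is outside the configured ranges, and the caller substitutes the
// overflow ID instead of aborting the whole chown.
func mapToHost(containerID int, mapped map[int]int) (int, error) {
	if hostID, ok := mapped[containerID]; ok {
		return hostID, nil
	}
	return 0, fmt.Errorf("container ID %d has no host mapping", containerID)
}

func main() {
	uidMap := map[int]int{0: 100000, 1: 100001}
	hostUID, err := mapToHost(99, uidMap)
	if err != nil {
		hostUID = overflowID("/proc/sys/kernel/overflowuid")
	}
	fmt.Println("host UID:", hostUID)
}
```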
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
new file mode 100644
index 000000000..3ba99cf93
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MNT_SYNCHRONOUS
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MNT_UPDATE
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MNT_NOATIME
+
+ mntDetach = unix.MNT_FORCE
+
+ NODIRATIME = 0
+ NODEV = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ BIND = 0
+ RBIND = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SLAVE = 0
+ RSLAVE = 0
+ SHARED = 0
+ RSHARED = 0
+ RELATIME = 0
+ STRICTATIME = 0
+)
diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
index 9afd26d4c..ee0f593a5 100644
--- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
@@ -1,4 +1,5 @@
-// +build !linux
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
package mount
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
index b31cf99d0..72ceec3dd 100644
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -28,14 +28,25 @@ func allocateIOVecs(options []string) []C.struct_iovec {
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
- xs := strings.Split(data, ",")
- for _, x := range xs {
- if x == "bind" {
- isNullFS = true
+ options := []string{"fspath", target}
+
+ if data != "" {
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ continue
+ }
+ opt := strings.SplitN(x, "=", 2)
+ options = append(options, opt[0])
+ if len(opt) == 2 {
+ options = append(options, opt[1])
+ } else {
+ options = append(options, "")
+ }
}
}
- options := []string{"fspath", target}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
new file mode 100644
index 000000000..6f63ae991
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go
@@ -0,0 +1,37 @@
+// +build freebsd
+
+package reexec
+
+import (
+ "context"
+ "os"
+ "os/exec"
+
+ "golang.org/x/sys/unix"
+)
+
+// Self returns the path to the current process's binary.
+// Uses sysctl.
+func Self() string {
+ path, err := unix.SysctlArgs("kern.proc.pathname", -1)
+ if err == nil {
+ return path
+ }
+ return os.Args[0]
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+ cmd := exec.Command(Self())
+ cmd.Args = args
+ return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, Self())
+ cmd.Args = args
+ return cmd
+}
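The new FreeBSD file resolves the running binary through the kern.proc.pathname sysctl so that re-exec keeps working even when argv[0] is not an absolute path. For context, a hedged sketch of the usual Register/Init/Command pattern that callers of the reexec package follow (the child name "storage-child" is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// The child re-executes the same binary with argv[0] set to the
	// registered name; Self() (sysctl-based on FreeBSD) supplies the path.
	reexec.Register("storage-child", func() {
		fmt.Println("running re-executed child")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		// A registered initializer ran; nothing more to do in this process.
		return
	}
	cmd := reexec.Command("storage-child")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "child failed:", err)
	}
}
```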
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
index 9dd8cb9bb..a56ada216 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -1,4 +1,5 @@
-// +build freebsd solaris darwin
+//go:build solaris || darwin
+// +build solaris darwin
package reexec
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
index b35f8449b..d31b37815 100644
--- a/vendor/github.com/klauspost/compress/.gitignore
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 0e2dc116a..5b7cf781a 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,13 @@ This package provides various compression algorithms.
# changelog
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
diff --git a/vendor/github.com/klauspost/compress/go.mod b/vendor/github.com/klauspost/compress/go.mod
index 5aa64a436..b605e2d52 100644
--- a/vendor/github.com/klauspost/compress/go.mod
+++ b/vendor/github.com/klauspost/compress/go.mod
@@ -1,3 +1,3 @@
module github.com/klauspost/compress
-go 1.15
+go 1.16
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index d47f6644f..ce8e93bcd 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -12,14 +12,14 @@ import (
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
-// go:noescape
+//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
-// go:noescape
+//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 000000000..3954c5121
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For a more versatile solution check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+ return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+ return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call returned function to restore previous state.
+func DisableBMI2() func() {
+ old := hasBMI2
+ hasBMI2 = false
+ return func() {
+ hasBMI2 = old
+ }
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+ return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 000000000..e802579c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+// go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+ hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 000000000..4465fbe9e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+ // 1. determine max EAX value
+ XORQ AX, AX
+ CPUID
+
+ CMPQ AX, $7
+ JB unsupported
+
+ // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+ MOVQ $7, AX
+ MOVQ $0, CX
+ CPUID
+
+ BTQ $3, BX // bit 3 = BMI1
+ SETCS AL
+
+ BTQ $8, BX // bit 8 = BMI2
+ SETCS AH
+
+ MOVB AL, bmi1+0(FP)
+ MOVB AH, bmi2+1(FP)
+ RET
+
+unsupported:
+ XORQ AX, AX
+ MOVB AL, bmi1+0(FP)
+ MOVB AL, bmi2+1(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index e3445ac19..beb7fa872 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co
### Benchmarks
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
```
-BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
-BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
-
-BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
-BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
-
-Concurrent performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
-
-BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```
-This reflects the performance around May 2020, but this may be out of date.
+This reflects the performance around May 2022, but this may be out of date.
## Zstd inside ZIP files
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7d567a54a..b2bca3301 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,9 +5,14 @@
package zstd
import (
+ "bytes"
+ "encoding/binary"
"errors"
"fmt"
"io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -38,6 +43,9 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10
+ compressedBlockOverAlloc = 16
+ maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
+
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
@@ -136,7 +144,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
- maxSize := maxBlockSize
+ maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
@@ -157,9 +165,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
- maxSize = maxCompressedBlockSize
+ maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
- maxSize = int(windowSize)
+ maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {
@@ -190,9 +198,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize {
- b.dataStorage = make([]byte, 0, cSize)
+ b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
- b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+ b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {
@@ -486,10 +494,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.dst = append(b.dst, hist.decoders.literals...)
return nil
}
- err = hist.decoders.decodeSync(hist)
+ before := len(hist.decoders.out)
+ err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
if err != nil {
return err
}
+ if hist.decoders.maxSyncLen > 0 {
+ hist.decoders.maxSyncLen += uint64(before)
+ hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
+ }
b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset
return nil
@@ -632,6 +645,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
+ // Extract blocks...
+ if false && hist.dict == nil {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
+ var buf bytes.Buffer
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
+ buf.Write(in)
+ ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+ }
+
return nil
}
@@ -650,6 +679,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
+
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
return err
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 9fcdaac1d..c65ea9795 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
frame.history.setDict(&dict)
}
-
- if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
- return dst, ErrDecoderSizeExceeded
+ if frame.WindowSize > d.o.maxWindowSize {
+ return dst, ErrWindowSizeExceeded
}
- if frame.FrameContentSize < 1<<30 {
- // Never preallocate more than 1 GB up front.
+ if frame.FrameContentSize != fcsUnknown {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ return dst, ErrDecoderSizeExceeded
+ }
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
- dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
+
if cap(dst) == 0 {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index fd05c9bb0..fc52ebc40 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -31,7 +31,7 @@ func (o *decoderOptions) setDefault() {
if o.concurrent > 4 {
o.concurrent = 4
}
- o.maxDecodedSize = 1 << 63
+ o.maxDecodedSize = 64 << 30
}
// WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -66,7 +66,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
-// Maximum and default is 1 << 63 bytes.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
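With maxDecodedSize now defaulting to 64 GiB instead of effectively unlimited, callers decoding untrusted data can still pick a tighter cap explicitly; a short usage sketch (the 1 GiB limit and the input file name are arbitrary examples):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("payload.zst") // illustrative input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Cap decoded output at 1 GiB so hostile frames cannot exhaust memory;
	// frames declaring a larger content size fail with ErrDecoderSizeExceeded.
	dec, err := zstd.NewReader(f, zstd.WithDecoderMaxMemory(1<<30))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	n, err := io.Copy(io.Discard, dec)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded bytes:", n)
}
```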
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 11089d223..509d5cece 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -326,6 +326,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
+ d.history.decoders.maxSyncLen = 0
+ if d.FrameContentSize != fcsUnknown {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
+ if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ // Alloc for output
+ dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index bb3d4fd6c..fde4e6b60 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -5,8 +5,10 @@
package zstd
import (
+ "encoding/binary"
"errors"
"fmt"
+ "io"
)
const (
@@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return s.buildDtable()
}
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 819f1461b..e80139dd9 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -73,6 +73,7 @@ type sequenceDecs struct {
seqSize int
windowSize int
maxBits uint8
+ maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
@@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs []seqVals) error {
- br := s.br
-
- // Grab full sizes tables, to avoid bounds checks.
- llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
- llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
- s.seqSize = 0
- litRemain := len(s.literals)
- maxBlockSize := maxCompressedBlockSize
- if s.windowSize < maxBlockSize {
- maxBlockSize = s.windowSize
- }
- for i := range seqs {
- var ll, mo, ml int
- if br.off > 4+((maxOffsetBits+16+16)>>3) {
- // inlined function:
- // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
-
- // Final will not read from stream.
- var llB, mlB, moB uint8
- ll, llB = llState.final()
- ml, mlB = mlState.final()
- mo, moB = ofState.final()
-
- // extra bits are stored in reverse order.
- br.fillFast()
- mo += br.getBits(moB)
- if s.maxBits > 32 {
- br.fillFast()
- }
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
- if moB > 1 {
- s.prevOffset[2] = s.prevOffset[1]
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = mo
- } else {
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
- if ll == 0 {
- // There is an exception though, when current sequence's literals_length = 0.
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
- mo++
- }
-
- if mo == 0 {
- mo = s.prevOffset[0]
- } else {
- var temp int
- if mo == 3 {
- temp = s.prevOffset[0] - 1
- } else {
- temp = s.prevOffset[mo]
- }
-
- if temp == 0 {
- // 0 is not valid; input is corrupted; force offset to 1
- println("WARNING: temp was 0")
- temp = 1
- }
-
- if mo != 1 {
- s.prevOffset[2] = s.prevOffset[1]
- }
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = temp
- mo = temp
- }
- }
- br.fillFast()
- } else {
- if br.overread() {
- if debugDecoder {
- printf("reading sequence %d, exceeded available data\n", i)
- }
- return io.ErrUnexpectedEOF
- }
- ll, mo, ml = s.next(br, llState, mlState, ofState)
- br.fill()
- }
-
- if debugSequences {
- println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
- }
- // Evaluate.
- // We might be doing this async, so do it early.
- if mo == 0 && ml > 0 {
- return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
- }
- if ml > maxMatchLen {
- return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
- }
- s.seqSize += ll + ml
- if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
- }
- litRemain -= ll
- if litRemain < 0 {
- return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
- }
- seqs[i] = seqVals{
- ll: ll,
- ml: ml,
- mo: mo,
- }
- if i == len(seqs)-1 {
- // This is the last sequence, so we shouldn't update state.
- break
- }
-
- // Manually inlined, ~ 5-20% faster
- // Update all 3 states at once. Approx 20% faster.
- nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
- if nBits == 0 {
- llState = llTable[llState.newState()&maxTableMask]
- mlState = mlTable[mlState.newState()&maxTableMask]
- ofState = ofTable[ofState.newState()&maxTableMask]
- } else {
- bits := br.get32BitsFast(nBits)
- lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
- llState = llTable[(llState.newState()+lowBits)&maxTableMask]
-
- lowBits = uint16(bits >> (ofState.nbBits() & 31))
- lowBits &= bitMask[mlState.nbBits()&15]
- mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
-
- lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
- ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
- }
- }
- s.seqSize += litRemain
- if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
- }
- err := br.close()
- if err != nil {
- printf("Closing sequences: %v, %+v\n", err, *br)
- }
- return err
-}
-
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+ if len(s.dict) == 0 {
+ return s.executeSimple(seqs, hist)
+ }
+
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
@@ -341,14 +202,19 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(history *history) error {
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+ if true {
+ supported, err := s.decodeSyncSimple(hist)
+ if supported {
+ return err
+ }
+ }
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
- hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
@@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
// we may be in dictionary.
dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {
@@ -543,8 +409,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
// Check if space for literals
- if len(s.literals)+len(s.out)-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+ if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
// Add final literals
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
new file mode 100644
index 000000000..4676b09cc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -0,0 +1,350 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ litRemain int
+ out []byte
+ outPosition int
+ literals []byte
+ litPosition int
+ history []byte
+ windowSize int
+ ll int // set on error (not for all errors, please refer to _generate/gen.go)
+ ml int // set on error (not for all errors, please refer to _generate/gen.go)
+ mo int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ if len(s.dict) > 0 {
+ return false, nil
+ }
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+ return false, nil
+ }
+ useSafe := false
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+ useSafe = true
+ }
+ if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+ useSafe = true
+ }
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeSyncAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ iteration: s.nSeqs - 1,
+ litRemain: len(s.literals),
+ out: s.out,
+ outPosition: len(s.out),
+ literals: s.literals,
+ windowSize: s.windowSize,
+ history: hist,
+ }
+
+ s.seqSize = 0
+ startSize := len(s.out)
+
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+ }
+ } else {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+ }
+ }
+ switch errCode {
+ case noError:
+ break
+
+ case errorMatchLenOfsMismatch:
+ return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+ case errorMatchLenTooBig:
+ return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+ case errorMatchOffTooBig:
+ return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+ case errorNotEnoughLiterals:
+ return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+ ctx.ll, ctx.litRemain+ctx.ll)
+
+ case errorNotEnoughSpace:
+ size := ctx.outPosition + ctx.ll + ctx.ml
+ if debugDecoder {
+ println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+ }
+ return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+
+ default:
+ return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ return true, err
+ }
+
+ s.literals = s.literals[ctx.litPosition:]
+ t := ctx.outPosition
+ s.out = s.out[:t]
+
+ // Add final literals
+ s.out = append(s.out, s.literals...)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(s.out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+ }
+ }
+
+ return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ seqs []seqVals
+ litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ seqs: seqs,
+ iteration: len(seqs) - 1,
+ litRemain: len(s.literals),
+ }
+
+ s.seqSize = 0
+ lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+ }
+ } else {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+ }
+ }
+ if errCode != 0 {
+ i := len(seqs) - ctx.iteration - 1
+ switch errCode {
+ case errorMatchLenOfsMismatch:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+ case errorMatchLenTooBig:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+ case errorNotEnoughLiterals:
+ ll := ctx.seqs[i].ll
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+ }
+
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ }
+
+ if ctx.litRemain < 0 {
+ return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+ len(s.literals), len(s.literals)-ctx.litRemain)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+ seqs []seqVals
+ seqIndex int
+ out []byte
+ history []byte
+ literals []byte
+ outPosition int
+ litPosition int
+ windowSize int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
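+ // The append below only grows the capacity; the slice is immediately
+ // re-sliced back to its original length, so no data is written yet.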
+ if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+ addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ ctx := executeAsmContext{
+ seqs: seqs,
+ seqIndex: 0,
+ out: out,
+ history: hist,
+ outPosition: t,
+ litPosition: 0,
+ literals: s.literals,
+ windowSize: s.windowSize,
+ }
+
+ ok := sequenceDecs_executeSimple_amd64(&ctx)
+ if !ok {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
+ }
+ s.literals = s.literals[ctx.litPosition:]
+ t = ctx.outPosition
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
new file mode 100644
index 000000000..01cc23fa8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -0,0 +1,3519 @@
+// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+// +build !appengine,!noasm,gc,!noasm
+
+// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_amd64(SB), $8-32
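+ // Register usage, as established by the loads below:
+ //   DX = br.value, BX = br.bitsRead, SI = br.off, (SP) = br.in + br.off
+ //   DI/R8/R9 = literal-length / match-length / offset FSE states
+ //   R10 = pointer into ctx.seqs, R11/R12/R13 = the three recent offsets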
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_end
+
+sequenceDecs_decode_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R15
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R15
+ ADDQ R15, AX
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R15
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R15
+ ADDQ R15, AX
+ MOVQ AX, 8(R10)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_2_end
+
+sequenceDecs_decode_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R15
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R15
+ ADDQ R15, AX
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ CMPQ R14, $0x00
+ JZ sequenceDecs_decode_amd64_llState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R14, BX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVQ R14, CX
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, DI
+
+sequenceDecs_decode_amd64_llState_updateState_skip_zero:
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ CMPQ R14, $0x00
+ JZ sequenceDecs_decode_amd64_mlState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R14, BX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVQ R14, CX
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, R8
+
+sequenceDecs_decode_amd64_mlState_updateState_skip_zero:
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ CMPQ R14, $0x00
+ JZ sequenceDecs_decode_amd64_ofState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R14, BX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVQ R14, CX
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, R9
+
+sequenceDecs_decode_amd64_ofState_updateState_skip_zero:
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_amd64_skip_update:
+ // Adjust offset
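+ // Offset codes 0 and 1 select one of the three recent offsets (shifted by
+ // one when the literal length is zero); larger codes produce a new offset
+ // and push it onto the recent-offsets history.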
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_amd64_adjust_end
+
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_amd64_adjust_end
+
+sequenceDecs_decode_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_amd64_adjust_zero
+ JEQ sequenceDecs_decode_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_amd64_adjust_three
+ JMP sequenceDecs_decode_amd64_adjust_two
+
+sequenceDecs_decode_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_amd64_adjust_end:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_56_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_56_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_56_amd64_fill_end
+
+sequenceDecs_decode_56_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_56_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R15
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R15
+ ADDQ R15, AX
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R15
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R15
+ ADDQ R15, AX
+ MOVQ AX, 8(R10)
+
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R15
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R15
+ ADDQ R15, AX
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ CMPQ R14, $0x00
+ JZ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R14, BX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVQ R14, CX
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, DI
+
+sequenceDecs_decode_56_amd64_llState_updateState_skip_zero:
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ CMPQ R14, $0x00
+ JZ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R14, BX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVQ R14, CX
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, R8
+
+sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero:
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ CMPQ R14, $0x00
+ JZ sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R14, BX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVQ R14, CX
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, R9
+
+sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero:
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_56_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_56_amd64_adjust_end
+
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_56_amd64_adjust_end
+
+sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_amd64_adjust_zero
+ JEQ sequenceDecs_decode_56_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_amd64_adjust_three
+ JMP sequenceDecs_decode_56_amd64_adjust_two
+
+sequenceDecs_decode_56_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_56_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_56_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_56_amd64_adjust_end:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_56_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_56_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_end
+
+sequenceDecs_decode_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_end:
+ // Update offset
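+ // (BEXTR control word 0x0808 extracts 8 bits starting at bit 8, i.e. the
+ // "additional bits" byte of the state.)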
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_2_end
+
+sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_bmi2_skip_update
+
+ // Update Literal Length State
+ MOVBQZX SI, R14
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, SI, SI
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ ADDQ R15, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+ // Update Match Length State
+ MOVBQZX DI, R14
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, DI, DI
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ ADDQ R15, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Offset State
+ MOVBQZX R8, R14
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, R8, R8
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ ADDQ R15, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+sequenceDecs_decode_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_bmi2_adjust_end
+
+sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_bmi2_adjust_end
+
+sequenceDecs_decode_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_bmi2_adjust_three
+ JMP sequenceDecs_decode_bmi2_adjust_two
+
+sequenceDecs_decode_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_bmi2_adjust_end:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_56_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_56_bmi2_fill_end
+
+sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_56_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_bmi2_skip_update
+
+ // Update Literal Length State
+ MOVBQZX SI, R14
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, SI, SI
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ ADDQ R15, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+ // Update Match Length State
+ MOVBQZX DI, R14
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, DI, DI
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ ADDQ R15, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Offset State
+ MOVBQZX R8, R14
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, R8, R8
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ ADDQ R15, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+sequenceDecs_decode_56_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_56_bmi2_adjust_end
+
+sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_56_bmi2_adjust_end
+
+sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_56_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_bmi2_adjust_three
+ JMP sequenceDecs_decode_56_bmi2_adjust_two
+
+sequenceDecs_decode_56_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_56_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_56_bmi2_adjust_end:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_56_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_56_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
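+ // R9 now points one past the end of the history buffer; R10 keeps its length.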
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ XORQ R14, R14
+ TESTQ $0x00000001, R11
+ JZ copy_1_word
+ MOVB (SI)(R14*1), R15
+ MOVB R15, (BX)(R14*1)
+ ADDQ $0x01, R14
+
+copy_1_word:
+ TESTQ $0x00000002, R11
+ JZ copy_1_dword
+ MOVW (SI)(R14*1), R15
+ MOVW R15, (BX)(R14*1)
+ ADDQ $0x02, R14
+
+copy_1_dword:
+ TESTQ $0x00000004, R11
+ JZ copy_1_qword
+ MOVL (SI)(R14*1), R15
+ MOVL R15, (BX)(R14*1)
+ ADDQ $0x04, R14
+
+copy_1_qword:
+ TESTQ $0x00000008, R11
+ JZ copy_1_test
+ MOVQ (SI)(R14*1), R15
+ MOVQ R15, (BX)(R14*1)
+ ADDQ $0x08, R14
+ JMP copy_1_test
+
+copy_1:
+ MOVUPS (SI)(R14*1), X0
+ MOVUPS X0, (BX)(R14*1)
+ ADDQ $0x10, R14
+
+copy_1_test:
+ CMPQ R14, R11
+ JB copy_1
+ ADDQ R11, SI
+ ADDQ R11, BX
+ ADDQ R11, DI
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JGE copy_all_from_history
+ XORQ R11, R11
+ TESTQ $0x00000001, R13
+ JZ copy_4_word
+ MOVB (R14)(R11*1), R12
+ MOVB R12, (BX)(R11*1)
+ ADDQ $0x01, R11
+
+copy_4_word:
+ TESTQ $0x00000002, R13
+ JZ copy_4_dword
+ MOVW (R14)(R11*1), R12
+ MOVW R12, (BX)(R11*1)
+ ADDQ $0x02, R11
+
+copy_4_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_4_qword
+ MOVL (R14)(R11*1), R12
+ MOVL R12, (BX)(R11*1)
+ ADDQ $0x04, R11
+
+copy_4_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_4_test
+ MOVQ (R14)(R11*1), R12
+ MOVQ R12, (BX)(R11*1)
+ ADDQ $0x08, R11
+ JMP copy_4_test
+
+copy_4:
+ MOVUPS (R14)(R11*1), X0
+ MOVUPS X0, (BX)(R11*1)
+ ADDQ $0x10, R11
+
+copy_4_test:
+ CMPQ R11, R13
+ JB copy_4
+ ADDQ R13, DI
+ ADDQ R13, BX
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ XORQ R15, R15
+ TESTQ $0x00000001, R11
+ JZ copy_5_word
+ MOVB (R14)(R15*1), BP
+ MOVB BP, (BX)(R15*1)
+ ADDQ $0x01, R15
+
+copy_5_word:
+ TESTQ $0x00000002, R11
+ JZ copy_5_dword
+ MOVW (R14)(R15*1), BP
+ MOVW BP, (BX)(R15*1)
+ ADDQ $0x02, R15
+
+copy_5_dword:
+ TESTQ $0x00000004, R11
+ JZ copy_5_qword
+ MOVL (R14)(R15*1), BP
+ MOVL BP, (BX)(R15*1)
+ ADDQ $0x04, R15
+
+copy_5_qword:
+ TESTQ $0x00000008, R11
+ JZ copy_5_test
+ MOVQ (R14)(R15*1), BP
+ MOVQ BP, (BX)(R15*1)
+ ADDQ $0x08, R15
+ JMP copy_5_test
+
+copy_5:
+ MOVUPS (R14)(R15*1), X0
+ MOVUPS X0, (BX)(R15*1)
+ ADDQ $0x10, R15
+
+copy_5_test:
+ CMPQ R15, R11
+ JB copy_5
+ ADDQ R11, BX
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ TESTQ R13, R13
+ JZ handle_loop
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ XORQ R12, R12
+
+copy_2:
+ MOVUPS (R11)(R12*1), X0
+ MOVUPS X0, (BX)(R12*1)
+ ADDQ $0x10, R12
+ CMPQ R12, R13
+ JB copy_2
+ ADDQ R13, BX
+ ADDQ R13, DI
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ XORQ R12, R12
+
+copy_slow_3:
+ MOVB (R11)(R12*1), R14
+ MOVB R14, (BX)(R12*1)
+ INCQ R12
+ CMPQ R12, R13
+ JB copy_slow_3
+ ADDQ R13, BX
+ ADDQ R13, DI
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
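+ // decodeSync decodes each sequence and immediately executes it, copying the
+ // literals and the match directly into s.out in a single pass.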
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_end
+
+sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R14
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R14
+ ADDQ R14, AX
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R14
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R14
+ ADDQ R14, AX
+ MOVQ AX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_end
+
+sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R14
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R14
+ ADDQ R14, AX
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ CMPQ R13, $0x00
+ JZ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R13, BX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVQ R13, CX
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, DI
+
+sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero:
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ CMPQ R13, $0x00
+ JZ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R13, BX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVQ R13, CX
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, R8
+
+sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero:
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ CMPQ R13, $0x00
+ JZ sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R13, BX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVQ R13, CX
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, R9
+
+sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero:
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_amd64_adjust_end
+
+sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_amd64_adjust_end
+
+sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ LEAQ 144(CX), R15
+ ADDQ (R15)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_amd64_adjust_end:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ XORQ R14, R14
+ TESTQ $0x00000001, AX
+ JZ copy_1_word
+ MOVB (R11)(R14*1), R15
+ MOVB R15, (R10)(R14*1)
+ ADDQ $0x01, R14
+
+copy_1_word:
+ TESTQ $0x00000002, AX
+ JZ copy_1_dword
+ MOVW (R11)(R14*1), R15
+ MOVW R15, (R10)(R14*1)
+ ADDQ $0x02, R14
+
+copy_1_dword:
+ TESTQ $0x00000004, AX
+ JZ copy_1_qword
+ MOVL (R11)(R14*1), R15
+ MOVL R15, (R10)(R14*1)
+ ADDQ $0x04, R14
+
+copy_1_qword:
+ TESTQ $0x00000008, AX
+ JZ copy_1_test
+ MOVQ (R11)(R14*1), R15
+ MOVQ R15, (R10)(R14*1)
+ ADDQ $0x08, R14
+ JMP copy_1_test
+
+copy_1:
+ MOVUPS (R11)(R14*1), X0
+ MOVUPS X0, (R10)(R14*1)
+ ADDQ $0x10, R14
+
+copy_1_test:
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JGE copy_all_from_history
+ XORQ AX, AX
+ TESTQ $0x00000001, R13
+ JZ copy_4_word
+ MOVB (R14)(AX*1), CL
+ MOVB CL, (R10)(AX*1)
+ ADDQ $0x01, AX
+
+copy_4_word:
+ TESTQ $0x00000002, R13
+ JZ copy_4_dword
+ MOVW (R14)(AX*1), CX
+ MOVW CX, (R10)(AX*1)
+ ADDQ $0x02, AX
+
+copy_4_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_4_qword
+ MOVL (R14)(AX*1), CX
+ MOVL CX, (R10)(AX*1)
+ ADDQ $0x04, AX
+
+copy_4_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_4_test
+ MOVQ (R14)(AX*1), CX
+ MOVQ CX, (R10)(AX*1)
+ ADDQ $0x08, AX
+ JMP copy_4_test
+
+copy_4:
+ MOVUPS (R14)(AX*1), X0
+ MOVUPS X0, (R10)(AX*1)
+ ADDQ $0x10, AX
+
+copy_4_test:
+ CMPQ AX, R13
+ JB copy_4
+ ADDQ R13, R12
+ ADDQ R13, R10
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ XORQ R15, R15
+ TESTQ $0x00000001, AX
+ JZ copy_5_word
+ MOVB (R14)(R15*1), BP
+ MOVB BP, (R10)(R15*1)
+ ADDQ $0x01, R15
+
+copy_5_word:
+ TESTQ $0x00000002, AX
+ JZ copy_5_dword
+ MOVW (R14)(R15*1), BP
+ MOVW BP, (R10)(R15*1)
+ ADDQ $0x02, R15
+
+copy_5_dword:
+ TESTQ $0x00000004, AX
+ JZ copy_5_qword
+ MOVL (R14)(R15*1), BP
+ MOVL BP, (R10)(R15*1)
+ ADDQ $0x04, R15
+
+copy_5_qword:
+ TESTQ $0x00000008, AX
+ JZ copy_5_test
+ MOVQ (R14)(R15*1), BP
+ MOVQ BP, (R10)(R15*1)
+ ADDQ $0x08, R15
+ JMP copy_5_test
+
+copy_5:
+ MOVUPS (R14)(R15*1), X0
+ MOVUPS X0, (R10)(R15*1)
+ ADDQ $0x10, R15
+
+copy_5_test:
+ CMPQ R15, AX
+ JB copy_5
+ ADDQ AX, R10
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ TESTQ R13, R13
+ JZ handle_loop
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ XORQ CX, CX
+
+copy_2:
+ MOVUPS (AX)(CX*1), X0
+ MOVUPS X0, (R10)(CX*1)
+ ADDQ $0x10, CX
+ CMPQ CX, R13
+ JB copy_2
+ ADDQ R13, R10
+ ADDQ R13, R12
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ XORQ CX, CX
+
+copy_slow_3:
+ MOVB (AX)(CX*1), R14
+ MOVB R14, (R10)(CX*1)
+ INCQ CX
+ CMPQ CX, R13
+ JB copy_slow_3
+ ADDQ R13, R10
+ ADDQ R13, R12
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_end
+
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_bmi2_skip_update
+
+ // Update Literal Length State
+ MOVBQZX SI, R13
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, SI, SI
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ ADDQ R14, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+ // Update Match Length State
+ MOVBQZX DI, R13
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, DI, DI
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ ADDQ R14, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Offset State
+ MOVBQZX R8, R13
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, R8, R8
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ ADDQ R14, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+sequenceDecs_decodeSync_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_bmi2_adjust_end
+
+sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_bmi2_adjust_end
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ LEAQ 144(CX), R15
+ ADDQ (R15)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_bmi2_adjust_end:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ XORQ R14, R14
+ TESTQ $0x00000001, CX
+ JZ copy_1_word
+ MOVB (R10)(R14*1), R15
+ MOVB R15, (R9)(R14*1)
+ ADDQ $0x01, R14
+
+copy_1_word:
+ TESTQ $0x00000002, CX
+ JZ copy_1_dword
+ MOVW (R10)(R14*1), R15
+ MOVW R15, (R9)(R14*1)
+ ADDQ $0x02, R14
+
+copy_1_dword:
+ TESTQ $0x00000004, CX
+ JZ copy_1_qword
+ MOVL (R10)(R14*1), R15
+ MOVL R15, (R9)(R14*1)
+ ADDQ $0x04, R14
+
+copy_1_qword:
+ TESTQ $0x00000008, CX
+ JZ copy_1_test
+ MOVQ (R10)(R14*1), R15
+ MOVQ R15, (R9)(R14*1)
+ ADDQ $0x08, R14
+ JMP copy_1_test
+
+copy_1:
+ MOVUPS (R10)(R14*1), X0
+ MOVUPS X0, (R9)(R14*1)
+ ADDQ $0x10, R14
+
+copy_1_test:
+ CMPQ R14, CX
+ JB copy_1
+ ADDQ CX, R10
+ ADDQ CX, R9
+ ADDQ CX, R11
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JGE copy_all_from_history
+ XORQ CX, CX
+ TESTQ $0x00000001, R13
+ JZ copy_4_word
+ MOVB (R14)(CX*1), R12
+ MOVB R12, (R9)(CX*1)
+ ADDQ $0x01, CX
+
+copy_4_word:
+ TESTQ $0x00000002, R13
+ JZ copy_4_dword
+ MOVW (R14)(CX*1), R12
+ MOVW R12, (R9)(CX*1)
+ ADDQ $0x02, CX
+
+copy_4_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_4_qword
+ MOVL (R14)(CX*1), R12
+ MOVL R12, (R9)(CX*1)
+ ADDQ $0x04, CX
+
+copy_4_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_4_test
+ MOVQ (R14)(CX*1), R12
+ MOVQ R12, (R9)(CX*1)
+ ADDQ $0x08, CX
+ JMP copy_4_test
+
+copy_4:
+ MOVUPS (R14)(CX*1), X0
+ MOVUPS X0, (R9)(CX*1)
+ ADDQ $0x10, CX
+
+copy_4_test:
+ CMPQ CX, R13
+ JB copy_4
+ ADDQ R13, R11
+ ADDQ R13, R9
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ XORQ R15, R15
+ TESTQ $0x00000001, CX
+ JZ copy_5_word
+ MOVB (R14)(R15*1), BP
+ MOVB BP, (R9)(R15*1)
+ ADDQ $0x01, R15
+
+copy_5_word:
+ TESTQ $0x00000002, CX
+ JZ copy_5_dword
+ MOVW (R14)(R15*1), BP
+ MOVW BP, (R9)(R15*1)
+ ADDQ $0x02, R15
+
+copy_5_dword:
+ TESTQ $0x00000004, CX
+ JZ copy_5_qword
+ MOVL (R14)(R15*1), BP
+ MOVL BP, (R9)(R15*1)
+ ADDQ $0x04, R15
+
+copy_5_qword:
+ TESTQ $0x00000008, CX
+ JZ copy_5_test
+ MOVQ (R14)(R15*1), BP
+ MOVQ BP, (R9)(R15*1)
+ ADDQ $0x08, R15
+ JMP copy_5_test
+
+copy_5:
+ MOVUPS (R14)(R15*1), X0
+ MOVUPS X0, (R9)(R15*1)
+ ADDQ $0x10, R15
+
+copy_5_test:
+ CMPQ R15, CX
+ JB copy_5
+ ADDQ CX, R9
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ TESTQ R13, R13
+ JZ handle_loop
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ XORQ R12, R12
+
+copy_2:
+ MOVUPS (CX)(R12*1), X0
+ MOVUPS X0, (R9)(R12*1)
+ ADDQ $0x10, R12
+ CMPQ R12, R13
+ JB copy_2
+ ADDQ R13, R9
+ ADDQ R13, R11
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ XORQ R12, R12
+
+copy_slow_3:
+ MOVB (CX)(R12*1), R14
+ MOVB R14, (R9)(R12*1)
+ INCQ R12
+ CMPQ R12, R13
+ JB copy_slow_3
+ ADDQ R13, R9
+ ADDQ R13, R11
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R14
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R14
+ ADDQ R14, AX
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R14
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R14
+ ADDQ R14, AX
+ MOVQ AX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ ADDQ CX, BX
+ NEGL CX
+ SHRQ CL, R14
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ CMOVQEQ CX, R14
+ ADDQ R14, AX
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ CMPQ R13, $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R13, BX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVQ R13, CX
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, DI
+
+sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero:
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ CMPQ R13, $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R13, BX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVQ R13, CX
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, R8
+
+sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero:
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ CMPQ R13, $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero
+ MOVQ BX, CX
+ ADDQ R13, BX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVQ R13, CX
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, R9
+
+sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero:
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_safe_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_amd64_adjust_end
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_amd64_adjust_end
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ LEAQ 144(CX), R15
+ ADDQ (R15)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_safe_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_amd64_adjust_end:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ XORQ R14, R14
+ TESTQ $0x00000001, AX
+ JZ copy_1_word
+ MOVB (R11)(R14*1), R15
+ MOVB R15, (R10)(R14*1)
+ ADDQ $0x01, R14
+
+copy_1_word:
+ TESTQ $0x00000002, AX
+ JZ copy_1_dword
+ MOVW (R11)(R14*1), R15
+ MOVW R15, (R10)(R14*1)
+ ADDQ $0x02, R14
+
+copy_1_dword:
+ TESTQ $0x00000004, AX
+ JZ copy_1_qword
+ MOVL (R11)(R14*1), R15
+ MOVL R15, (R10)(R14*1)
+ ADDQ $0x04, R14
+
+copy_1_qword:
+ TESTQ $0x00000008, AX
+ JZ copy_1_test
+ MOVQ (R11)(R14*1), R15
+ MOVQ R15, (R10)(R14*1)
+ ADDQ $0x08, R14
+ JMP copy_1_test
+
+copy_1:
+ MOVUPS (R11)(R14*1), X0
+ MOVUPS X0, (R10)(R14*1)
+ ADDQ $0x10, R14
+
+copy_1_test:
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JGE copy_all_from_history
+ XORQ AX, AX
+ TESTQ $0x00000001, R13
+ JZ copy_4_word
+ MOVB (R14)(AX*1), CL
+ MOVB CL, (R10)(AX*1)
+ ADDQ $0x01, AX
+
+copy_4_word:
+ TESTQ $0x00000002, R13
+ JZ copy_4_dword
+ MOVW (R14)(AX*1), CX
+ MOVW CX, (R10)(AX*1)
+ ADDQ $0x02, AX
+
+copy_4_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_4_qword
+ MOVL (R14)(AX*1), CX
+ MOVL CX, (R10)(AX*1)
+ ADDQ $0x04, AX
+
+copy_4_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_4_test
+ MOVQ (R14)(AX*1), CX
+ MOVQ CX, (R10)(AX*1)
+ ADDQ $0x08, AX
+ JMP copy_4_test
+
+copy_4:
+ MOVUPS (R14)(AX*1), X0
+ MOVUPS X0, (R10)(AX*1)
+ ADDQ $0x10, AX
+
+copy_4_test:
+ CMPQ AX, R13
+ JB copy_4
+ ADDQ R13, R12
+ ADDQ R13, R10
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ XORQ R15, R15
+ TESTQ $0x00000001, AX
+ JZ copy_5_word
+ MOVB (R14)(R15*1), BP
+ MOVB BP, (R10)(R15*1)
+ ADDQ $0x01, R15
+
+copy_5_word:
+ TESTQ $0x00000002, AX
+ JZ copy_5_dword
+ MOVW (R14)(R15*1), BP
+ MOVW BP, (R10)(R15*1)
+ ADDQ $0x02, R15
+
+copy_5_dword:
+ TESTQ $0x00000004, AX
+ JZ copy_5_qword
+ MOVL (R14)(R15*1), BP
+ MOVL BP, (R10)(R15*1)
+ ADDQ $0x04, R15
+
+copy_5_qword:
+ TESTQ $0x00000008, AX
+ JZ copy_5_test
+ MOVQ (R14)(R15*1), BP
+ MOVQ BP, (R10)(R15*1)
+ ADDQ $0x08, R15
+ JMP copy_5_test
+
+copy_5:
+ MOVUPS (R14)(R15*1), X0
+ MOVUPS X0, (R10)(R15*1)
+ ADDQ $0x10, R15
+
+copy_5_test:
+ CMPQ R15, AX
+ JB copy_5
+ ADDQ AX, R10
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ TESTQ R13, R13
+ JZ handle_loop
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ XORQ CX, CX
+ TESTQ $0x00000001, R13
+ JZ copy_2_word
+ MOVB (AX)(CX*1), R14
+ MOVB R14, (R10)(CX*1)
+ ADDQ $0x01, CX
+
+copy_2_word:
+ TESTQ $0x00000002, R13
+ JZ copy_2_dword
+ MOVW (AX)(CX*1), R14
+ MOVW R14, (R10)(CX*1)
+ ADDQ $0x02, CX
+
+copy_2_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_2_qword
+ MOVL (AX)(CX*1), R14
+ MOVL R14, (R10)(CX*1)
+ ADDQ $0x04, CX
+
+copy_2_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_2_test
+ MOVQ (AX)(CX*1), R14
+ MOVQ R14, (R10)(CX*1)
+ ADDQ $0x08, CX
+ JMP copy_2_test
+
+copy_2:
+ MOVUPS (AX)(CX*1), X0
+ MOVUPS X0, (R10)(CX*1)
+ ADDQ $0x10, CX
+
+copy_2_test:
+ CMPQ CX, R13
+ JB copy_2
+ ADDQ R13, R10
+ ADDQ R13, R12
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ XORQ CX, CX
+
+copy_slow_3:
+ MOVB (AX)(CX*1), R14
+ MOVB R14, (R10)(CX*1)
+ INCQ CX
+ CMPQ CX, R13
+ JB copy_slow_3
+ ADDQ R13, R10
+ ADDQ R13, R12
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT Β·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+
+ // Update Literal Length State
+ MOVBQZX SI, R13
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, SI, SI
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ ADDQ R14, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+ // Update Match Length State
+ MOVBQZX DI, R13
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, DI, DI
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ ADDQ R14, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Offset State
+ MOVBQZX R8, R13
+ MOVQ $0x00001010, CX
+ BEXTRQ CX, R8, R8
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ ADDQ R14, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+sequenceDecs_decodeSync_safe_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ LEAQ 144(CX), R15
+ ADDQ (R15)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_end:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ XORQ R14, R14
+ TESTQ $0x00000001, CX
+ JZ copy_1_word
+ MOVB (R10)(R14*1), R15
+ MOVB R15, (R9)(R14*1)
+ ADDQ $0x01, R14
+
+copy_1_word:
+ TESTQ $0x00000002, CX
+ JZ copy_1_dword
+ MOVW (R10)(R14*1), R15
+ MOVW R15, (R9)(R14*1)
+ ADDQ $0x02, R14
+
+copy_1_dword:
+ TESTQ $0x00000004, CX
+ JZ copy_1_qword
+ MOVL (R10)(R14*1), R15
+ MOVL R15, (R9)(R14*1)
+ ADDQ $0x04, R14
+
+copy_1_qword:
+ TESTQ $0x00000008, CX
+ JZ copy_1_test
+ MOVQ (R10)(R14*1), R15
+ MOVQ R15, (R9)(R14*1)
+ ADDQ $0x08, R14
+ JMP copy_1_test
+
+copy_1:
+ MOVUPS (R10)(R14*1), X0
+ MOVUPS X0, (R9)(R14*1)
+ ADDQ $0x10, R14
+
+copy_1_test:
+ CMPQ R14, CX
+ JB copy_1
+ ADDQ CX, R10
+ ADDQ CX, R9
+ ADDQ CX, R11
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JGE copy_all_from_history
+ XORQ CX, CX
+ TESTQ $0x00000001, R13
+ JZ copy_4_word
+ MOVB (R14)(CX*1), R12
+ MOVB R12, (R9)(CX*1)
+ ADDQ $0x01, CX
+
+copy_4_word:
+ TESTQ $0x00000002, R13
+ JZ copy_4_dword
+ MOVW (R14)(CX*1), R12
+ MOVW R12, (R9)(CX*1)
+ ADDQ $0x02, CX
+
+copy_4_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_4_qword
+ MOVL (R14)(CX*1), R12
+ MOVL R12, (R9)(CX*1)
+ ADDQ $0x04, CX
+
+copy_4_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_4_test
+ MOVQ (R14)(CX*1), R12
+ MOVQ R12, (R9)(CX*1)
+ ADDQ $0x08, CX
+ JMP copy_4_test
+
+copy_4:
+ MOVUPS (R14)(CX*1), X0
+ MOVUPS X0, (R9)(CX*1)
+ ADDQ $0x10, CX
+
+copy_4_test:
+ CMPQ CX, R13
+ JB copy_4
+ ADDQ R13, R11
+ ADDQ R13, R9
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ XORQ R15, R15
+ TESTQ $0x00000001, CX
+ JZ copy_5_word
+ MOVB (R14)(R15*1), BP
+ MOVB BP, (R9)(R15*1)
+ ADDQ $0x01, R15
+
+copy_5_word:
+ TESTQ $0x00000002, CX
+ JZ copy_5_dword
+ MOVW (R14)(R15*1), BP
+ MOVW BP, (R9)(R15*1)
+ ADDQ $0x02, R15
+
+copy_5_dword:
+ TESTQ $0x00000004, CX
+ JZ copy_5_qword
+ MOVL (R14)(R15*1), BP
+ MOVL BP, (R9)(R15*1)
+ ADDQ $0x04, R15
+
+copy_5_qword:
+ TESTQ $0x00000008, CX
+ JZ copy_5_test
+ MOVQ (R14)(R15*1), BP
+ MOVQ BP, (R9)(R15*1)
+ ADDQ $0x08, R15
+ JMP copy_5_test
+
+copy_5:
+ MOVUPS (R14)(R15*1), X0
+ MOVUPS X0, (R9)(R15*1)
+ ADDQ $0x10, R15
+
+copy_5_test:
+ CMPQ R15, CX
+ JB copy_5
+ ADDQ CX, R9
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ TESTQ R13, R13
+ JZ handle_loop
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ XORQ R12, R12
+ TESTQ $0x00000001, R13
+ JZ copy_2_word
+ MOVB (CX)(R12*1), R14
+ MOVB R14, (R9)(R12*1)
+ ADDQ $0x01, R12
+
+copy_2_word:
+ TESTQ $0x00000002, R13
+ JZ copy_2_dword
+ MOVW (CX)(R12*1), R14
+ MOVW R14, (R9)(R12*1)
+ ADDQ $0x02, R12
+
+copy_2_dword:
+ TESTQ $0x00000004, R13
+ JZ copy_2_qword
+ MOVL (CX)(R12*1), R14
+ MOVL R14, (R9)(R12*1)
+ ADDQ $0x04, R12
+
+copy_2_qword:
+ TESTQ $0x00000008, R13
+ JZ copy_2_test
+ MOVQ (CX)(R12*1), R14
+ MOVQ R14, (R9)(R12*1)
+ ADDQ $0x08, R12
+ JMP copy_2_test
+
+copy_2:
+ MOVUPS (CX)(R12*1), X0
+ MOVUPS X0, (R9)(R12*1)
+ ADDQ $0x10, R12
+
+copy_2_test:
+ CMPQ R12, R13
+ JB copy_2
+ ADDQ R13, R9
+ ADDQ R13, R11
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ XORQ R12, R12
+
+copy_slow_3:
+ MOVB (CX)(R12*1), R14
+ MOVB R14, (R9)(R12*1)
+ INCQ R12
+ CMPQ R12, R13
+ JB copy_slow_3
+ ADDQ R13, R9
+ ADDQ R13, R11
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_safe_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
new file mode 100644
index 000000000..c3452bc3a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -0,0 +1,237 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+)
+
+// decode sequences from the stream with the provided history but without dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ return false, nil
+}
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ // Grab full sizes tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ s.seqSize = 0
+ litRemain := len(s.literals)
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+ for i := range seqs {
+ var ll, mo, ml int
+ if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ if br.overread() {
+ if debugDecoder {
+ printf("reading sequence %d, exceeded available data\n", i)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+ // Evaluate.
+ // We might be doing this async, so do it early.
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+ s.seqSize += ll + ml
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ }
+ litRemain -= ll
+ if litRemain < 0 {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+ }
+ seqs[i] = seqVals{
+ ll: ll,
+ ml: ml,
+ mo: mo,
+ }
+ if i == len(seqs)-1 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+ s.seqSize += litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Malformed input
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into the current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+
+ // We must be in the current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at a time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index ffffcbc25..b53f606a1 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -21,23 +21,34 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
// newZipReader creates a pooled zip decompressor.
-func newZipReader(r io.Reader) io.ReadCloser {
- dec, ok := zipReaderPool.Get().(*Decoder)
- if ok {
- dec.Reset(r)
- } else {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ pool := &zipReaderPool
+ if len(opts) > 0 {
+ opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+ // Force concurrency 1
+ opts = append(opts, WithDecoderConcurrency(1))
+ // Create our own pool
+ pool = &sync.Pool{}
+ }
+ return func(r io.Reader) io.ReadCloser {
+ dec, ok := pool.Get().(*Decoder)
+ if ok {
+ dec.Reset(r)
+ } else {
+ d, err := NewReader(r, opts...)
+ if err != nil {
+ panic(err)
+ }
+ dec = d
}
- dec = d
+ return &pooledZipReader{dec: dec, pool: pool}
}
- return &pooledZipReader{dec: dec}
}
type pooledZipReader struct {
- mu sync.Mutex // guards Close and Read
- dec *Decoder
+ mu sync.Mutex // guards Close and Read
+ pool *sync.Pool
+ dec *Decoder
}
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
@@ -48,8 +59,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
}
dec, err := r.dec.Read(p)
if err == io.EOF {
- err = r.dec.Reset(nil)
- zipReaderPool.Put(r.dec)
+ r.dec.Reset(nil)
+ r.pool.Put(r.dec)
r.dec = nil
}
return dec, err
@@ -61,7 +72,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
- zipReaderPool.Put(r.dec)
+ r.pool.Put(r.dec)
r.dec = nil
}
return err
@@ -115,6 +126,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
-func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return newZipReader
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ return newZipReader(opts...)
}
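For context, a minimal usage sketch of the reworked ZipDecompressor API above (not part of the vendored change). It assumes the standard library archive/zip package, the existing zstd.ZipMethodWinZip constant, and the WithDecoderMaxWindow option that the hunk itself references; the archive name is hypothetical.

package main

import (
	"archive/zip"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("example.zip") // hypothetical zstd-compressed zip archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	zr, err := zip.NewReader(f, st.Size())
	if err != nil {
		log.Fatal(err)
	}

	// ZipDecompressor now accepts decoder options; concurrency is forced
	// to 1 internally, and the 128MB default window can be overridden.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip,
		zstd.ZipDecompressor(zstd.WithDecoderMaxWindow(64<<20)))

	for _, zf := range zr.File {
		rc, err := zf.Open()
		if err != nil {
			log.Fatal(err)
		}
		n, _ := io.Copy(io.Discard, rc)
		rc.Close()
		fmt.Printf("%s: %d bytes\n", zf.Name, n)
	}
}

Because each call with options builds its own sync.Pool, decoders configured this way are reused only among readers created by that returned function, while the default (option-free) path keeps sharing the package-level zipReaderPool.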
diff --git a/vendor/github.com/mrunalp/fileutils/.gitignore b/vendor/github.com/mrunalp/fileutils/.gitignore
deleted file mode 100644
index aac977bca..000000000
--- a/vendor/github.com/mrunalp/fileutils/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/gocp
diff --git a/vendor/github.com/mrunalp/fileutils/LICENSE b/vendor/github.com/mrunalp/fileutils/LICENSE
deleted file mode 100644
index 27448585a..000000000
--- a/vendor/github.com/mrunalp/fileutils/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2014 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/mrunalp/fileutils/MAINTAINERS b/vendor/github.com/mrunalp/fileutils/MAINTAINERS
deleted file mode 100644
index 4a2cafa5c..000000000
--- a/vendor/github.com/mrunalp/fileutils/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Mrunal Patel <mrunalp@gmail.com> (@mrunalp)
diff --git a/vendor/github.com/mrunalp/fileutils/README.md b/vendor/github.com/mrunalp/fileutils/README.md
deleted file mode 100644
index 6cb4140ea..000000000
--- a/vendor/github.com/mrunalp/fileutils/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# fileutils
-
-Collection of utilities for file manipulation in golang
-
-The library is based on docker pkg/archive pkg/idtools but does copies instead of handling archive formats.
diff --git a/vendor/github.com/mrunalp/fileutils/fileutils.go b/vendor/github.com/mrunalp/fileutils/fileutils.go
deleted file mode 100644
index 7421e6207..000000000
--- a/vendor/github.com/mrunalp/fileutils/fileutils.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package fileutils
-
-import (
- "fmt"
- "io"
- "os"
- "path/filepath"
- "syscall"
-)
-
-// CopyFile copies the file at source to dest
-func CopyFile(source string, dest string) error {
- si, err := os.Lstat(source)
- if err != nil {
- return err
- }
-
- st, ok := si.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
-
- uid := int(st.Uid)
- gid := int(st.Gid)
- modeType := si.Mode() & os.ModeType
-
- // Handle symlinks
- if modeType == os.ModeSymlink {
- target, err := os.Readlink(source)
- if err != nil {
- return err
- }
- if err := os.Symlink(target, dest); err != nil {
- return err
- }
- }
-
- // Handle device files
- if modeType == os.ModeDevice {
- devMajor := int64(major(uint64(st.Rdev)))
- devMinor := int64(minor(uint64(st.Rdev)))
- mode := uint32(si.Mode() & os.ModePerm)
- if si.Mode()&os.ModeCharDevice != 0 {
- mode |= syscall.S_IFCHR
- } else {
- mode |= syscall.S_IFBLK
- }
- if err := syscall.Mknod(dest, mode, int(mkdev(devMajor, devMinor))); err != nil {
- return err
- }
- }
-
- // Handle regular files
- if si.Mode().IsRegular() {
- err = copyInternal(source, dest)
- if err != nil {
- return err
- }
- }
-
- // Chown the file
- if err := os.Lchown(dest, uid, gid); err != nil {
- return err
- }
-
- // Chmod the file
- if !(modeType == os.ModeSymlink) {
- if err := os.Chmod(dest, si.Mode()); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func copyInternal(source, dest string) (retErr error) {
- sf, err := os.Open(source)
- if err != nil {
- return err
- }
- defer sf.Close()
-
- df, err := os.Create(dest)
- if err != nil {
- return err
- }
- defer func() {
- err := df.Close()
- if retErr == nil {
- retErr = err
- }
- }()
-
- _, err = io.Copy(df, sf)
- return err
-}
-
-// CopyDirectory copies the files under the source directory
-// to dest directory. The dest directory is created if it
-// does not exist.
-func CopyDirectory(source string, dest string) error {
- fi, err := os.Stat(source)
- if err != nil {
- return err
- }
-
- // Get owner.
- st, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
-
- // We have to pick an owner here anyway.
- if err := MkdirAllNewAs(dest, fi.Mode(), int(st.Uid), int(st.Gid)); err != nil {
- return err
- }
-
- return filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // Get the relative path
- relPath, err := filepath.Rel(source, path)
- if err != nil {
- return nil
- }
-
- if info.IsDir() {
- // Skip the source directory.
- if path != source {
- // Get the owner.
- st, ok := info.Sys().(*syscall.Stat_t)
- if !ok {
- return fmt.Errorf("could not convert to syscall.Stat_t")
- }
-
- uid := int(st.Uid)
- gid := int(st.Gid)
-
- if err := os.Mkdir(filepath.Join(dest, relPath), info.Mode()); err != nil {
- return err
- }
-
- if err := os.Lchown(filepath.Join(dest, relPath), uid, gid); err != nil {
- return err
- }
- }
- return nil
- }
-
- return CopyFile(path, filepath.Join(dest, relPath))
- })
-}
-
-// Gives a number indicating the device driver to be used to access the passed device
-func major(device uint64) uint64 {
- return (device >> 8) & 0xfff
-}
-
-// Gives a number that serves as a flag to the device driver for the passed device
-func minor(device uint64) uint64 {
- return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
-func mkdev(major int64, minor int64) uint32 {
- return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
-}
diff --git a/vendor/github.com/mrunalp/fileutils/go.mod b/vendor/github.com/mrunalp/fileutils/go.mod
deleted file mode 100644
index d8971cabc..000000000
--- a/vendor/github.com/mrunalp/fileutils/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mrunalp/fileutils
-
-go 1.13
diff --git a/vendor/github.com/mrunalp/fileutils/idtools.go b/vendor/github.com/mrunalp/fileutils/idtools.go
deleted file mode 100644
index bad6539df..000000000
--- a/vendor/github.com/mrunalp/fileutils/idtools.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package fileutils
-
-import (
- "os"
- "path/filepath"
- "syscall"
-)
-
-// MkdirAllNewAs creates a directory (include any along the path) and then modifies
-// ownership ONLY of newly created directories to the requested uid/gid. If the
-// directories along the path exist, no change of ownership will be performed
-func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
- // make an array containing the original path asked for, plus (for mkAll == true)
- // all path components leading up to the complete path that don't exist before we MkdirAll
- // so that we can chown all of them properly at the end. If chownExisting is false, we won't
- // chown the full directory path if it exists
- var paths []string
- st, err := os.Stat(path)
- if err != nil && os.IsNotExist(err) {
- paths = []string{path}
- } else if err == nil {
- if !st.IsDir() {
- return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
- }
- // nothing to do; directory path fully exists already
- return nil
- }
-
- // walk back to "/" looking for directories which do not exist
- // and add them to the paths array for chown after creation
- dirPath := path
- for {
- dirPath = filepath.Dir(dirPath)
- if dirPath == "/" {
- break
- }
- if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
- paths = append(paths, dirPath)
- }
- }
-
- if err := os.MkdirAll(path, mode); err != nil {
- return err
- }
-
- // even if it existed, we will chown the requested path + any subpaths that
- // didn't exist when we called MkdirAll
- for _, pathComponent := range paths {
- if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
index 4362f47e2..b0f2bc4ae 100644
--- a/vendor/golang.org/x/sys/unix/endian_little.go
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
-// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
+// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
package unix
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
new file mode 100644
index 000000000..28ba7b8cb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -0,0 +1,191 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64 && linux
+// +build loong64,linux
+
+package unix
+
+import "unsafe"
+
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
+//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getuid() (uid int)
+//sys Listen(s int, n int) (err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ var ts *Timespec
+ if timeout != nil {
+ ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
+ }
+ return Pselect(nfd, r, w, e, ts, nil)
+}
+
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys setfsgid(gid int) (prev int, err error)
+//sys setfsuid(uid int) (prev int, err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+
+func Stat(path string, stat *Stat_t) (err error) {
+ return Fstatat(AT_FDCWD, path, stat, 0)
+}
+
+func Lchown(path string, uid int, gid int) (err error) {
+ return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW)
+}
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW)
+}
+
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error)
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+ return ENOSYS
+}
+
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+
+func setTimespec(sec, nsec int64) Timespec {
+ return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+ return Timeval{Sec: sec, Usec: usec}
+}
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ err = Prlimit(0, resource, nil, rlim)
+ return
+}
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ err = Prlimit(0, resource, rlim, nil)
+ return
+}
+
+func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) {
+ if tv == nil {
+ return utimensat(dirfd, path, nil, 0)
+ }
+
+ ts := []Timespec{
+ NsecToTimespec(TimevalToNsec(tv[0])),
+ NsecToTimespec(TimevalToNsec(tv[1])),
+ }
+ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func Time(t *Time_t) (Time_t, error) {
+ var tv Timeval
+ err := Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+func Utime(path string, buf *Utimbuf) error {
+ tv := []Timeval{
+ {Sec: buf.Actime},
+ {Sec: buf.Modtime},
+ }
+ return Utimes(path, tv)
+}
+
+func utimes(path string, tv *[2]Timeval) (err error) {
+ if tv == nil {
+ return utimensat(AT_FDCWD, path, nil, 0)
+ }
+
+ ts := []Timespec{
+ NsecToTimespec(TimevalToNsec(tv[0])),
+ NsecToTimespec(TimevalToNsec(tv[1])),
+ }
+ return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Era }
+
+func (r *PtraceRegs) SetPC(era uint64) { r.Era = era }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (msghdr *Msghdr) SetIovlen(length int) {
+ msghdr.Iovlen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
+
+func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) {
+ rsa.Service_name_len = uint64(length)
+}
+
+func Pause() error {
+ _, err := ppoll(nil, 0, nil, nil)
+ return err
+}
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0)
+}
+
+//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
+
+func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
+ cmdlineLen := len(cmdline)
+ if cmdlineLen > 0 {
+ // Account for the additional NULL byte added by
+ // BytePtrFromString in kexecFileLoad. The kexec_file_load
+ // syscall expects a NULL-terminated string.
+ cmdlineLen++
+ }
+ return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 3de79fa25..c0a43f8ba 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1310,6 +1310,7 @@ const (
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
+ KEXEC_ARCH_LOONGARCH = 0x1020000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
new file mode 100644
index 000000000..ebc5f3218
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -0,0 +1,818 @@
+// mkerrors.sh -Wall -Werror -static -I/tmp/include
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build loong64 && linux
+// +build loong64,linux
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
+
+package unix
+
+import "syscall"
+
+const (
+ B1000000 = 0x1008
+ B115200 = 0x1002
+ B1152000 = 0x1009
+ B1500000 = 0x100a
+ B2000000 = 0x100b
+ B230400 = 0x1003
+ B2500000 = 0x100c
+ B3000000 = 0x100d
+ B3500000 = 0x100e
+ B4000000 = 0x100f
+ B460800 = 0x1004
+ B500000 = 0x1005
+ B57600 = 0x1001
+ B576000 = 0x1006
+ B921600 = 0x1007
+ BLKBSZGET = 0x80081270
+ BLKBSZSET = 0x40081271
+ BLKFLSBUF = 0x1261
+ BLKFRAGET = 0x1265
+ BLKFRASET = 0x1264
+ BLKGETSIZE = 0x1260
+ BLKGETSIZE64 = 0x80081272
+ BLKPBSZGET = 0x127b
+ BLKRAGET = 0x1263
+ BLKRASET = 0x1262
+ BLKROGET = 0x125e
+ BLKROSET = 0x125d
+ BLKRRPART = 0x125f
+ BLKSECTGET = 0x1267
+ BLKSECTSET = 0x1266
+ BLKSSZGET = 0x1268
+ BOTHER = 0x1000
+ BS1 = 0x2000
+ BSDLY = 0x2000
+ CBAUD = 0x100f
+ CBAUDEX = 0x1000
+ CIBAUD = 0x100f0000
+ CLOCAL = 0x800
+ CR1 = 0x200
+ CR2 = 0x400
+ CR3 = 0x600
+ CRDLY = 0x600
+ CREAD = 0x80
+ CS6 = 0x10
+ CS7 = 0x20
+ CS8 = 0x30
+ CSIZE = 0x30
+ CSTOPB = 0x40
+ ECCGETLAYOUT = 0x81484d11
+ ECCGETSTATS = 0x80104d12
+ ECHOCTL = 0x200
+ ECHOE = 0x10
+ ECHOK = 0x20
+ ECHOKE = 0x800
+ ECHONL = 0x40
+ ECHOPRT = 0x400
+ EFD_CLOEXEC = 0x80000
+ EFD_NONBLOCK = 0x800
+ EPOLL_CLOEXEC = 0x80000
+ EXTPROC = 0x10000
+ FF1 = 0x8000
+ FFDLY = 0x8000
+ FICLONE = 0x40049409
+ FICLONERANGE = 0x4020940d
+ FLUSHO = 0x1000
+ FPU_CTX_MAGIC = 0x46505501
+ FS_IOC_ENABLE_VERITY = 0x40806685
+ FS_IOC_GETFLAGS = 0x80086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
+ FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
+ FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40086602
+ FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
+ F_GETLK = 0x5
+ F_GETLK64 = 0x5
+ F_GETOWN = 0x9
+ F_RDLCK = 0x0
+ F_SETLK = 0x6
+ F_SETLK64 = 0x6
+ F_SETLKW = 0x7
+ F_SETLKW64 = 0x7
+ F_SETOWN = 0x8
+ F_UNLCK = 0x2
+ F_WRLCK = 0x1
+ HIDIOCGRAWINFO = 0x80084803
+ HIDIOCGRDESC = 0x90044802
+ HIDIOCGRDESCSIZE = 0x80044801
+ HUPCL = 0x400
+ ICANON = 0x2
+ IEXTEN = 0x8000
+ IN_CLOEXEC = 0x80000
+ IN_NONBLOCK = 0x800
+ IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ ISIG = 0x1
+ IUCLC = 0x200
+ IXOFF = 0x1000
+ IXON = 0x400
+ LASX_CTX_MAGIC = 0x41535801
+ LSX_CTX_MAGIC = 0x53580001
+ MAP_ANON = 0x20
+ MAP_ANONYMOUS = 0x20
+ MAP_DENYWRITE = 0x800
+ MAP_EXECUTABLE = 0x1000
+ MAP_GROWSDOWN = 0x100
+ MAP_HUGETLB = 0x40000
+ MAP_LOCKED = 0x2000
+ MAP_NONBLOCK = 0x10000
+ MAP_NORESERVE = 0x4000
+ MAP_POPULATE = 0x8000
+ MAP_STACK = 0x20000
+ MAP_SYNC = 0x80000
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MCL_ONFAULT = 0x4
+ MEMERASE = 0x40084d02
+ MEMERASE64 = 0x40104d14
+ MEMGETBADBLOCK = 0x40084d0b
+ MEMGETINFO = 0x80204d01
+ MEMGETOOBSEL = 0x80c84d0a
+ MEMGETREGIONCOUNT = 0x80044d07
+ MEMISLOCKED = 0x80084d17
+ MEMLOCK = 0x40084d05
+ MEMREADOOB = 0xc0104d04
+ MEMSETBADBLOCK = 0x40084d0c
+ MEMUNLOCK = 0x40084d06
+ MEMWRITEOOB = 0xc0104d03
+ MTDFILEMODE = 0x4d13
+ NFDBITS = 0x40
+ NLDLY = 0x100
+ NOFLSH = 0x80
+ NS_GET_NSTYPE = 0xb703
+ NS_GET_OWNER_UID = 0xb704
+ NS_GET_PARENT = 0xb702
+ NS_GET_USERNS = 0xb701
+ OLCUC = 0x2
+ ONLCR = 0x4
+ OTPERASE = 0x400c4d19
+ OTPGETREGIONCOUNT = 0x40044d0e
+ OTPGETREGIONINFO = 0x400c4d0f
+ OTPLOCK = 0x800c4d10
+ OTPSELECT = 0x80044d0d
+ O_APPEND = 0x400
+ O_ASYNC = 0x2000
+ O_CLOEXEC = 0x80000
+ O_CREAT = 0x40
+ O_DIRECT = 0x4000
+ O_DIRECTORY = 0x10000
+ O_DSYNC = 0x1000
+ O_EXCL = 0x80
+ O_FSYNC = 0x101000
+ O_LARGEFILE = 0x0
+ O_NDELAY = 0x800
+ O_NOATIME = 0x40000
+ O_NOCTTY = 0x100
+ O_NOFOLLOW = 0x20000
+ O_NONBLOCK = 0x800
+ O_PATH = 0x200000
+ O_RSYNC = 0x101000
+ O_SYNC = 0x101000
+ O_TMPFILE = 0x410000
+ O_TRUNC = 0x200
+ PARENB = 0x100
+ PARODD = 0x200
+ PENDIN = 0x4000
+ PERF_EVENT_IOC_DISABLE = 0x2401
+ PERF_EVENT_IOC_ENABLE = 0x2400
+ PERF_EVENT_IOC_ID = 0x80082407
+ PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b
+ PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409
+ PERF_EVENT_IOC_PERIOD = 0x40082404
+ PERF_EVENT_IOC_QUERY_BPF = 0xc008240a
+ PERF_EVENT_IOC_REFRESH = 0x2402
+ PERF_EVENT_IOC_RESET = 0x2403
+ PERF_EVENT_IOC_SET_BPF = 0x40042408
+ PERF_EVENT_IOC_SET_FILTER = 0x40082406
+ PERF_EVENT_IOC_SET_OUTPUT = 0x2405
+ PPPIOCATTACH = 0x4004743d
+ PPPIOCATTCHAN = 0x40047438
+ PPPIOCBRIDGECHAN = 0x40047435
+ PPPIOCCONNECT = 0x4004743a
+ PPPIOCDETACH = 0x4004743c
+ PPPIOCDISCONN = 0x7439
+ PPPIOCGASYNCMAP = 0x80047458
+ PPPIOCGCHAN = 0x80047437
+ PPPIOCGDEBUG = 0x80047441
+ PPPIOCGFLAGS = 0x8004745a
+ PPPIOCGIDLE = 0x8010743f
+ PPPIOCGIDLE32 = 0x8008743f
+ PPPIOCGIDLE64 = 0x8010743f
+ PPPIOCGL2TPSTATS = 0x80487436
+ PPPIOCGMRU = 0x80047453
+ PPPIOCGRASYNCMAP = 0x80047455
+ PPPIOCGUNIT = 0x80047456
+ PPPIOCGXASYNCMAP = 0x80207450
+ PPPIOCSACTIVE = 0x40107446
+ PPPIOCSASYNCMAP = 0x40047457
+ PPPIOCSCOMPRESS = 0x4010744d
+ PPPIOCSDEBUG = 0x40047440
+ PPPIOCSFLAGS = 0x40047459
+ PPPIOCSMAXCID = 0x40047451
+ PPPIOCSMRRU = 0x4004743b
+ PPPIOCSMRU = 0x40047452
+ PPPIOCSNPMODE = 0x4008744b
+ PPPIOCSPASS = 0x40107447
+ PPPIOCSRASYNCMAP = 0x40047454
+ PPPIOCSXASYNCMAP = 0x4020744f
+ PPPIOCUNBRIDGECHAN = 0x7434
+ PPPIOCXFERUNIT = 0x744e
+ PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTRACE_SYSEMU = 0x1f
+ PTRACE_SYSEMU_SINGLESTEP = 0x20
+ RLIMIT_AS = 0x9
+ RLIMIT_MEMLOCK = 0x8
+ RLIMIT_NOFILE = 0x7
+ RLIMIT_NPROC = 0x6
+ RLIMIT_RSS = 0x5
+ RNDADDENTROPY = 0x40085203
+ RNDADDTOENTCNT = 0x40045201
+ RNDCLEARPOOL = 0x5206
+ RNDGETENTCNT = 0x80045200
+ RNDGETPOOL = 0x80085202
+ RNDRESEEDCRNG = 0x5207
+ RNDZAPENTCNT = 0x5204
+ RTC_AIE_OFF = 0x7002
+ RTC_AIE_ON = 0x7001
+ RTC_ALM_READ = 0x80247008
+ RTC_ALM_SET = 0x40247007
+ RTC_EPOCH_READ = 0x8008700d
+ RTC_EPOCH_SET = 0x4008700e
+ RTC_IRQP_READ = 0x8008700b
+ RTC_IRQP_SET = 0x4008700c
+ RTC_PARAM_GET = 0x40187013
+ RTC_PARAM_SET = 0x40187014
+ RTC_PIE_OFF = 0x7006
+ RTC_PIE_ON = 0x7005
+ RTC_PLL_GET = 0x80207011
+ RTC_PLL_SET = 0x40207012
+ RTC_RD_TIME = 0x80247009
+ RTC_SET_TIME = 0x4024700a
+ RTC_UIE_OFF = 0x7004
+ RTC_UIE_ON = 0x7003
+ RTC_VL_CLR = 0x7014
+ RTC_VL_READ = 0x80047013
+ RTC_WIE_OFF = 0x7010
+ RTC_WIE_ON = 0x700f
+ RTC_WKALM_RD = 0x80287010
+ RTC_WKALM_SET = 0x4028700f
+ SCM_TIMESTAMPING = 0x25
+ SCM_TIMESTAMPING_OPT_STATS = 0x36
+ SCM_TIMESTAMPING_PKTINFO = 0x3a
+ SCM_TIMESTAMPNS = 0x23
+ SCM_TXTIME = 0x3d
+ SCM_WIFI_STATUS = 0x29
+ SFD_CLOEXEC = 0x80000
+ SFD_NONBLOCK = 0x800
+ SIOCATMARK = 0x8905
+ SIOCGPGRP = 0x8904
+ SIOCGSTAMPNS_NEW = 0x80108907
+ SIOCGSTAMP_NEW = 0x80108906
+ SIOCINQ = 0x541b
+ SIOCOUTQ = 0x5411
+ SIOCSPGRP = 0x8902
+ SOCK_CLOEXEC = 0x80000
+ SOCK_DGRAM = 0x2
+ SOCK_NONBLOCK = 0x800
+ SOCK_STREAM = 0x1
+ SOL_SOCKET = 0x1
+ SO_ACCEPTCONN = 0x1e
+ SO_ATTACH_BPF = 0x32
+ SO_ATTACH_REUSEPORT_CBPF = 0x33
+ SO_ATTACH_REUSEPORT_EBPF = 0x34
+ SO_BINDTODEVICE = 0x19
+ SO_BINDTOIFINDEX = 0x3e
+ SO_BPF_EXTENSIONS = 0x30
+ SO_BROADCAST = 0x6
+ SO_BSDCOMPAT = 0xe
+ SO_BUF_LOCK = 0x48
+ SO_BUSY_POLL = 0x2e
+ SO_BUSY_POLL_BUDGET = 0x46
+ SO_CNX_ADVICE = 0x35
+ SO_COOKIE = 0x39
+ SO_DETACH_REUSEPORT_BPF = 0x44
+ SO_DOMAIN = 0x27
+ SO_DONTROUTE = 0x5
+ SO_ERROR = 0x4
+ SO_INCOMING_CPU = 0x31
+ SO_INCOMING_NAPI_ID = 0x38
+ SO_KEEPALIVE = 0x9
+ SO_LINGER = 0xd
+ SO_LOCK_FILTER = 0x2c
+ SO_MARK = 0x24
+ SO_MAX_PACING_RATE = 0x2f
+ SO_MEMINFO = 0x37
+ SO_NETNS_COOKIE = 0x47
+ SO_NOFCS = 0x2b
+ SO_OOBINLINE = 0xa
+ SO_PASSCRED = 0x10
+ SO_PASSSEC = 0x22
+ SO_PEEK_OFF = 0x2a
+ SO_PEERCRED = 0x11
+ SO_PEERGROUPS = 0x3b
+ SO_PEERSEC = 0x1f
+ SO_PREFER_BUSY_POLL = 0x45
+ SO_PROTOCOL = 0x26
+ SO_RCVBUF = 0x8
+ SO_RCVBUFFORCE = 0x21
+ SO_RCVLOWAT = 0x12
+ SO_RCVTIMEO = 0x14
+ SO_RCVTIMEO_NEW = 0x42
+ SO_RCVTIMEO_OLD = 0x14
+ SO_RESERVE_MEM = 0x49
+ SO_REUSEADDR = 0x2
+ SO_REUSEPORT = 0xf
+ SO_RXQ_OVFL = 0x28
+ SO_SECURITY_AUTHENTICATION = 0x16
+ SO_SECURITY_ENCRYPTION_NETWORK = 0x18
+ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+ SO_SELECT_ERR_QUEUE = 0x2d
+ SO_SNDBUF = 0x7
+ SO_SNDBUFFORCE = 0x20
+ SO_SNDLOWAT = 0x13
+ SO_SNDTIMEO = 0x15
+ SO_SNDTIMEO_NEW = 0x43
+ SO_SNDTIMEO_OLD = 0x15
+ SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPING_NEW = 0x41
+ SO_TIMESTAMPING_OLD = 0x25
+ SO_TIMESTAMPNS = 0x23
+ SO_TIMESTAMPNS_NEW = 0x40
+ SO_TIMESTAMPNS_OLD = 0x23
+ SO_TIMESTAMP_NEW = 0x3f
+ SO_TXTIME = 0x3d
+ SO_TYPE = 0x3
+ SO_WIFI_STATUS = 0x29
+ SO_ZEROCOPY = 0x3c
+ TAB1 = 0x800
+ TAB2 = 0x1000
+ TAB3 = 0x1800
+ TABDLY = 0x1800
+ TCFLSH = 0x540b
+ TCGETA = 0x5405
+ TCGETS = 0x5401
+ TCGETS2 = 0x802c542a
+ TCGETX = 0x5432
+ TCSAFLUSH = 0x2
+ TCSBRK = 0x5409
+ TCSBRKP = 0x5425
+ TCSETA = 0x5406
+ TCSETAF = 0x5408
+ TCSETAW = 0x5407
+ TCSETS = 0x5402
+ TCSETS2 = 0x402c542b
+ TCSETSF = 0x5404
+ TCSETSF2 = 0x402c542d
+ TCSETSW = 0x5403
+ TCSETSW2 = 0x402c542c
+ TCSETX = 0x5433
+ TCSETXF = 0x5434
+ TCSETXW = 0x5435
+ TCXONC = 0x540a
+ TFD_CLOEXEC = 0x80000
+ TFD_NONBLOCK = 0x800
+ TIOCCBRK = 0x5428
+ TIOCCONS = 0x541d
+ TIOCEXCL = 0x540c
+ TIOCGDEV = 0x80045432
+ TIOCGETD = 0x5424
+ TIOCGEXCL = 0x80045440
+ TIOCGICOUNT = 0x545d
+ TIOCGISO7816 = 0x80285442
+ TIOCGLCKTRMIOS = 0x5456
+ TIOCGPGRP = 0x540f
+ TIOCGPKT = 0x80045438
+ TIOCGPTLCK = 0x80045439
+ TIOCGPTN = 0x80045430
+ TIOCGPTPEER = 0x5441
+ TIOCGRS485 = 0x542e
+ TIOCGSERIAL = 0x541e
+ TIOCGSID = 0x5429
+ TIOCGSOFTCAR = 0x5419
+ TIOCGWINSZ = 0x5413
+ TIOCINQ = 0x541b
+ TIOCLINUX = 0x541c
+ TIOCMBIC = 0x5417
+ TIOCMBIS = 0x5416
+ TIOCMGET = 0x5415
+ TIOCMIWAIT = 0x545c
+ TIOCMSET = 0x5418
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x5422
+ TIOCNXCL = 0x540d
+ TIOCOUTQ = 0x5411
+ TIOCPKT = 0x5420
+ TIOCSBRK = 0x5427
+ TIOCSCTTY = 0x540e
+ TIOCSERCONFIG = 0x5453
+ TIOCSERGETLSR = 0x5459
+ TIOCSERGETMULTI = 0x545a
+ TIOCSERGSTRUCT = 0x5458
+ TIOCSERGWILD = 0x5454
+ TIOCSERSETMULTI = 0x545b
+ TIOCSERSWILD = 0x5455
+ TIOCSER_TEMT = 0x1
+ TIOCSETD = 0x5423
+ TIOCSIG = 0x40045436
+ TIOCSISO7816 = 0xc0285443
+ TIOCSLCKTRMIOS = 0x5457
+ TIOCSPGRP = 0x5410
+ TIOCSPTLCK = 0x40045431
+ TIOCSRS485 = 0x542f
+ TIOCSSERIAL = 0x541f
+ TIOCSSOFTCAR = 0x541a
+ TIOCSTI = 0x5412
+ TIOCSWINSZ = 0x5414
+ TIOCVHANGUP = 0x5437
+ TOSTOP = 0x100
+ TUNATTACHFILTER = 0x401054d5
+ TUNDETACHFILTER = 0x401054d6
+ TUNGETDEVNETNS = 0x54e3
+ TUNGETFEATURES = 0x800454cf
+ TUNGETFILTER = 0x801054db
+ TUNGETIFF = 0x800454d2
+ TUNGETSNDBUF = 0x800454d3
+ TUNGETVNETBE = 0x800454df
+ TUNGETVNETHDRSZ = 0x800454d7
+ TUNGETVNETLE = 0x800454dd
+ TUNSETCARRIER = 0x400454e2
+ TUNSETDEBUG = 0x400454c9
+ TUNSETFILTEREBPF = 0x800454e1
+ TUNSETGROUP = 0x400454ce
+ TUNSETIFF = 0x400454ca
+ TUNSETIFINDEX = 0x400454da
+ TUNSETLINK = 0x400454cd
+ TUNSETNOCSUM = 0x400454c8
+ TUNSETOFFLOAD = 0x400454d0
+ TUNSETOWNER = 0x400454cc
+ TUNSETPERSIST = 0x400454cb
+ TUNSETQUEUE = 0x400454d9
+ TUNSETSNDBUF = 0x400454d4
+ TUNSETSTEERINGEBPF = 0x800454e0
+ TUNSETTXFILTER = 0x400454d1
+ TUNSETVNETBE = 0x400454de
+ TUNSETVNETHDRSZ = 0x400454d8
+ TUNSETVNETLE = 0x400454dc
+ UBI_IOCATT = 0x40186f40
+ UBI_IOCDET = 0x40046f41
+ UBI_IOCEBCH = 0x40044f02
+ UBI_IOCEBER = 0x40044f01
+ UBI_IOCEBISMAP = 0x80044f05
+ UBI_IOCEBMAP = 0x40084f03
+ UBI_IOCEBUNMAP = 0x40044f04
+ UBI_IOCMKVOL = 0x40986f00
+ UBI_IOCRMVOL = 0x40046f01
+ UBI_IOCRNVOL = 0x51106f03
+ UBI_IOCRPEB = 0x40046f04
+ UBI_IOCRSVOL = 0x400c6f02
+ UBI_IOCSETVOLPROP = 0x40104f06
+ UBI_IOCSPEB = 0x40046f05
+ UBI_IOCVOLCRBLK = 0x40804f07
+ UBI_IOCVOLRMBLK = 0x4f08
+ UBI_IOCVOLUP = 0x40084f00
+ VDISCARD = 0xd
+ VEOF = 0x4
+ VEOL = 0xb
+ VEOL2 = 0x10
+ VMIN = 0x6
+ VREPRINT = 0xc
+ VSTART = 0x8
+ VSTOP = 0x9
+ VSUSP = 0xa
+ VSWTC = 0x7
+ VT1 = 0x4000
+ VTDLY = 0x4000
+ VTIME = 0x5
+ VWERASE = 0xe
+ WDIOC_GETBOOTSTATUS = 0x80045702
+ WDIOC_GETPRETIMEOUT = 0x80045709
+ WDIOC_GETSTATUS = 0x80045701
+ WDIOC_GETSUPPORT = 0x80285700
+ WDIOC_GETTEMP = 0x80045703
+ WDIOC_GETTIMELEFT = 0x8004570a
+ WDIOC_GETTIMEOUT = 0x80045707
+ WDIOC_KEEPALIVE = 0x80045705
+ WDIOC_SETOPTIONS = 0x80045704
+ WORDSIZE = 0x40
+ XCASE = 0x4
+ XTABS = 0x1800
+ _HIDIOCGRAWNAME = 0x80804804
+ _HIDIOCGRAWPHYS = 0x80404805
+ _HIDIOCGRAWUNIQ = 0x80404808
+)
+
+// Errors
+const (
+ EADDRINUSE = syscall.Errno(0x62)
+ EADDRNOTAVAIL = syscall.Errno(0x63)
+ EADV = syscall.Errno(0x44)
+ EAFNOSUPPORT = syscall.Errno(0x61)
+ EALREADY = syscall.Errno(0x72)
+ EBADE = syscall.Errno(0x34)
+ EBADFD = syscall.Errno(0x4d)
+ EBADMSG = syscall.Errno(0x4a)
+ EBADR = syscall.Errno(0x35)
+ EBADRQC = syscall.Errno(0x38)
+ EBADSLT = syscall.Errno(0x39)
+ EBFONT = syscall.Errno(0x3b)
+ ECANCELED = syscall.Errno(0x7d)
+ ECHRNG = syscall.Errno(0x2c)
+ ECOMM = syscall.Errno(0x46)
+ ECONNABORTED = syscall.Errno(0x67)
+ ECONNREFUSED = syscall.Errno(0x6f)
+ ECONNRESET = syscall.Errno(0x68)
+ EDEADLK = syscall.Errno(0x23)
+ EDEADLOCK = syscall.Errno(0x23)
+ EDESTADDRREQ = syscall.Errno(0x59)
+ EDOTDOT = syscall.Errno(0x49)
+ EDQUOT = syscall.Errno(0x7a)
+ EHOSTDOWN = syscall.Errno(0x70)
+ EHOSTUNREACH = syscall.Errno(0x71)
+ EHWPOISON = syscall.Errno(0x85)
+ EIDRM = syscall.Errno(0x2b)
+ EILSEQ = syscall.Errno(0x54)
+ EINPROGRESS = syscall.Errno(0x73)
+ EISCONN = syscall.Errno(0x6a)
+ EISNAM = syscall.Errno(0x78)
+ EKEYEXPIRED = syscall.Errno(0x7f)
+ EKEYREJECTED = syscall.Errno(0x81)
+ EKEYREVOKED = syscall.Errno(0x80)
+ EL2HLT = syscall.Errno(0x33)
+ EL2NSYNC = syscall.Errno(0x2d)
+ EL3HLT = syscall.Errno(0x2e)
+ EL3RST = syscall.Errno(0x2f)
+ ELIBACC = syscall.Errno(0x4f)
+ ELIBBAD = syscall.Errno(0x50)
+ ELIBEXEC = syscall.Errno(0x53)
+ ELIBMAX = syscall.Errno(0x52)
+ ELIBSCN = syscall.Errno(0x51)
+ ELNRNG = syscall.Errno(0x30)
+ ELOOP = syscall.Errno(0x28)
+ EMEDIUMTYPE = syscall.Errno(0x7c)
+ EMSGSIZE = syscall.Errno(0x5a)
+ EMULTIHOP = syscall.Errno(0x48)
+ ENAMETOOLONG = syscall.Errno(0x24)
+ ENAVAIL = syscall.Errno(0x77)
+ ENETDOWN = syscall.Errno(0x64)
+ ENETRESET = syscall.Errno(0x66)
+ ENETUNREACH = syscall.Errno(0x65)
+ ENOANO = syscall.Errno(0x37)
+ ENOBUFS = syscall.Errno(0x69)
+ ENOCSI = syscall.Errno(0x32)
+ ENODATA = syscall.Errno(0x3d)
+ ENOKEY = syscall.Errno(0x7e)
+ ENOLCK = syscall.Errno(0x25)
+ ENOLINK = syscall.Errno(0x43)
+ ENOMEDIUM = syscall.Errno(0x7b)
+ ENOMSG = syscall.Errno(0x2a)
+ ENONET = syscall.Errno(0x40)
+ ENOPKG = syscall.Errno(0x41)
+ ENOPROTOOPT = syscall.Errno(0x5c)
+ ENOSR = syscall.Errno(0x3f)
+ ENOSTR = syscall.Errno(0x3c)
+ ENOSYS = syscall.Errno(0x26)
+ ENOTCONN = syscall.Errno(0x6b)
+ ENOTEMPTY = syscall.Errno(0x27)
+ ENOTNAM = syscall.Errno(0x76)
+ ENOTRECOVERABLE = syscall.Errno(0x83)
+ ENOTSOCK = syscall.Errno(0x58)
+ ENOTSUP = syscall.Errno(0x5f)
+ ENOTUNIQ = syscall.Errno(0x4c)
+ EOPNOTSUPP = syscall.Errno(0x5f)
+ EOVERFLOW = syscall.Errno(0x4b)
+ EOWNERDEAD = syscall.Errno(0x82)
+ EPFNOSUPPORT = syscall.Errno(0x60)
+ EPROTO = syscall.Errno(0x47)
+ EPROTONOSUPPORT = syscall.Errno(0x5d)
+ EPROTOTYPE = syscall.Errno(0x5b)
+ EREMCHG = syscall.Errno(0x4e)
+ EREMOTE = syscall.Errno(0x42)
+ EREMOTEIO = syscall.Errno(0x79)
+ ERESTART = syscall.Errno(0x55)
+ ERFKILL = syscall.Errno(0x84)
+ ESHUTDOWN = syscall.Errno(0x6c)
+ ESOCKTNOSUPPORT = syscall.Errno(0x5e)
+ ESRMNT = syscall.Errno(0x45)
+ ESTALE = syscall.Errno(0x74)
+ ESTRPIPE = syscall.Errno(0x56)
+ ETIME = syscall.Errno(0x3e)
+ ETIMEDOUT = syscall.Errno(0x6e)
+ ETOOMANYREFS = syscall.Errno(0x6d)
+ EUCLEAN = syscall.Errno(0x75)
+ EUNATCH = syscall.Errno(0x31)
+ EUSERS = syscall.Errno(0x57)
+ EXFULL = syscall.Errno(0x36)
+)
+
+// Signals
+const (
+ SIGBUS = syscall.Signal(0x7)
+ SIGCHLD = syscall.Signal(0x11)
+ SIGCLD = syscall.Signal(0x11)
+ SIGCONT = syscall.Signal(0x12)
+ SIGIO = syscall.Signal(0x1d)
+ SIGPOLL = syscall.Signal(0x1d)
+ SIGPROF = syscall.Signal(0x1b)
+ SIGPWR = syscall.Signal(0x1e)
+ SIGSTKFLT = syscall.Signal(0x10)
+ SIGSTOP = syscall.Signal(0x13)
+ SIGSYS = syscall.Signal(0x1f)
+ SIGTSTP = syscall.Signal(0x14)
+ SIGTTIN = syscall.Signal(0x15)
+ SIGTTOU = syscall.Signal(0x16)
+ SIGURG = syscall.Signal(0x17)
+ SIGUSR1 = syscall.Signal(0xa)
+ SIGUSR2 = syscall.Signal(0xc)
+ SIGVTALRM = syscall.Signal(0x1a)
+ SIGWINCH = syscall.Signal(0x1c)
+ SIGXCPU = syscall.Signal(0x18)
+ SIGXFSZ = syscall.Signal(0x19)
+)
+
+// Error table
+var errorList = [...]struct {
+ num syscall.Errno
+ name string
+ desc string
+}{
+ {1, "EPERM", "operation not permitted"},
+ {2, "ENOENT", "no such file or directory"},
+ {3, "ESRCH", "no such process"},
+ {4, "EINTR", "interrupted system call"},
+ {5, "EIO", "input/output error"},
+ {6, "ENXIO", "no such device or address"},
+ {7, "E2BIG", "argument list too long"},
+ {8, "ENOEXEC", "exec format error"},
+ {9, "EBADF", "bad file descriptor"},
+ {10, "ECHILD", "no child processes"},
+ {11, "EAGAIN", "resource temporarily unavailable"},
+ {12, "ENOMEM", "cannot allocate memory"},
+ {13, "EACCES", "permission denied"},
+ {14, "EFAULT", "bad address"},
+ {15, "ENOTBLK", "block device required"},
+ {16, "EBUSY", "device or resource busy"},
+ {17, "EEXIST", "file exists"},
+ {18, "EXDEV", "invalid cross-device link"},
+ {19, "ENODEV", "no such device"},
+ {20, "ENOTDIR", "not a directory"},
+ {21, "EISDIR", "is a directory"},
+ {22, "EINVAL", "invalid argument"},
+ {23, "ENFILE", "too many open files in system"},
+ {24, "EMFILE", "too many open files"},
+ {25, "ENOTTY", "inappropriate ioctl for device"},
+ {26, "ETXTBSY", "text file busy"},
+ {27, "EFBIG", "file too large"},
+ {28, "ENOSPC", "no space left on device"},
+ {29, "ESPIPE", "illegal seek"},
+ {30, "EROFS", "read-only file system"},
+ {31, "EMLINK", "too many links"},
+ {32, "EPIPE", "broken pipe"},
+ {33, "EDOM", "numerical argument out of domain"},
+ {34, "ERANGE", "numerical result out of range"},
+ {35, "EDEADLK", "resource deadlock avoided"},
+ {36, "ENAMETOOLONG", "file name too long"},
+ {37, "ENOLCK", "no locks available"},
+ {38, "ENOSYS", "function not implemented"},
+ {39, "ENOTEMPTY", "directory not empty"},
+ {40, "ELOOP", "too many levels of symbolic links"},
+ {42, "ENOMSG", "no message of desired type"},
+ {43, "EIDRM", "identifier removed"},
+ {44, "ECHRNG", "channel number out of range"},
+ {45, "EL2NSYNC", "level 2 not synchronized"},
+ {46, "EL3HLT", "level 3 halted"},
+ {47, "EL3RST", "level 3 reset"},
+ {48, "ELNRNG", "link number out of range"},
+ {49, "EUNATCH", "protocol driver not attached"},
+ {50, "ENOCSI", "no CSI structure available"},
+ {51, "EL2HLT", "level 2 halted"},
+ {52, "EBADE", "invalid exchange"},
+ {53, "EBADR", "invalid request descriptor"},
+ {54, "EXFULL", "exchange full"},
+ {55, "ENOANO", "no anode"},
+ {56, "EBADRQC", "invalid request code"},
+ {57, "EBADSLT", "invalid slot"},
+ {59, "EBFONT", "bad font file format"},
+ {60, "ENOSTR", "device not a stream"},
+ {61, "ENODATA", "no data available"},
+ {62, "ETIME", "timer expired"},
+ {63, "ENOSR", "out of streams resources"},
+ {64, "ENONET", "machine is not on the network"},
+ {65, "ENOPKG", "package not installed"},
+ {66, "EREMOTE", "object is remote"},
+ {67, "ENOLINK", "link has been severed"},
+ {68, "EADV", "advertise error"},
+ {69, "ESRMNT", "srmount error"},
+ {70, "ECOMM", "communication error on send"},
+ {71, "EPROTO", "protocol error"},
+ {72, "EMULTIHOP", "multihop attempted"},
+ {73, "EDOTDOT", "RFS specific error"},
+ {74, "EBADMSG", "bad message"},
+ {75, "EOVERFLOW", "value too large for defined data type"},
+ {76, "ENOTUNIQ", "name not unique on network"},
+ {77, "EBADFD", "file descriptor in bad state"},
+ {78, "EREMCHG", "remote address changed"},
+ {79, "ELIBACC", "can not access a needed shared library"},
+ {80, "ELIBBAD", "accessing a corrupted shared library"},
+ {81, "ELIBSCN", ".lib section in a.out corrupted"},
+ {82, "ELIBMAX", "attempting to link in too many shared libraries"},
+ {83, "ELIBEXEC", "cannot exec a shared library directly"},
+ {84, "EILSEQ", "invalid or incomplete multibyte or wide character"},
+ {85, "ERESTART", "interrupted system call should be restarted"},
+ {86, "ESTRPIPE", "streams pipe error"},
+ {87, "EUSERS", "too many users"},
+ {88, "ENOTSOCK", "socket operation on non-socket"},
+ {89, "EDESTADDRREQ", "destination address required"},
+ {90, "EMSGSIZE", "message too long"},
+ {91, "EPROTOTYPE", "protocol wrong type for socket"},
+ {92, "ENOPROTOOPT", "protocol not available"},
+ {93, "EPROTONOSUPPORT", "protocol not supported"},
+ {94, "ESOCKTNOSUPPORT", "socket type not supported"},
+ {95, "ENOTSUP", "operation not supported"},
+ {96, "EPFNOSUPPORT", "protocol family not supported"},
+ {97, "EAFNOSUPPORT", "address family not supported by protocol"},
+ {98, "EADDRINUSE", "address already in use"},
+ {99, "EADDRNOTAVAIL", "cannot assign requested address"},
+ {100, "ENETDOWN", "network is down"},
+ {101, "ENETUNREACH", "network is unreachable"},
+ {102, "ENETRESET", "network dropped connection on reset"},
+ {103, "ECONNABORTED", "software caused connection abort"},
+ {104, "ECONNRESET", "connection reset by peer"},
+ {105, "ENOBUFS", "no buffer space available"},
+ {106, "EISCONN", "transport endpoint is already connected"},
+ {107, "ENOTCONN", "transport endpoint is not connected"},
+ {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"},
+ {109, "ETOOMANYREFS", "too many references: cannot splice"},
+ {110, "ETIMEDOUT", "connection timed out"},
+ {111, "ECONNREFUSED", "connection refused"},
+ {112, "EHOSTDOWN", "host is down"},
+ {113, "EHOSTUNREACH", "no route to host"},
+ {114, "EALREADY", "operation already in progress"},
+ {115, "EINPROGRESS", "operation now in progress"},
+ {116, "ESTALE", "stale file handle"},
+ {117, "EUCLEAN", "structure needs cleaning"},
+ {118, "ENOTNAM", "not a XENIX named type file"},
+ {119, "ENAVAIL", "no XENIX semaphores available"},
+ {120, "EISNAM", "is a named type file"},
+ {121, "EREMOTEIO", "remote I/O error"},
+ {122, "EDQUOT", "disk quota exceeded"},
+ {123, "ENOMEDIUM", "no medium found"},
+ {124, "EMEDIUMTYPE", "wrong medium type"},
+ {125, "ECANCELED", "operation canceled"},
+ {126, "ENOKEY", "required key not available"},
+ {127, "EKEYEXPIRED", "key has expired"},
+ {128, "EKEYREVOKED", "key has been revoked"},
+ {129, "EKEYREJECTED", "key was rejected by service"},
+ {130, "EOWNERDEAD", "owner died"},
+ {131, "ENOTRECOVERABLE", "state not recoverable"},
+ {132, "ERFKILL", "operation not possible due to RF-kill"},
+ {133, "EHWPOISON", "memory page has hardware error"},
+}
+
+// Signal table
+var signalList = [...]struct {
+ num syscall.Signal
+ name string
+ desc string
+}{
+ {1, "SIGHUP", "hangup"},
+ {2, "SIGINT", "interrupt"},
+ {3, "SIGQUIT", "quit"},
+ {4, "SIGILL", "illegal instruction"},
+ {5, "SIGTRAP", "trace/breakpoint trap"},
+ {6, "SIGABRT", "aborted"},
+ {7, "SIGBUS", "bus error"},
+ {8, "SIGFPE", "floating point exception"},
+ {9, "SIGKILL", "killed"},
+ {10, "SIGUSR1", "user defined signal 1"},
+ {11, "SIGSEGV", "segmentation fault"},
+ {12, "SIGUSR2", "user defined signal 2"},
+ {13, "SIGPIPE", "broken pipe"},
+ {14, "SIGALRM", "alarm clock"},
+ {15, "SIGTERM", "terminated"},
+ {16, "SIGSTKFLT", "stack fault"},
+ {17, "SIGCHLD", "child exited"},
+ {18, "SIGCONT", "continued"},
+ {19, "SIGSTOP", "stopped (signal)"},
+ {20, "SIGTSTP", "stopped"},
+ {21, "SIGTTIN", "stopped (tty input)"},
+ {22, "SIGTTOU", "stopped (tty output)"},
+ {23, "SIGURG", "urgent I/O condition"},
+ {24, "SIGXCPU", "CPU time limit exceeded"},
+ {25, "SIGXFSZ", "file size limit exceeded"},
+ {26, "SIGVTALRM", "virtual timer expired"},
+ {27, "SIGPROF", "profiling timer expired"},
+ {28, "SIGWINCH", "window changed"},
+ {29, "SIGIO", "I/O possible"},
+ {30, "SIGPWR", "power failure"},
+ {31, "SIGSYS", "bad system call"},
+}
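
The file above supplies the linux/loong64 ioctl, termios, and socket-option constants plus the errno and signal lookup tables that x/sys/unix generates for every GOOS/GOARCH pair. A minimal usage sketch, not part of the diff, of how these tables are consumed through the package's helpers (the Mkdir("/") call is only an illustrative way to provoke EEXIST):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// ErrnoName and SignalName resolve names through the generated
	// errorList and signalList tables at the end of the file above.
	fmt.Println(unix.ErrnoName(unix.EADDRINUSE)) // EADDRINUSE
	fmt.Println(unix.SignalName(unix.SIGCHLD))   // SIGCHLD

	// The constants are plain syscall.Errno values, so they compare
	// directly (via errors.Is) with errors returned by the unix wrappers.
	err := unix.Mkdir("/", 0o755) // "/" already exists
	fmt.Println(errors.Is(err, unix.EEXIST))
}
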
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
new file mode 100644
index 000000000..8cdfbe71e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
@@ -0,0 +1,552 @@
+// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build linux && loong64
+// +build linux,loong64
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
+ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+ n = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(events) > 0 {
+ _p0 = unsafe.Pointer(&events[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
+ _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+ r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0)
+ euid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+ off = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setfsgid(gid int) (prev int, err error) {
+ r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+ prev = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setfsuid(uid int) (prev int, err error) {
+ r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+ prev = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+ n = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ nn = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
+ xaddr = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(cmdline)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
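
Each function above is a thin wrapper generated by mksyscall: it packs the Go arguments into a raw Syscall/RawSyscall invocation and converts a non-zero errno into a Go error. A short sketch of calling one of them (the /etc/os-release path is only an example):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/os-release") // example path only
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// Fstat is one of the wrappers generated above: it issues SYS_FSTAT
	// and turns a non-zero errno into an error via errnoErr.
	var st unix.Stat_t
	if err := unix.Fstat(int(f.Fd()), &st); err != nil {
		fmt.Println("fstat:", err)
		return
	}
	fmt.Printf("size=%d bytes, mode=%#o\n", st.Size, st.Mode&0o777)
}
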
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
new file mode 100644
index 000000000..e443f9a32
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -0,0 +1,313 @@
+// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build loong64 && linux
+// +build loong64,linux
+
+package unix
+
+const (
+ SYS_IO_SETUP = 0
+ SYS_IO_DESTROY = 1
+ SYS_IO_SUBMIT = 2
+ SYS_IO_CANCEL = 3
+ SYS_IO_GETEVENTS = 4
+ SYS_SETXATTR = 5
+ SYS_LSETXATTR = 6
+ SYS_FSETXATTR = 7
+ SYS_GETXATTR = 8
+ SYS_LGETXATTR = 9
+ SYS_FGETXATTR = 10
+ SYS_LISTXATTR = 11
+ SYS_LLISTXATTR = 12
+ SYS_FLISTXATTR = 13
+ SYS_REMOVEXATTR = 14
+ SYS_LREMOVEXATTR = 15
+ SYS_FREMOVEXATTR = 16
+ SYS_GETCWD = 17
+ SYS_LOOKUP_DCOOKIE = 18
+ SYS_EVENTFD2 = 19
+ SYS_EPOLL_CREATE1 = 20
+ SYS_EPOLL_CTL = 21
+ SYS_EPOLL_PWAIT = 22
+ SYS_DUP = 23
+ SYS_DUP3 = 24
+ SYS_FCNTL = 25
+ SYS_INOTIFY_INIT1 = 26
+ SYS_INOTIFY_ADD_WATCH = 27
+ SYS_INOTIFY_RM_WATCH = 28
+ SYS_IOCTL = 29
+ SYS_IOPRIO_SET = 30
+ SYS_IOPRIO_GET = 31
+ SYS_FLOCK = 32
+ SYS_MKNODAT = 33
+ SYS_MKDIRAT = 34
+ SYS_UNLINKAT = 35
+ SYS_SYMLINKAT = 36
+ SYS_LINKAT = 37
+ SYS_UMOUNT2 = 39
+ SYS_MOUNT = 40
+ SYS_PIVOT_ROOT = 41
+ SYS_NFSSERVCTL = 42
+ SYS_STATFS = 43
+ SYS_FSTATFS = 44
+ SYS_TRUNCATE = 45
+ SYS_FTRUNCATE = 46
+ SYS_FALLOCATE = 47
+ SYS_FACCESSAT = 48
+ SYS_CHDIR = 49
+ SYS_FCHDIR = 50
+ SYS_CHROOT = 51
+ SYS_FCHMOD = 52
+ SYS_FCHMODAT = 53
+ SYS_FCHOWNAT = 54
+ SYS_FCHOWN = 55
+ SYS_OPENAT = 56
+ SYS_CLOSE = 57
+ SYS_VHANGUP = 58
+ SYS_PIPE2 = 59
+ SYS_QUOTACTL = 60
+ SYS_GETDENTS64 = 61
+ SYS_LSEEK = 62
+ SYS_READ = 63
+ SYS_WRITE = 64
+ SYS_READV = 65
+ SYS_WRITEV = 66
+ SYS_PREAD64 = 67
+ SYS_PWRITE64 = 68
+ SYS_PREADV = 69
+ SYS_PWRITEV = 70
+ SYS_SENDFILE = 71
+ SYS_PSELECT6 = 72
+ SYS_PPOLL = 73
+ SYS_SIGNALFD4 = 74
+ SYS_VMSPLICE = 75
+ SYS_SPLICE = 76
+ SYS_TEE = 77
+ SYS_READLINKAT = 78
+ SYS_FSTATAT = 79
+ SYS_FSTAT = 80
+ SYS_SYNC = 81
+ SYS_FSYNC = 82
+ SYS_FDATASYNC = 83
+ SYS_SYNC_FILE_RANGE = 84
+ SYS_TIMERFD_CREATE = 85
+ SYS_TIMERFD_SETTIME = 86
+ SYS_TIMERFD_GETTIME = 87
+ SYS_UTIMENSAT = 88
+ SYS_ACCT = 89
+ SYS_CAPGET = 90
+ SYS_CAPSET = 91
+ SYS_PERSONALITY = 92
+ SYS_EXIT = 93
+ SYS_EXIT_GROUP = 94
+ SYS_WAITID = 95
+ SYS_SET_TID_ADDRESS = 96
+ SYS_UNSHARE = 97
+ SYS_FUTEX = 98
+ SYS_SET_ROBUST_LIST = 99
+ SYS_GET_ROBUST_LIST = 100
+ SYS_NANOSLEEP = 101
+ SYS_GETITIMER = 102
+ SYS_SETITIMER = 103
+ SYS_KEXEC_LOAD = 104
+ SYS_INIT_MODULE = 105
+ SYS_DELETE_MODULE = 106
+ SYS_TIMER_CREATE = 107
+ SYS_TIMER_GETTIME = 108
+ SYS_TIMER_GETOVERRUN = 109
+ SYS_TIMER_SETTIME = 110
+ SYS_TIMER_DELETE = 111
+ SYS_CLOCK_SETTIME = 112
+ SYS_CLOCK_GETTIME = 113
+ SYS_CLOCK_GETRES = 114
+ SYS_CLOCK_NANOSLEEP = 115
+ SYS_SYSLOG = 116
+ SYS_PTRACE = 117
+ SYS_SCHED_SETPARAM = 118
+ SYS_SCHED_SETSCHEDULER = 119
+ SYS_SCHED_GETSCHEDULER = 120
+ SYS_SCHED_GETPARAM = 121
+ SYS_SCHED_SETAFFINITY = 122
+ SYS_SCHED_GETAFFINITY = 123
+ SYS_SCHED_YIELD = 124
+ SYS_SCHED_GET_PRIORITY_MAX = 125
+ SYS_SCHED_GET_PRIORITY_MIN = 126
+ SYS_SCHED_RR_GET_INTERVAL = 127
+ SYS_RESTART_SYSCALL = 128
+ SYS_KILL = 129
+ SYS_TKILL = 130
+ SYS_TGKILL = 131
+ SYS_SIGALTSTACK = 132
+ SYS_RT_SIGSUSPEND = 133
+ SYS_RT_SIGACTION = 134
+ SYS_RT_SIGPROCMASK = 135
+ SYS_RT_SIGPENDING = 136
+ SYS_RT_SIGTIMEDWAIT = 137
+ SYS_RT_SIGQUEUEINFO = 138
+ SYS_RT_SIGRETURN = 139
+ SYS_SETPRIORITY = 140
+ SYS_GETPRIORITY = 141
+ SYS_REBOOT = 142
+ SYS_SETREGID = 143
+ SYS_SETGID = 144
+ SYS_SETREUID = 145
+ SYS_SETUID = 146
+ SYS_SETRESUID = 147
+ SYS_GETRESUID = 148
+ SYS_SETRESGID = 149
+ SYS_GETRESGID = 150
+ SYS_SETFSUID = 151
+ SYS_SETFSGID = 152
+ SYS_TIMES = 153
+ SYS_SETPGID = 154
+ SYS_GETPGID = 155
+ SYS_GETSID = 156
+ SYS_SETSID = 157
+ SYS_GETGROUPS = 158
+ SYS_SETGROUPS = 159
+ SYS_UNAME = 160
+ SYS_SETHOSTNAME = 161
+ SYS_SETDOMAINNAME = 162
+ SYS_GETRUSAGE = 165
+ SYS_UMASK = 166
+ SYS_PRCTL = 167
+ SYS_GETCPU = 168
+ SYS_GETTIMEOFDAY = 169
+ SYS_SETTIMEOFDAY = 170
+ SYS_ADJTIMEX = 171
+ SYS_GETPID = 172
+ SYS_GETPPID = 173
+ SYS_GETUID = 174
+ SYS_GETEUID = 175
+ SYS_GETGID = 176
+ SYS_GETEGID = 177
+ SYS_GETTID = 178
+ SYS_SYSINFO = 179
+ SYS_MQ_OPEN = 180
+ SYS_MQ_UNLINK = 181
+ SYS_MQ_TIMEDSEND = 182
+ SYS_MQ_TIMEDRECEIVE = 183
+ SYS_MQ_NOTIFY = 184
+ SYS_MQ_GETSETATTR = 185
+ SYS_MSGGET = 186
+ SYS_MSGCTL = 187
+ SYS_MSGRCV = 188
+ SYS_MSGSND = 189
+ SYS_SEMGET = 190
+ SYS_SEMCTL = 191
+ SYS_SEMTIMEDOP = 192
+ SYS_SEMOP = 193
+ SYS_SHMGET = 194
+ SYS_SHMCTL = 195
+ SYS_SHMAT = 196
+ SYS_SHMDT = 197
+ SYS_SOCKET = 198
+ SYS_SOCKETPAIR = 199
+ SYS_BIND = 200
+ SYS_LISTEN = 201
+ SYS_ACCEPT = 202
+ SYS_CONNECT = 203
+ SYS_GETSOCKNAME = 204
+ SYS_GETPEERNAME = 205
+ SYS_SENDTO = 206
+ SYS_RECVFROM = 207
+ SYS_SETSOCKOPT = 208
+ SYS_GETSOCKOPT = 209
+ SYS_SHUTDOWN = 210
+ SYS_SENDMSG = 211
+ SYS_RECVMSG = 212
+ SYS_READAHEAD = 213
+ SYS_BRK = 214
+ SYS_MUNMAP = 215
+ SYS_MREMAP = 216
+ SYS_ADD_KEY = 217
+ SYS_REQUEST_KEY = 218
+ SYS_KEYCTL = 219
+ SYS_CLONE = 220
+ SYS_EXECVE = 221
+ SYS_MMAP = 222
+ SYS_FADVISE64 = 223
+ SYS_SWAPON = 224
+ SYS_SWAPOFF = 225
+ SYS_MPROTECT = 226
+ SYS_MSYNC = 227
+ SYS_MLOCK = 228
+ SYS_MUNLOCK = 229
+ SYS_MLOCKALL = 230
+ SYS_MUNLOCKALL = 231
+ SYS_MINCORE = 232
+ SYS_MADVISE = 233
+ SYS_REMAP_FILE_PAGES = 234
+ SYS_MBIND = 235
+ SYS_GET_MEMPOLICY = 236
+ SYS_SET_MEMPOLICY = 237
+ SYS_MIGRATE_PAGES = 238
+ SYS_MOVE_PAGES = 239
+ SYS_RT_TGSIGQUEUEINFO = 240
+ SYS_PERF_EVENT_OPEN = 241
+ SYS_ACCEPT4 = 242
+ SYS_RECVMMSG = 243
+ SYS_ARCH_SPECIFIC_SYSCALL = 244
+ SYS_WAIT4 = 260
+ SYS_PRLIMIT64 = 261
+ SYS_FANOTIFY_INIT = 262
+ SYS_FANOTIFY_MARK = 263
+ SYS_NAME_TO_HANDLE_AT = 264
+ SYS_OPEN_BY_HANDLE_AT = 265
+ SYS_CLOCK_ADJTIME = 266
+ SYS_SYNCFS = 267
+ SYS_SETNS = 268
+ SYS_SENDMMSG = 269
+ SYS_PROCESS_VM_READV = 270
+ SYS_PROCESS_VM_WRITEV = 271
+ SYS_KCMP = 272
+ SYS_FINIT_MODULE = 273
+ SYS_SCHED_SETATTR = 274
+ SYS_SCHED_GETATTR = 275
+ SYS_RENAMEAT2 = 276
+ SYS_SECCOMP = 277
+ SYS_GETRANDOM = 278
+ SYS_MEMFD_CREATE = 279
+ SYS_BPF = 280
+ SYS_EXECVEAT = 281
+ SYS_USERFAULTFD = 282
+ SYS_MEMBARRIER = 283
+ SYS_MLOCK2 = 284
+ SYS_COPY_FILE_RANGE = 285
+ SYS_PREADV2 = 286
+ SYS_PWRITEV2 = 287
+ SYS_PKEY_MPROTECT = 288
+ SYS_PKEY_ALLOC = 289
+ SYS_PKEY_FREE = 290
+ SYS_STATX = 291
+ SYS_IO_PGETEVENTS = 292
+ SYS_RSEQ = 293
+ SYS_KEXEC_FILE_LOAD = 294
+ SYS_PIDFD_SEND_SIGNAL = 424
+ SYS_IO_URING_SETUP = 425
+ SYS_IO_URING_ENTER = 426
+ SYS_IO_URING_REGISTER = 427
+ SYS_OPEN_TREE = 428
+ SYS_MOVE_MOUNT = 429
+ SYS_FSOPEN = 430
+ SYS_FSCONFIG = 431
+ SYS_FSMOUNT = 432
+ SYS_FSPICK = 433
+ SYS_PIDFD_OPEN = 434
+ SYS_CLONE3 = 435
+ SYS_CLOSE_RANGE = 436
+ SYS_OPENAT2 = 437
+ SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
+ SYS_PROCESS_MADVISE = 440
+ SYS_EPOLL_PWAIT2 = 441
+ SYS_MOUNT_SETATTR = 442
+ SYS_QUOTACTL_FD = 443
+ SYS_LANDLOCK_CREATE_RULESET = 444
+ SYS_LANDLOCK_ADD_RULE = 445
+ SYS_LANDLOCK_RESTRICT_SELF = 446
+ SYS_PROCESS_MRELEASE = 448
+ SYS_FUTEX_WAITV = 449
+ SYS_SET_MEMPOLICY_HOME_NODE = 450
+)
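
These numbers follow the generic asm-generic syscall table used by the newer 64-bit ports, so there are no legacy entries such as open or stat; that is why the wrappers above go through SYS_OPENAT, SYS_FSTATAT, SYS_PREAD64, and SYS_EPOLL_PWAIT. A sketch of using one of the numbers directly (the higher-level unix.Getpid wrapper is the normal route):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Direct invocation with a number from the table above
	// (SYS_GETPID = 172 in the asm-generic numbering).
	pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("getpid failed:", errno)
		return
	}
	fmt.Println("pid:", pid)
}
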
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
new file mode 100644
index 000000000..61fbb24f8
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -0,0 +1,679 @@
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build loong64 && linux
+// +build loong64,linux
+
+package unix
+
+const (
+ SizeofPtr = 0x8
+ SizeofLong = 0x8
+)
+
+type (
+ _C_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Timex struct {
+ Modes uint32
+ Offset int64
+ Freq int64
+ Maxerror int64
+ Esterror int64
+ Status int32
+ Constant int64
+ Precision int64
+ Tolerance int64
+ Time Timeval
+ Tick int64
+ Ppsfreq int64
+ Jitter int64
+ Shift int32
+ Stabil int64
+ Jitcnt int64
+ Calcnt int64
+ Errcnt int64
+ Stbcnt int64
+ Tai int32
+ _ [44]byte
+}
+
+type Time_t int64
+
+type Tms struct {
+ Utime int64
+ Stime int64
+ Cutime int64
+ Cstime int64
+}
+
+type Utimbuf struct {
+ Actime int64
+ Modtime int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Stat_t struct {
+ Dev uint64
+ Ino uint64
+ Mode uint32
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev uint64
+ _ uint64
+ Size int64
+ Blksize int32
+ _ int32
+ Blocks int64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ _ [2]int32
+}
+
+type Dirent struct {
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ _ [5]byte
+}
+
+type Flock_t struct {
+ Type int16
+ Whence int16
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
+}
+
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
+const (
+ FADV_DONTNEED = 0x4
+ FADV_NOREUSE = 0x5
+)
+
+type RawSockaddrNFCLLCP struct {
+ Sa_family uint16
+ Dev_idx uint32
+ Target_idx uint32
+ Nfc_protocol uint32
+ Dsap uint8
+ Ssap uint8
+ Service_name [63]uint8
+ Service_name_len uint64
+}
+
+type RawSockaddr struct {
+ Family uint16
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [96]int8
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint64
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Iov *Iovec
+ Iovlen uint64
+ Control *byte
+ Controllen uint64
+ Flags int32
+ _ [4]byte
+}
+
+type Cmsghdr struct {
+ Len uint64
+ Level int32
+ Type int32
+}
+
+type ifreq struct {
+ Ifrn [16]byte
+ Ifru [24]byte
+}
+
+const (
+ SizeofSockaddrNFCLLCP = 0x60
+ SizeofIovec = 0x10
+ SizeofMsghdr = 0x38
+ SizeofCmsghdr = 0x10
+)
+
+const (
+ SizeofSockFprog = 0x10
+)
+
+type PtraceRegs struct {
+ Regs [32]uint64
+ Orig_a0 uint64
+ Era uint64
+ Badv uint64
+ Reserved [10]uint64
+}
+
+type FdSet struct {
+ Bits [16]int64
+}
+
+type Sysinfo_t struct {
+ Uptime int64
+ Loads [3]uint64
+ Totalram uint64
+ Freeram uint64
+ Sharedram uint64
+ Bufferram uint64
+ Totalswap uint64
+ Freeswap uint64
+ Procs uint16
+ Pad uint16
+ Totalhigh uint64
+ Freehigh uint64
+ Unit uint32
+ _ [0]int8
+ _ [4]byte
+}
+
+type Ustat_t struct {
+ Tfree int32
+ Tinode uint64
+ Fname [6]int8
+ Fpack [6]int8
+ _ [4]byte
+}
+
+type EpollEvent struct {
+ Events uint32
+ _ int32
+ Fd int32
+ Pad int32
+}
+
+const (
+ OPEN_TREE_CLOEXEC = 0x80000
+)
+
+const (
+ POLLRDHUP = 0x2000
+)
+
+type Sigset_t struct {
+ Val [16]uint64
+}
+
+const _C__NSIG = 0x41
+
+type Siginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ _ int32
+ _ [112]byte
+}
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Line uint8
+ Cc [19]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+type Taskstats struct {
+ Version uint16
+ Ac_exitcode uint32
+ Ac_flag uint8
+ Ac_nice uint8
+ Cpu_count uint64
+ Cpu_delay_total uint64
+ Blkio_count uint64
+ Blkio_delay_total uint64
+ Swapin_count uint64
+ Swapin_delay_total uint64
+ Cpu_run_real_total uint64
+ Cpu_run_virtual_total uint64
+ Ac_comm [32]int8
+ Ac_sched uint8
+ Ac_pad [3]uint8
+ _ [4]byte
+ Ac_uid uint32
+ Ac_gid uint32
+ Ac_pid uint32
+ Ac_ppid uint32
+ Ac_btime uint32
+ Ac_etime uint64
+ Ac_utime uint64
+ Ac_stime uint64
+ Ac_minflt uint64
+ Ac_majflt uint64
+ Coremem uint64
+ Virtmem uint64
+ Hiwater_rss uint64
+ Hiwater_vm uint64
+ Read_char uint64
+ Write_char uint64
+ Read_syscalls uint64
+ Write_syscalls uint64
+ Read_bytes uint64
+ Write_bytes uint64
+ Cancelled_write_bytes uint64
+ Nvcsw uint64
+ Nivcsw uint64
+ Ac_utimescaled uint64
+ Ac_stimescaled uint64
+ Cpu_scaled_run_real_total uint64
+ Freepages_count uint64
+ Freepages_delay_total uint64
+ Thrashing_count uint64
+ Thrashing_delay_total uint64
+ Ac_btime64 uint64
+ Compact_count uint64
+ Compact_delay_total uint64
+}
+
+type cpuMask uint64
+
+const (
+ _NCPUBITS = 0x40
+)
+
+const (
+ CBitFieldMaskBit0 = 0x1
+ CBitFieldMaskBit1 = 0x2
+ CBitFieldMaskBit2 = 0x4
+ CBitFieldMaskBit3 = 0x8
+ CBitFieldMaskBit4 = 0x10
+ CBitFieldMaskBit5 = 0x20
+ CBitFieldMaskBit6 = 0x40
+ CBitFieldMaskBit7 = 0x80
+ CBitFieldMaskBit8 = 0x100
+ CBitFieldMaskBit9 = 0x200
+ CBitFieldMaskBit10 = 0x400
+ CBitFieldMaskBit11 = 0x800
+ CBitFieldMaskBit12 = 0x1000
+ CBitFieldMaskBit13 = 0x2000
+ CBitFieldMaskBit14 = 0x4000
+ CBitFieldMaskBit15 = 0x8000
+ CBitFieldMaskBit16 = 0x10000
+ CBitFieldMaskBit17 = 0x20000
+ CBitFieldMaskBit18 = 0x40000
+ CBitFieldMaskBit19 = 0x80000
+ CBitFieldMaskBit20 = 0x100000
+ CBitFieldMaskBit21 = 0x200000
+ CBitFieldMaskBit22 = 0x400000
+ CBitFieldMaskBit23 = 0x800000
+ CBitFieldMaskBit24 = 0x1000000
+ CBitFieldMaskBit25 = 0x2000000
+ CBitFieldMaskBit26 = 0x4000000
+ CBitFieldMaskBit27 = 0x8000000
+ CBitFieldMaskBit28 = 0x10000000
+ CBitFieldMaskBit29 = 0x20000000
+ CBitFieldMaskBit30 = 0x40000000
+ CBitFieldMaskBit31 = 0x80000000
+ CBitFieldMaskBit32 = 0x100000000
+ CBitFieldMaskBit33 = 0x200000000
+ CBitFieldMaskBit34 = 0x400000000
+ CBitFieldMaskBit35 = 0x800000000
+ CBitFieldMaskBit36 = 0x1000000000
+ CBitFieldMaskBit37 = 0x2000000000
+ CBitFieldMaskBit38 = 0x4000000000
+ CBitFieldMaskBit39 = 0x8000000000
+ CBitFieldMaskBit40 = 0x10000000000
+ CBitFieldMaskBit41 = 0x20000000000
+ CBitFieldMaskBit42 = 0x40000000000
+ CBitFieldMaskBit43 = 0x80000000000
+ CBitFieldMaskBit44 = 0x100000000000
+ CBitFieldMaskBit45 = 0x200000000000
+ CBitFieldMaskBit46 = 0x400000000000
+ CBitFieldMaskBit47 = 0x800000000000
+ CBitFieldMaskBit48 = 0x1000000000000
+ CBitFieldMaskBit49 = 0x2000000000000
+ CBitFieldMaskBit50 = 0x4000000000000
+ CBitFieldMaskBit51 = 0x8000000000000
+ CBitFieldMaskBit52 = 0x10000000000000
+ CBitFieldMaskBit53 = 0x20000000000000
+ CBitFieldMaskBit54 = 0x40000000000000
+ CBitFieldMaskBit55 = 0x80000000000000
+ CBitFieldMaskBit56 = 0x100000000000000
+ CBitFieldMaskBit57 = 0x200000000000000
+ CBitFieldMaskBit58 = 0x400000000000000
+ CBitFieldMaskBit59 = 0x800000000000000
+ CBitFieldMaskBit60 = 0x1000000000000000
+ CBitFieldMaskBit61 = 0x2000000000000000
+ CBitFieldMaskBit62 = 0x4000000000000000
+ CBitFieldMaskBit63 = 0x8000000000000000
+)
+
+type SockaddrStorage struct {
+ Family uint16
+ _ [118]int8
+ _ uint64
+}
+
+type HDGeometry struct {
+ Heads uint8
+ Sectors uint8
+ Cylinders uint16
+ Start uint64
+}
+
+type Statfs_t struct {
+ Type int64
+ Bsize int64
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid Fsid
+ Namelen int64
+ Frsize int64
+ Flags int64
+ Spare [4]int64
+}
+
+type TpacketHdr struct {
+ Status uint64
+ Len uint32
+ Snaplen uint32
+ Mac uint16
+ Net uint16
+ Sec uint32
+ Usec uint32
+ _ [4]byte
+}
+
+const (
+ SizeofTpacketHdr = 0x20
+)
+
+type RTCPLLInfo struct {
+ Ctrl int32
+ Value int32
+ Max int32
+ Min int32
+ Posmult int32
+ Negmult int32
+ Clock int64
+}
+
+type BlkpgPartition struct {
+ Start int64
+ Length int64
+ Pno int32
+ Devname [64]uint8
+ Volname [64]uint8
+ _ [4]byte
+}
+
+const (
+ BLKPG = 0x1269
+)
+
+type XDPUmemReg struct {
+ Addr uint64
+ Len uint64
+ Size uint32
+ Headroom uint32
+ Flags uint32
+ _ [4]byte
+}
+
+type CryptoUserAlg struct {
+ Name [64]int8
+ Driver_name [64]int8
+ Module_name [64]int8
+ Type uint32
+ Mask uint32
+ Refcnt uint32
+ Flags uint32
+}
+
+type CryptoStatAEAD struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatAKCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Verify_cnt uint64
+ Sign_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCipher struct {
+ Type [64]int8
+ Encrypt_cnt uint64
+ Encrypt_tlen uint64
+ Decrypt_cnt uint64
+ Decrypt_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatCompress struct {
+ Type [64]int8
+ Compress_cnt uint64
+ Compress_tlen uint64
+ Decompress_cnt uint64
+ Decompress_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatHash struct {
+ Type [64]int8
+ Hash_cnt uint64
+ Hash_tlen uint64
+ Err_cnt uint64
+}
+
+type CryptoStatKPP struct {
+ Type [64]int8
+ Setsecret_cnt uint64
+ Generate_public_key_cnt uint64
+ Compute_shared_secret_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatRNG struct {
+ Type [64]int8
+ Generate_cnt uint64
+ Generate_tlen uint64
+ Seed_cnt uint64
+ Err_cnt uint64
+}
+
+type CryptoStatLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportLarval struct {
+ Type [64]int8
+}
+
+type CryptoReportHash struct {
+ Type [64]int8
+ Blocksize uint32
+ Digestsize uint32
+}
+
+type CryptoReportCipher struct {
+ Type [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+}
+
+type CryptoReportBlkCipher struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Min_keysize uint32
+ Max_keysize uint32
+ Ivsize uint32
+}
+
+type CryptoReportAEAD struct {
+ Type [64]int8
+ Geniv [64]int8
+ Blocksize uint32
+ Maxauthsize uint32
+ Ivsize uint32
+}
+
+type CryptoReportComp struct {
+ Type [64]int8
+}
+
+type CryptoReportRNG struct {
+ Type [64]int8
+ Seedsize uint32
+}
+
+type CryptoReportAKCipher struct {
+ Type [64]int8
+}
+
+type CryptoReportKPP struct {
+ Type [64]int8
+}
+
+type CryptoReportAcomp struct {
+ Type [64]int8
+}
+
+type LoopInfo struct {
+ Number int32
+ Device uint32
+ Inode uint64
+ Rdevice uint32
+ Offset int32
+ Encrypt_type int32
+ Encrypt_key_size int32
+ Flags int32
+ Name [64]int8
+ Encrypt_key [32]uint8
+ Init [2]uint64
+ Reserved [4]int8
+ _ [4]byte
+}
+
+type TIPCSubscr struct {
+ Seq TIPCServiceRange
+ Timeout uint32
+ Filter uint32
+ Handle [8]int8
+}
+
+type TIPCSIOCLNReq struct {
+ Peer uint32
+ Id uint32
+ Linkname [68]int8
+}
+
+type TIPCSIOCNodeIDReq struct {
+ Peer uint32
+ Id [16]int8
+}
+
+type PPSKInfo struct {
+ Assert_sequence uint32
+ Clear_sequence uint32
+ Assert_tu PPSKTime
+ Clear_tu PPSKTime
+ Current_mode int32
+ _ [4]byte
+}
+
+const (
+ PPS_GETPARAMS = 0x800870a1
+ PPS_SETPARAMS = 0x400870a2
+ PPS_GETCAP = 0x800870a3
+ PPS_FETCH = 0xc00870a4
+)
+
+const (
+ PIDFD_NONBLOCK = 0x800
+)
+
+type SysvIpcPerm struct {
+ Key int32
+ Uid uint32
+ Gid uint32
+ Cuid uint32
+ Cgid uint32
+ Mode uint32
+ _ [0]uint8
+ Seq uint16
+ _ uint16
+ _ uint64
+ _ uint64
+}
+type SysvShmDesc struct {
+ Perm SysvIpcPerm
+ Segsz uint64
+ Atime int64
+ Dtime int64
+ Ctime int64
+ Cpid int32
+ Lpid int32
+ Nattch uint64
+ _ uint64
+ _ uint64
+}
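
The types above are the cgo -godefs rendering of the kernel's loong64 struct layouts, which keeps pointer-based calls such as Fstat and ClockGettime in sync with the ABI. A small sketch using the Timespec definition above, assuming a Linux build:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Timespec is the generated type above: on loong64 (as on the other
	// 64-bit ports) both Sec and Nsec are int64, matching struct timespec.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		fmt.Println("clock_gettime:", err)
		return
	}
	fmt.Printf("monotonic clock: %d.%09d s\n", ts.Sec, ts.Nsec)
}
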
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index b6b00cf2e..11f4fc369 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -9,7 +9,11 @@ package inspector
// The initial map-based implementation was too slow;
// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
-import "go/ast"
+import (
+ "go/ast"
+
+ "golang.org/x/tools/internal/typeparams"
+)
const (
nArrayType = iota
@@ -47,6 +51,7 @@ const (
nImportSpec
nIncDecStmt
nIndexExpr
+ nIndexListExpr
nInterfaceType
nKeyValueExpr
nLabeledStmt
@@ -164,6 +169,8 @@ func typeOf(n ast.Node) uint64 {
return 1 << nIncDecStmt
case *ast.IndexExpr:
return 1 << nIndexExpr
+ case *typeparams.IndexListExpr:
+ return 1 << nIndexListExpr
case *ast.InterfaceType:
return 1 << nInterfaceType
case *ast.KeyValueExpr:
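
For context: typeof.go assigns each concrete ast.Node type its own bit in a 64-bit mask so the inspector can filter traversals cheaply, and the hunk above adds a bit for the go1.18 *ast.IndexListExpr, reached through the typeparams compatibility alias so the package still compiles on older Go releases. A sketch of the filtering this enables (the source string and file name are arbitrary):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	const src = `package p
func f(xs []int) int { return xs[0] }`

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Preorder uses the bit masks computed by typeOf to visit only the
	// requested node kinds; on go1.18+ a filter could also include
	// *ast.IndexListExpr for instantiations like g[int, string].
	ins := inspector.New([]*ast.File{file})
	ins.Preorder([]ast.Node{(*ast.IndexExpr)(nil)}, func(n ast.Node) {
		fmt.Println("index expression at", fset.Position(n.Pos()))
	})
}
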
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 000000000..ab6b30b83
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,180 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that interact
+// with generic Go code, as introduced with Go 1.18.
+//
+// Many of the types and functions in this package are proxies for the new APIs
+// introduced in the standard library with Go 1.18. For example, the
+// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
+// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
+// versions older than 1.18 these helpers are implemented as stubs, allowing
+// users of this package to write code that handles generic constructs inline,
+// even if the Go version being used to compile does not support generics.
+//
+// Additionally, this package contains common utilities for working with the
+// new generic constructs, to supplement the standard library APIs. Notably,
+// the StructuralTerms API computes a minimal representation of the structural
+// restrictions on a type parameter. In the future, this API may be available
+// from go/types.
+//
+// See the example/README.md for a more detailed guide on how to update tools
+// to support generics.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter.
+func IsTypeParam(t types.Type) bool {
+ _, ok := t.(*TypeParam)
+ return ok
+}
+
+// OriginMethod returns the origin method associated with the method fn.
+// For methods on a non-generic receiver base type, this is just
+// fn. However, for methods with a generic receiver, OriginMethod returns the
+// corresponding method in the method set of the origin type.
+//
+// As a special case, if fn is not a method (has no receiver), OriginMethod
+// returns fn.
+func OriginMethod(fn *types.Func) *types.Func {
+ recv := fn.Type().(*types.Signature).Recv()
+ if recv == nil {
+ return fn
+ }
+ base := recv.Type()
+ p, isPtr := base.(*types.Pointer)
+ if isPtr {
+ base = p.Elem()
+ }
+ named, isNamed := base.(*types.Named)
+ if !isNamed {
+ // Receiver is a *types.Interface.
+ return fn
+ }
+ if ForNamed(named).Len() == 0 {
+ // Receiver base has no type parameters, so we can avoid the lookup below.
+ return fn
+ }
+ orig := NamedTypeOrigin(named)
+ gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+ return gfn.(*types.Func)
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+// type Interface[T any] interface {
+// Accept(T)
+// }
+//
+// type Container[T any] struct {
+// Element T
+// }
+//
+// func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *Context, V, T types.Type) bool {
+ // If V and T are not both named, or do not have matching non-empty type
+ // parameter lists, fall back on types.AssignableTo.
+
+ VN, Vnamed := V.(*types.Named)
+ TN, Tnamed := T.(*types.Named)
+ if !Vnamed || !Tnamed {
+ return types.AssignableTo(V, T)
+ }
+
+ vtparams := ForNamed(VN)
+ ttparams := ForNamed(TN)
+ if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
+ return types.AssignableTo(V, T)
+ }
+
+ // V and T have the same (non-zero) number of type params. Instantiate both
+ // with the type parameters of V. This must always succeed for V, and will
+ // succeed for T if and only if the type set of each type parameter of V is a
+ // subset of the type set of the corresponding type parameter of T, meaning
+ // that every instantiation of V corresponds to a valid instantiation of T.
+
+ // Minor optimization: ensure we share a context across the two
+ // instantiations below.
+ if ctxt == nil {
+ ctxt = NewContext()
+ }
+
+ var targs []types.Type
+ for i := 0; i < vtparams.Len(); i++ {
+ targs = append(targs, vtparams.At(i))
+ }
+
+ vinst, err := Instantiate(ctxt, V, targs, true)
+ if err != nil {
+ panic("type parameters should satisfy their own constraints")
+ }
+
+ tinst, err := Instantiate(ctxt, T, targs, true)
+ if err != nil {
+ return false
+ }
+
+ return types.AssignableTo(vinst, tinst)
+}
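
A minimal usage sketch for the UnpackIndexExpr helper added above. It is illustrative only: the package is internal to golang.org/x/tools, so only code inside that module can import it, and parsing a multi-index expression requires a go1.18+ toolchain.

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	fset := token.NewFileSet()
	// On go1.18+ this parses as an *ast.IndexListExpr with two indices.
	expr, err := parser.ParseExprFrom(fset, "example.go", "Pair[int, string]", 0)
	if err != nil {
		panic(err)
	}
	x, _, indices, _ := typeparams.UnpackIndexExpr(expr)
	fmt.Printf("%v has %d type arguments\n", x, len(indices)) // Pair has 2 type arguments
}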
diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
new file mode 100644
index 000000000..18212390e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = false
diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
new file mode 100644
index 000000000..d67148823
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+// Note: this constant is in a separate file as this is the only acceptable
+// diff between the <1.18 API of this package and the 1.18 API.
+
+// Enabled reports whether type parameters are enabled in the current build
+// environment.
+const Enabled = true
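
The two Enabled files give callers a compile-time switch. A hypothetical guard a tool might run at startup could look like the sketch below (the constant is resolved by the build tags above, and the package itself is internal to x/tools):

package example

import (
	"fmt"

	"golang.org/x/tools/internal/typeparams"
)

// requireGenerics reports an error when the tool was built with a
// toolchain that cannot represent type parameters.
func requireGenerics() error {
	if !typeparams.Enabled {
		return fmt.Errorf("this tool must be built with go1.18 or newer to analyze generic code")
	}
	return nil
}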
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 000000000..090f142a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,216 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+// type T[P interface{~int; m()}] int
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *TypeParam) ([]*Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *Union) ([]*Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*Term
+ for _, term := range tset.terms {
+ terms = append(terms, NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *TypeParam, *Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
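
A rough sketch of how the normalization entry point above is driven, assuming a go1.18+ toolchain and code living inside x/tools (the package is internal): type-check a small generic declaration and ask for the structural terms of its type parameter.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	src := `package p
type T[P interface{ ~int | ~string }] int`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	// Look up T, grab its first type parameter, and normalize its constraint.
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	tparam := typeparams.ForNamed(named).At(0)
	terms, err := typeparams.StructuralTerms(tparam)
	if err != nil {
		panic(err)
	}
	for _, term := range terms {
		fmt.Println(term) // ~int and ~string, in a deterministic order
	}
}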
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 000000000..10857d504
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,172 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 βˆͺ t2 βˆͺ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "βˆ…"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" βˆͺ ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓀 term, the entire list is 𝓀.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓀 term, the entire list is 𝓀.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓀 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// If the type set represented by xl is specified by a single (non-𝓀) term,
+// structuralType returns that type. Otherwise it returns nil.
+func (xl termlist) structuralType() types.Type {
+ if nl := xl.norm(); len(nl) == 1 {
+ return nl[0].typ // if nl.isAll() then typ is nil, which is ok
+ }
+ return nil
+}
+
+// union returns the union xl βˆͺ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y βŠ† xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl βŠ† yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
new file mode 100644
index 000000000..b4788978f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
@@ -0,0 +1,197 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+func unsupported() {
+ panic("type parameters are unsupported at this go version")
+}
+
+// IndexListExpr is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type IndexListExpr struct {
+ ast.Expr
+ X ast.Expr // expression
+ Lbrack token.Pos // position of "["
+ Indices []ast.Expr // index expressions
+ Rbrack token.Pos // position of "]"
+}
+
+// ForTypeSpec returns an empty field list, as type parameters are not supported
+// at this Go version.
+func ForTypeSpec(*ast.TypeSpec) *ast.FieldList {
+ return nil
+}
+
+// ForFuncType returns an empty field list, as type parameters are not
+// supported at this Go version.
+func ForFuncType(*ast.FuncType) *ast.FieldList {
+ return nil
+}
+
+// TypeParam is a placeholder type, as type parameters are not supported at
+// this Go version. Its methods panic on use.
+type TypeParam struct{ types.Type }
+
+func (*TypeParam) Index() int { unsupported(); return 0 }
+func (*TypeParam) Constraint() types.Type { unsupported(); return nil }
+func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil }
+
+// TypeParamList is a placeholder for an empty type parameter list.
+type TypeParamList struct{}
+
+func (*TypeParamList) Len() int { return 0 }
+func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil }
+
+// TypeList is a placeholder for an empty type list.
+type TypeList struct{}
+
+func (*TypeList) Len() int { return 0 }
+func (*TypeList) At(int) types.Type { unsupported(); return nil }
+
+// NewTypeParam is unsupported at this Go version, and panics.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+ unsupported()
+ return nil
+}
+
+// SetTypeParamConstraint is unsupported at this Go version, and panics.
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+ unsupported()
+}
+
+// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or
+// typeParams is non-empty.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+ if len(recvTypeParams) != 0 || len(typeParams) != 0 {
+ panic("signatures cannot have type parameters at this Go version")
+ }
+ return types.NewSignature(recv, params, results, variadic)
+}
+
+// ForSignature returns nil, as type parameters are not supported at this Go
+// version.
+func ForSignature(*types.Signature) *TypeParamList {
+ return nil
+}
+
+// RecvTypeParams returns nil, as type parameters are not supported at this Go
+// version.
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+ return nil
+}
+
+// IsComparable returns false, as no interfaces are type-restricted at this Go
+// version.
+func IsComparable(*types.Interface) bool {
+ return false
+}
+
+// IsMethodSet returns true, as no interfaces are type-restricted at this Go
+// version.
+func IsMethodSet(*types.Interface) bool {
+ return true
+}
+
+// IsImplicit returns false, as no interfaces are implicit at this Go version.
+func IsImplicit(*types.Interface) bool {
+ return false
+}
+
+// MarkImplicit does nothing, because this Go version does not have implicit
+// interfaces.
+func MarkImplicit(*types.Interface) {}
+
+// ForNamed returns an empty type parameter list, as type parameters are not
+// supported at this Go version.
+func ForNamed(*types.Named) *TypeParamList {
+ return nil
+}
+
+// SetForNamed panics if tparams is non-empty.
+func SetForNamed(_ *types.Named, tparams []*TypeParam) {
+ if len(tparams) > 0 {
+ unsupported()
+ }
+}
+
+// NamedTypeArgs returns nil.
+func NamedTypeArgs(*types.Named) *TypeList {
+ return nil
+}
+
+// NamedTypeOrigin is the identity method at this Go version.
+func NamedTypeOrigin(named *types.Named) types.Type {
+ return named
+}
+
+// Term holds information about a structural type restriction.
+type Term struct {
+ tilde bool
+ typ types.Type
+}
+
+func (m *Term) Tilde() bool { return m.tilde }
+func (m *Term) Type() types.Type { return m.typ }
+func (m *Term) String() string {
+ pre := ""
+ if m.tilde {
+ pre = "~"
+ }
+ return pre + m.typ.String()
+}
+
+// NewTerm constructs a placeholder Term. Unlike most helpers in this file, it
+// does not panic.
+func NewTerm(tilde bool, typ types.Type) *Term {
+ return &Term{tilde, typ}
+}
+
+// Union is a placeholder type, as type parameters are not supported at this Go
+// version. Its methods panic on use.
+type Union struct{ types.Type }
+
+func (*Union) Len() int { return 0 }
+func (*Union) Term(i int) *Term { unsupported(); return nil }
+
+// NewUnion is unsupported at this Go version, and panics.
+func NewUnion(terms []*Term) *Union {
+ unsupported()
+ return nil
+}
+
+// InitInstanceInfo is a noop at this Go version.
+func InitInstanceInfo(*types.Info) {}
+
+// Instance is a placeholder type, as type parameters are not supported at this
+// Go version.
+type Instance struct {
+ TypeArgs *TypeList
+ Type types.Type
+}
+
+// GetInstances returns a nil map, as type parameters are not supported at this
+// Go version.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
+
+// Context is a placeholder type, as type parameters are not supported at
+// this Go version.
+type Context struct{}
+
+// NewContext returns a placeholder Context instance.
+func NewContext() *Context {
+ return &Context{}
+}
+
+// Instantiate is unsupported on this Go version, and panics.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+ unsupported()
+ return nil, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
new file mode 100644
index 000000000..114a36b86
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
@@ -0,0 +1,151 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typeparams
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// IndexListExpr is an alias for ast.IndexListExpr.
+type IndexListExpr = ast.IndexListExpr
+
+// ForTypeSpec returns n.TypeParams.
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
+ if n == nil {
+ return nil
+ }
+ return n.TypeParams
+}
+
+// ForFuncType returns n.TypeParams.
+func ForFuncType(n *ast.FuncType) *ast.FieldList {
+ if n == nil {
+ return nil
+ }
+ return n.TypeParams
+}
+
+// TypeParam is an alias for types.TypeParam
+type TypeParam = types.TypeParam
+
+// TypeParamList is an alias for types.TypeParamList
+type TypeParamList = types.TypeParamList
+
+// TypeList is an alias for types.TypeList
+type TypeList = types.TypeList
+
+// NewTypeParam calls types.NewTypeParam.
+func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
+ return types.NewTypeParam(name, constraint)
+}
+
+// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
+func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
+ tparam.SetConstraint(constraint)
+}
+
+// NewSignatureType calls types.NewSignatureType.
+func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
+ return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
+}
+
+// ForSignature returns sig.TypeParams()
+func ForSignature(sig *types.Signature) *TypeParamList {
+ return sig.TypeParams()
+}
+
+// RecvTypeParams returns sig.RecvTypeParams().
+func RecvTypeParams(sig *types.Signature) *TypeParamList {
+ return sig.RecvTypeParams()
+}
+
+// IsComparable calls iface.IsComparable().
+func IsComparable(iface *types.Interface) bool {
+ return iface.IsComparable()
+}
+
+// IsMethodSet calls iface.IsMethodSet().
+func IsMethodSet(iface *types.Interface) bool {
+ return iface.IsMethodSet()
+}
+
+// IsImplicit calls iface.IsImplicit().
+func IsImplicit(iface *types.Interface) bool {
+ return iface.IsImplicit()
+}
+
+// MarkImplicit calls iface.MarkImplicit().
+func MarkImplicit(iface *types.Interface) {
+ iface.MarkImplicit()
+}
+
+// ForNamed extracts the (possibly empty) type parameter object list from
+// named.
+func ForNamed(named *types.Named) *TypeParamList {
+ return named.TypeParams()
+}
+
+// SetForNamed sets the type params tparams on n. Each tparam must be of
+// dynamic type *types.TypeParam.
+func SetForNamed(n *types.Named, tparams []*TypeParam) {
+ n.SetTypeParams(tparams)
+}
+
+// NamedTypeArgs returns named.TypeArgs().
+func NamedTypeArgs(named *types.Named) *TypeList {
+ return named.TypeArgs()
+}
+
+// NamedTypeOrigin returns named.Origin().
+func NamedTypeOrigin(named *types.Named) types.Type {
+ return named.Origin()
+}
+
+// Term is an alias for types.Term.
+type Term = types.Term
+
+// NewTerm calls types.NewTerm.
+func NewTerm(tilde bool, typ types.Type) *Term {
+ return types.NewTerm(tilde, typ)
+}
+
+// Union is an alias for types.Union
+type Union = types.Union
+
+// NewUnion calls types.NewUnion.
+func NewUnion(terms []*Term) *Union {
+ return types.NewUnion(terms)
+}
+
+// InitInstanceInfo initializes info to record information about type and
+// function instances.
+func InitInstanceInfo(info *types.Info) {
+ info.Instances = make(map[*ast.Ident]types.Instance)
+}
+
+// Instance is an alias for types.Instance.
+type Instance = types.Instance
+
+// GetInstances returns info.Instances.
+func GetInstances(info *types.Info) map[*ast.Ident]Instance {
+ return info.Instances
+}
+
+// Context is an alias for types.Context.
+type Context = types.Context
+
+// NewContext calls types.NewContext.
+func NewContext() *Context {
+ return types.NewContext()
+}
+
+// Instantiate calls types.Instantiate.
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
+ return types.Instantiate(ctxt, typ, targs, validate)
+}
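
Taken together, the go117 and go118 files let a tool write against one proxy API and pick up generic-instantiation data only when it is available. The helper below is a hedged sketch of that pattern (hypothetical function, compilable only inside x/tools because the package is internal):

package example

import (
	"go/ast"
	"go/importer"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

// collectInstances type-checks files and returns any recorded generic
// instantiations. Before go1.18 both proxy calls degrade gracefully:
// InitInstanceInfo is a no-op and GetInstances returns nil.
func collectInstances(fset *token.FileSet, files []*ast.File) (map[*ast.Ident]typeparams.Instance, error) {
	info := &types.Info{}
	typeparams.InitInstanceInfo(info)
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, files, info); err != nil {
		return nil, err
	}
	return typeparams.GetInstances(info), nil
}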
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 000000000..7ddee28d9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,170 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// βˆ…: (*term)(nil) == βˆ… // set of no types (empty set)
+// 𝓀: &term{} == 𝓀 // set of all types (𝓀niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+//
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "βˆ…"
+ case x.typ == nil:
+ return "𝓀"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x βˆͺ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // βˆ… βˆͺ βˆ… == βˆ…
+ case x == nil:
+ return y, nil // βˆ… βˆͺ y == y
+ case y == nil:
+ return x, nil // x βˆͺ βˆ… == x
+ case x.typ == nil:
+ return x, nil // 𝓀 βˆͺ y == 𝓀
+ case y.typ == nil:
+ return y, nil // x βˆͺ 𝓀 == 𝓀
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return x, y // x βˆͺ y == (x, y) if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t βˆͺ ~t == ~t
+ // ~t βˆͺ T == ~t
+ // T βˆͺ ~t == ~t
+ // T βˆͺ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // βˆ… ∩ y == βˆ… and x ∩ βˆ… == βˆ…
+ case x.typ == nil:
+ return y // 𝓀 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓀 == x
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == βˆ… if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ βˆ… == false
+ case x.typ == nil:
+ return true // t ∈ 𝓀 == true
+ }
+ // βˆ… βŠ‚ x βŠ‚ 𝓀
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x βŠ† y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // βˆ… βŠ† y == true
+ case y == nil:
+ return false // x βŠ† βˆ… == false since x != βˆ…
+ case y.typ == nil:
+ return true // x βŠ† 𝓀 == true
+ case x.typ == nil:
+ return false // 𝓀 βŠ† y == false since y != 𝓀
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return false // x βŠ† y == false if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t βŠ† ~t == true
+ // ~t βŠ† T == false
+ // T βŠ† ~t == true
+ // T βŠ† T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == βˆ….
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
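
Purely as an illustration of the term algebra implemented above; term is unexported, so a snippet like this could only live inside the package itself (hypothetical helper, not part of the vendored code):

package typeparams

import (
	"fmt"
	"go/types"
)

// illustrateTermAlgebra prints a few identities from the term algebra.
func illustrateTermAlgebra() {
	tInt := types.Typ[types.Int]
	x := &term{tilde: true, typ: tInt}  // ~int: all types whose underlying type is int
	y := &term{tilde: false, typ: tInt} // int: exactly the type int

	fmt.Println(x.intersect(y)) // int    (~int ∩ int == int)
	u1, u2 := x.union(y)
	fmt.Println(u1, u2)         // ~int βˆ… (~int βˆͺ int == ~int)
	fmt.Println(y.subsetOf(x))  // true   (int βŠ† ~int)
}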
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bbee8a7fb..6cb4088d6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -63,7 +63,7 @@ github.com/container-orchestrated-devices/container-device-interface/pkg/cdi
github.com/container-orchestrated-devices/container-device-interface/specs-go
# github.com/containerd/cgroups v1.0.3
github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.6.2
+# github.com/containerd/containerd v1.6.3
github.com/containerd/containerd/errdefs
github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.47.5-0.20220425182415-4081e6be9356
+# github.com/containers/common v0.48.0
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
@@ -155,7 +155,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524
+# github.com/containers/image/v5 v5.21.1
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -208,7 +208,7 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
-# github.com/containers/ocicrypt v1.1.3
+# github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
## explicit
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
@@ -235,7 +235,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b
+# github.com/containers/storage v1.40.2
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -450,11 +450,12 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.1
+# github.com/klauspost/compress v1.15.2
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
@@ -491,9 +492,6 @@ github.com/modern-go/concurrent
github.com/modern-go/reflect2
# github.com/morikuni/aec v1.0.0
github.com/morikuni/aec
-# github.com/mrunalp/fileutils v0.5.0
-## explicit
-github.com/mrunalp/fileutils
# github.com/nxadm/tail v1.4.8
## explicit
github.com/nxadm/tail
@@ -614,7 +612,7 @@ github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/rivo/uniseg v0.2.0
github.com/rivo/uniseg
-# github.com/rootless-containers/rootlesskit v1.0.0
+# github.com/rootless-containers/rootlesskit v1.0.1
## explicit
github.com/rootless-containers/rootlesskit/pkg/api
github.com/rootless-containers/rootlesskit/pkg/msgutil
@@ -646,7 +644,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.6.0
+# github.com/sylabs/sif/v2 v2.7.0
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -736,7 +734,7 @@ golang.org/x/net/trace
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+# golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -770,8 +768,9 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.1.7
+# golang.org/x/tools v0.1.10
golang.org/x/tools/go/ast/inspector
+golang.org/x/tools/internal/typeparams
# google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.44.0