Diffstat:
-rw-r--r--  .cirrus.yml | 4
-rw-r--r--  CONTRIBUTING.md | 34
-rw-r--r--  README.md | 2
-rw-r--r--  cmd/podman/common/create.go | 5
-rw-r--r--  cmd/podman/common/create_opts.go | 2
-rw-r--r--  cmd/podman/common/specgen.go | 15
-rw-r--r--  cmd/podman/root.go | 2
-rw-r--r--  completions/bash/podman | 2
-rwxr-xr-x  docs/remote-docs.sh | 62
-rw-r--r--  docs/source/markdown/podman-build.1.md | 57
-rw-r--r--  docs/source/markdown/podman-create.1.md | 12
-rw-r--r--  docs/source/markdown/podman-remote.1.md | 30
-rw-r--r--  docs/source/markdown/podman-run.1.md | 12
-rw-r--r--  go.mod | 25
-rw-r--r--  go.sum | 81
-rw-r--r--  libpod/container_api.go | 85
-rw-r--r--  libpod/container_exec.go | 24
-rw-r--r--  libpod/container_inspect.go | 1
-rw-r--r--  libpod/container_internal.go | 7
-rw-r--r--  libpod/container_internal_linux.go | 3
-rw-r--r--  libpod/define/container_inspect.go | 2
-rw-r--r--  libpod/oci.go | 7
-rw-r--r--  libpod/oci_conmon_exec_linux.go | 39
-rw-r--r--  libpod/oci_conmon_linux.go | 137
-rw-r--r--  libpod/oci_missing.go | 7
-rw-r--r--  libpod/util.go | 22
-rw-r--r--  nix/nixpkgs.json | 6
-rw-r--r--  pkg/api/handlers/compat/containers.go | 10
-rw-r--r--  pkg/api/handlers/compat/containers_attach.go | 82
-rw-r--r--  pkg/api/handlers/compat/containers_create.go | 13
-rw-r--r--  pkg/api/handlers/compat/exec.go | 24
-rw-r--r--  pkg/bindings/containers/attach.go | 2
-rw-r--r--  pkg/bindings/containers/containers.go | 2
-rw-r--r--  pkg/domain/infra/abi/system.go | 3
-rw-r--r--  pkg/spec/config_linux_cgo.go | 2
-rw-r--r--  pkg/spec/createconfig.go | 13
-rw-r--r--  pkg/spec/spec.go | 11
-rw-r--r--  pkg/specgen/generate/config_linux.go | 2
-rw-r--r--  pkg/specgen/generate/config_linux_cgo.go | 2
-rw-r--r--  pkg/specgen/generate/security.go | 26
-rw-r--r--  pkg/specgen/generate/validate.go | 6
-rw-r--r--  pkg/specgen/specgen.go | 4
-rw-r--r--  test/apiv2/20-containers.at | 27
-rw-r--r--  test/e2e/run_test.go | 12
-rw-r--r--  test/system/030-run.bats | 15
-rw-r--r--  test/system/070-build.bats | 2
-rw-r--r--  test/system/075-exec.bats | 3
-rw-r--r--  troubleshooting.md | 34
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go | 56
-rw-r--r--  vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 3
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 12
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 30
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 38
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 63
-rw-r--r--  vendor/github.com/containers/buildah/info.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 24
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 21
-rw-r--r--  vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 17
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 18
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/apparmor.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go | 97
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go | 113
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/auth.go | 12
-rw-r--r--  vendor/github.com/containers/common/pkg/capabilities/capabilities.go | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 49
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_darwin.go | 12
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_linux.go | 21
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_local.go | 35
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_windows.go | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 25
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 30
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_linux.go | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/libpodConfig.go | 23
-rw-r--r--  vendor/github.com/containers/common/pkg/config/util_supported.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/retry/retry.go | 12
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/conversion.go | 32
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp.json | 878
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go | 742
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go | 183
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go | 40
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/supported.go | 72
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/types.go | 110
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go | 8
-rw-r--r--  vendor/github.com/containers/common/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/storage/.cirrus.yml | 6
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/copy/copy_linux.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/counter.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 24
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 6
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 4
-rw-r--r--  vendor/github.com/containers/storage/store.go | 3
-rw-r--r--  vendor/github.com/go-logr/logr/LICENSE (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE) | 0
-rw-r--r--  vendor/github.com/go-logr/logr/README.md | 181
-rw-r--r--  vendor/github.com/go-logr/logr/go.mod | 3
-rw-r--r--  vendor/github.com/go-logr/logr/logr.go | 178
-rw-r--r--  vendor/github.com/gorilla/schema/.travis.yml | 18
-rw-r--r--  vendor/github.com/gorilla/schema/decoder.go | 19
-rw-r--r--  vendor/github.com/gorilla/schema/encoder.go | 7
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/bitwriter.go | 13
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/compress.go | 10
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 24
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/huff0.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockenc.go | 8
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go | 1
-rw-r--r--  vendor/github.com/opencontainers/runtime-spec/specs-go/config.go | 25
-rw-r--r--  vendor/github.com/opencontainers/runtime-spec/specs-go/state.go | 20
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/generate/generate.go | 77
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go | 14
-rw-r--r--  vendor/github.com/seccomp/containers-golang/conversion.go | 32
-rw-r--r--  vendor/github.com/seccomp/containers-golang/go.mod | 12
-rw-r--r--  vendor/github.com/seccomp/containers-golang/go.sum | 18
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go | 2
-rw-r--r--  vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go | 12
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/.travis.yml | 37
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md (renamed from vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES) | 92
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/README.md (renamed from vendor/github.com/seccomp/libseccomp-golang/README) | 26
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/go.mod | 3
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/go.sum | 23
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/seccomp.go | 84
-rw-r--r--  vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go | 53
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/mac_noasm.go | 2
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/poly1305.go | 4
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/sum_generic.go | 3
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/sum_noasm.go | 18
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/sum_s390x.go | 72
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/sum_s390x.s | 667
-rw-r--r--  vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s | 909
-rw-r--r--  vendor/golang.org/x/crypto/ssh/certs.go | 4
-rw-r--r--  vendor/golang.org/x/crypto/ssh/client_auth.go | 22
-rw-r--r--  vendor/golang.org/x/crypto/ssh/mux.go | 23
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go | 66
-rw-r--r--  vendor/golang.org/x/sys/cpu/byteorder.go | 11
-rw-r--r--  vendor/golang.org/x/sys/cpu/cpu_arm64.go | 8
-rw-r--r--  vendor/golang.org/x/sys/unix/mkerrors.sh | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_bsd.go | 17
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux.go | 40
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux.go | 66
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_386.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_arm.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mips.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go | 2
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go | 6
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go | 7
-rw-r--r--  vendor/golang.org/x/sys/unix/zsyscall_linux.go | 46
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go | 12
-rw-r--r--  vendor/golang.org/x/sys/unix/ztypes_linux.go | 416
-rw-r--r--  vendor/golang.org/x/sys/windows/memory_windows.go | 5
-rw-r--r--  vendor/golang.org/x/sys/windows/syscall_windows.go | 2
-rw-r--r--  vendor/golang.org/x/sys/windows/zsyscall_windows.go | 19
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go | 277
-rw-r--r--  vendor/google.golang.org/protobuf/encoding/prototext/decode.go | 7
-rw-r--r--  vendor/google.golang.org/protobuf/encoding/prototext/encode.go | 7
-rw-r--r--  vendor/google.golang.org/protobuf/internal/version/version.go | 2
-rw-r--r--  vendor/google.golang.org/protobuf/proto/decode.go | 7
-rw-r--r--  vendor/google.golang.org/protobuf/proto/encode.go | 5
-rw-r--r--  vendor/google.golang.org/protobuf/proto/messageset.go | 16
-rw-r--r--  vendor/google.golang.org/protobuf/proto/size.go | 33
-rw-r--r--  vendor/google.golang.org/protobuf/proto/size_gen.go | 6
-rw-r--r--  vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go | 2
-rw-r--r--  vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go | 32
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/errors.go | 82
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS | 3
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS | 1
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go | 8
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go | 830
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto | 101
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go | 4
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go | 119
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go | 37
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go | 12
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go | 17
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/conversion/converter.go | 10
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/fields/selector.go | 16
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/labels/labels.go | 14
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/labels/selector.go | 41
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/codec.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go | 10
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/converter.go | 4
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go | 17
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/scheme.go | 13
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go | 14
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go | 21
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/http.go | 248
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/interface.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/net/util.go | 31
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/validation/validation.go | 5
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go | 6
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go | 2
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/watch/watch.go | 8
-rw-r--r--  vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go | 4
-rw-r--r--  vendor/k8s.io/klog/v2/.gitignore | 17
-rw-r--r--  vendor/k8s.io/klog/v2/CONTRIBUTING.md | 22
-rw-r--r--  vendor/k8s.io/klog/v2/LICENSE | 191
-rw-r--r--  vendor/k8s.io/klog/v2/OWNERS | 19
-rw-r--r--  vendor/k8s.io/klog/v2/README.md | 99
-rw-r--r--  vendor/k8s.io/klog/v2/RELEASE.md | 9
-rw-r--r--  vendor/k8s.io/klog/v2/SECURITY_CONTACTS | 20
-rw-r--r--  vendor/k8s.io/klog/v2/code-of-conduct.md | 3
-rw-r--r--  vendor/k8s.io/klog/v2/go.mod | 5
-rw-r--r--  vendor/k8s.io/klog/v2/go.sum | 2
-rw-r--r--  vendor/k8s.io/klog/v2/klog.go | 1525
-rw-r--r--  vendor/k8s.io/klog/v2/klog_file.go | 164
-rw-r--r--  vendor/modules.txt | 44
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE | 201
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go) | 0
-rw-r--r--  vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go (renamed from vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go) | 0
253 files changed, 8966 insertions, 2854 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 964368743..1bf35e142 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -775,11 +775,11 @@ static_build_task:
build_script: |
set -ex
- mkdir -p /nix
mkdir -p .cache
- mount --bind .cache /nix
+ mv .cache /nix
if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+ mv /nix .cache
chown -Rf $(whoami) .cache
binaries_artifacts:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b8cacc127..dc48b389e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,5 +1,5 @@
![PODMAN logo](logo/podman-logo-source.svg)
-# Contributing to libpod
+# Contributing to Podman
We'd love to have you join the community! Below summarizes the processes
that we follow.
@@ -7,8 +7,8 @@ that we follow.
## Topics
* [Reporting Issues](#reporting-issues)
-* [Contributing to libpod](#contributing-to-libpod)
-* [Continuous Integration](#continuous-integration) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master)
+* [Contributing to Podman](#contributing-to-podman)
+* [Continuous Integration](#continuous-integration) [![Build Status](https://api.cirrus-ci.com/github/containers/podman.svg)](https://cirrus-ci.com/github/containers/podman/master)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
@@ -28,9 +28,9 @@ The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
-## Contributing to libpod
+## Contributing to Podman
-This section describes how to start a contribution to libpod.
+This section describes how to start a contribution to Podman.
### Prepare your environment
@@ -40,25 +40,25 @@ The install documentation will illustrate the following steps:
- install libs and tools
- check installed versions
- configure network
-- how to install libpod from sources
+- how to install Podman from sources
-### Fork and clone libpod
+### Fork and clone Podman
First you need to fork this project on GitHub.
Be sure to have [defined your `$GOPATH` environment variable](https://github.com/golang/go/wiki/GOPATH).
-Create a path that corresponds to the go import paths of libpod: `mkdir -p $GOPATH/src/github.com/containers`.
+Create a path that corresponds to the go import paths of Podman: `mkdir -p $GOPATH/src/github.com/containers`.
Then clone your fork locally:
```shell
-$ git clone git@github.com:<you>/libpod $GOPATH/src/github.com/containers/podman
+$ git clone git@github.com:<you>/podman $GOPATH/src/github.com/containers/podman
$ cd $GOPATH/src/github.com/containers/podman
```
### Deal with make
-Libpod use a Makefile to realize common action like building etc...
+Podman uses a Makefile to realize common actions like building, etc.
You can list available actions by using:
```shell
@@ -180,7 +180,7 @@ Commit f641c2d9384e ("fix bug in rm -fa parallel deletes") [...]
```
You should also be sure to use at least the first twelve characters of the
-SHA-1 ID. The libpod repository holds a lot of objects, making collisions with
+SHA-1 ID. The Podman repository holds a lot of objects, making collisions with
shorter IDs a real possibility. Bear in mind that, even if there is no
collision with your six-character ID now, that condition may change five years
from now.
@@ -284,13 +284,13 @@ leaving build artifacts in your hosts working directory. It also guarantees
every execution is based upon pristine code provided from the host.
Execution does not require any special permissions from the host. However,
-your libpod repository clone's root must be bind-mounted to the container at
+your Podman repository clone's root must be bind-mounted to the container at
'/usr/src/libpod'. The copy will be made into /var/tmp/go (`$GOSRC` in container)
before running your make target. For example, running `make lint` from a
-repository clone at $HOME/devel/libpod could be done with the commands:
+repository clone at $HOME/devel/podman could be done with the commands:
```bash
-$ cd $HOME/devel/libpod
+$ cd $HOME/devel/podman
$ podman run -it --rm -v $PWD:/usr/src/libpod:ro \
--security-opt label=disable quay.io/libpod/gate:master \
lint
@@ -310,7 +310,7 @@ For example, assuming your have a remote called `upstream` running the
validate target should be done like this:
```bash
-$ cd $HOME/devel/libpod
+$ cd $HOME/devel/podman
$ git remote update upstream
$ export EPOCH_TEST_COMMIT=$(git merge-base upstream/master HEAD)
$ podman run -it --rm -e EPOCH_TEST_COMMIT -v $PWD:/usr/src/libpod:ro \
@@ -320,7 +320,7 @@ $ podman run -it --rm -e EPOCH_TEST_COMMIT -v $PWD:/usr/src/libpod:ro \
### Integration Tests
-Our primary means of performing integration testing for libpod is with the
+Our primary means of performing integration testing for Podman is with the
[Ginkgo](https://github.com/onsi/ginkgo) BDD testing framework. This allows
us to use native Golang to perform our tests and there is a strong affiliation
between Ginkgo and the Go test framework. Adequate test cases are expected to
@@ -344,7 +344,7 @@ documentation.](contrib/cirrus/README.md)
There is always additional complexity added by automation, and so it sometimes
can fail for any number of reasons. This includes post-merge testing on all
branches, which you may occasionally see [red bars on the status graph
-.](https://cirrus-ci.com/github/containers/libpod/master)
+.](https://cirrus-ci.com/github/containers/podman/master)
When the graph shows mostly green bars on the right, it's a good indication
the master branch is currently stable. Alternating red/green bars is indicative
diff --git a/README.md b/README.md
index 50af1bdaf..449a19d45 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Podman is based on libpod, a library for container lifecycle management that is
* Latest Remote client for MacOs
* Latest Static Remote client for Linux
-* [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master)
+* [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/podman.svg)](https://cirrus-ci.com/github/containers/podman/master)
* [GoDoc: ![GoDoc](https://godoc.org/github.com/containers/podman/libpod?status.svg)](https://godoc.org/github.com/containers/podman/libpod)
## Overview and scope
diff --git a/cmd/podman/common/create.go b/cmd/podman/common/create.go
index 403a1065b..d0bc8d466 100644
--- a/cmd/podman/common/create.go
+++ b/cmd/podman/common/create.go
@@ -516,5 +516,10 @@ func GetCreateFlags(cf *ContainerCLIOpts) *pflag.FlagSet {
"seccomp-policy", "default",
"Policy for selecting a seccomp profile (experimental)",
)
+ createFlags.StringSliceVar(
+ &cf.CgroupConf,
+ "cgroup-conf", []string{},
+ "Configure cgroup v2 (key=value)",
+ )
return &createFlags
}
diff --git a/cmd/podman/common/create_opts.go b/cmd/podman/common/create_opts.go
index f9e4d7ca5..16d41988f 100644
--- a/cmd/podman/common/create_opts.go
+++ b/cmd/podman/common/create_opts.go
@@ -106,4 +106,6 @@ type ContainerCLIOpts struct {
SeccompPolicy string
Net *entities.NetOptions
+
+ CgroupConf []string
}
diff --git a/cmd/podman/common/specgen.go b/cmd/podman/common/specgen.go
index bf50bb56b..4de622916 100644
--- a/cmd/podman/common/specgen.go
+++ b/cmd/podman/common/specgen.go
@@ -450,7 +450,20 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
s.ResourceLimits.Pids = &pids
}
s.ResourceLimits.CPU = getCPULimits(c)
- if s.ResourceLimits.CPU == nil && s.ResourceLimits.Pids == nil && s.ResourceLimits.BlockIO == nil && s.ResourceLimits.Memory == nil {
+
+ unifieds := make(map[string]string)
+ for _, unified := range c.CgroupConf {
+ splitUnified := strings.SplitN(unified, "=", 2)
+ if len(splitUnified) < 2 {
+ return errors.Errorf("--cgroup-conf must be formatted KEY=VALUE")
+ }
+ unifieds[splitUnified[0]] = splitUnified[1]
+ }
+ if len(unifieds) > 0 {
+ s.ResourceLimits.Unified = unifieds
+ }
+
+ if s.ResourceLimits.CPU == nil && s.ResourceLimits.Pids == nil && s.ResourceLimits.BlockIO == nil && s.ResourceLimits.Memory == nil && s.ResourceLimits.Unified == nil {
s.ResourceLimits = nil
}
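The specgen hunk above accepts `--cgroup-conf` any number of times, splits each argument on the first `=`, and collects the pairs into `s.ResourceLimits.Unified`; anything without an `=` is rejected. A minimal sketch of what that means on the command line (the image name is illustrative, the error text is the one from the hunk):

```bash
# Two pairs accumulate into the unified cgroup v2 resource map.
podman run --rm \
    --cgroup-conf memory.high=1073741824 \
    --cgroup-conf pids.max=200 \
    fedora true

# No '=' present: rejected with "--cgroup-conf must be formatted KEY=VALUE".
podman run --rm --cgroup-conf memory.high fedora true
```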
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index 8f77e5893..749a5fbe7 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -226,7 +226,7 @@ func persistentPostRunE(cmd *cobra.Command, args []string) error {
func loggingHook() {
var found bool
for _, l := range logLevels {
- if l == logLevel {
+ if l == strings.ToLower(logLevel) {
found = true
break
}
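Because the comparison above lower-cases the user-supplied value before matching it against `logLevels`, the `--log-level` flag becomes case-insensitive; assuming the registered level names stay lowercase, an invocation such as the following is now accepted:

```bash
podman --log-level=INFO version   # previously only the lowercase form 'info' matched
```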
diff --git a/completions/bash/podman b/completions/bash/podman
index 379ba7fc8..ca2c93153 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -2107,6 +2107,7 @@ _podman_container_run() {
--cap-add
--cap-drop
--cgroup-parent
+ --cgroup-conf
--cidfile
--conmon-pidfile
--cpu-period
@@ -3671,6 +3672,7 @@ _podman_podman() {
start
stats
stop
+ system
tag
top
umount
diff --git a/docs/remote-docs.sh b/docs/remote-docs.sh
index 6d520fae6..a9fda4696 100755
--- a/docs/remote-docs.sh
+++ b/docs/remote-docs.sh
@@ -12,21 +12,22 @@ function usage() {
echo >&2 "$0 PLATFORM TARGET SOURCES..."
echo >&2 "PLATFORM: Is either linux, darwin or windows."
echo >&2 "TARGET: Is the directory where files will be staged. eg, docs/build/remote/linux"
- echo >&2 "SOURCES: Are the directories of source files. eg, docs/markdown"
+ echo >&2 "SOURCES: Are the directories of source files. eg, docs/source/markdown"
}
function fail() {
- echo >&2 -e "$@\n"
- usage
+ echo >&2 -e "$(dirname $0): $@\n"
exit 1
}
case $PLATFORM in
darwin|linux)
PUBLISHER=man_fn
+ ext=1
;;
windows)
PUBLISHER=html_fn
+ ext=1.md
;;
-help)
usage
@@ -54,7 +55,7 @@ function man_fn() {
local dir=$(dirname $page)
if [[ ! -f $page ]]; then
- page=$dir/links/${file%.*}.1
+ page=$dir/links/${file%.*}.$ext
fi
install $page $TARGET/${file%%.*}.1
}
@@ -72,6 +73,22 @@ function html_fn() {
pandoc --ascii --lua-filter=docs/links-to-html.lua -o $TARGET/${file%%.*}.html $markdown
}
+# Run 'podman help' (possibly against a subcommand, e.g. 'podman help image')
+# and return a list of each first word under 'Available Commands', that is,
+# the command name but not its description.
+function podman_commands() {
+ $PODMAN help "$@" |\
+ awk '/^Available Commands:/{ok=1;next}/^Flags:/{ok=0}ok { print $1 }' |\
+ grep .
+}
+
+function podman_all_commands(){
+ for cmd in $(podman_commands "$@") ; do
+ echo $@ $cmd
+ podman_all_commands $@ $cmd
+ done
+}
+
## pub_pages finds and publishes the remote manual pages
function pub_pages() {
local source=$1
@@ -80,12 +97,29 @@ function pub_pages() {
$publisher $f
done
- for c in "container" "image" "pod" "volume" ""; do
- local cmd=${c:+-$c}
- for s in $($PODMAN $c --help | sed -n '/^Available Commands:/,/^Flags:/p' | sed -e '1d;$d' -e '/^$/d' | awk '{print $1}'); do
- $publisher $(echo $source/podman$cmd-$s.*)
- done
- done
+
+ while IFS= read -r cmd; do
+ file="podman-${cmd// /-}"
+
+ # Source dir may have man (.1) files (linux/darwin) or .1.md (windows)
+ # but the links subdir will always be .1 (man) files
+ if [ -f $source/$file.$ext -o -f $source/links/$file.1 ]; then
+ $publisher $(echo $source/$file.$ext)
+ else
+ # This is worth failing CI for
+ fail "no doc file nor link $source/$file.$ext for 'podman $cmd'"
+ fi
+ done <<< "$(podman_all_commands)"
+}
+
+## sed syntax is different on darwin and linux
+## sed --help fails on mac, meaning we have to use mac syntax
+function sed_os(){
+ if sed --help > /dev/null 2>&1 ; then
+ $(sed -i "$@")
+ else
+ $(sed -i "" "$@")
+ fi
}
## rename renames podman-remote.ext to podman.ext, and fixes up contents to reflect change
@@ -95,14 +129,14 @@ function rename (){
local ext=${remote##*.}
mv $remote $TARGET/podman.$ext
- $(sed -i "s/podman\\\*-remote/podman/g" $TARGET/podman.$ext)
- $(sed -i "s/A\ remote\ CLI\ for\ Podman\:\ //g" $TARGET/podman.$ext)
+ sed_os "s/podman\\\*-remote/podman/g" $TARGET/podman.$ext
+ sed_os "s/A\ remote\ CLI\ for\ Podman\:\ //g" $TARGET/podman.$ext
case $PLATFORM in
darwin|linux)
- $(sed -i "s/Podman\\\*-remote/Podman\ for\ Mac/g" $TARGET/podman.$ext)
+ sed_os "s/Podman\\\*-remote/Podman\ for\ Mac/g" $TARGET/podman.$ext
;;
windows)
- $(sed -i "s/Podman\\\*-remote/Podman\ for\ Windows/g" $TARGET/podman.$ext)
+ sed_os "s/Podman\\\*-remote/Podman\ for\ Windows/g" $TARGET/podman.$ext
;;
esac
fi
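`podman_all_commands` walks `podman help` recursively and prints one space-separated command path per line, which `pub_pages` then maps to a `podman-<cmd>-<subcmd>` man page name. An abridged, illustrative transcript of that behavior (the exact list depends on the podman build being documented):

```bash
$ podman_all_commands image
image build
image exists
image history
# ...and so on, recursively; 'image build' is then looked up as
# $source/podman-image-build.$ext (or links/podman-image-build.1).
```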
diff --git a/docs/source/markdown/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index a07b55924..6618df1b9 100644
--- a/docs/source/markdown/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
@@ -351,6 +351,15 @@ another process.
Do not use existing cached images for the container build. Build from the start with a new set of cached layers.
+**--omit-timestamp** *bool-value*
+
+Set the create timestamp to epoch 0 to allow for deterministic builds (defaults to false).
+By default, the created timestamp is changed and written into the image manifest with every commit,
+causing the image's sha256 hash to be different even if the sources are exactly the same otherwise.
+When --omit-timestamp is set to true, the created timestamp is always set to the epoch and therefore not
+changed, allowing the image's sha256 to remain the same. All files committed to the layers of the image
+will get the epoch 0 timestamp.
+
**--os**=*string*
Set the OS to the provided value instead of the current operating system of the host.
@@ -736,6 +745,52 @@ $ podman build -f dev/Containerfile https://10.10.10.1/podman/context.tar.gz
## Files
+### `.dockerignore`
+
+If the file .dockerignore exists in the context directory, `podman build` reads
+its contents. Podman uses the content to exclude files and directories from
+the context directory when executing COPY and ADD directives in the
+Containerfile/Dockerfile.
+
+Users can specify a series of Unix shell globs in a .dockerignore file to
+identify files/directories to exclude.
+
+Podman supports a special wildcard string `**` which matches any number of
+directories (including zero). For example, **/*.go will exclude all files that
+end with .go that are found in all directories.
+
+Example .dockerignore file:
+
+```
+# exclude this content for image
+*/*.c
+**/output*
+src
+```
+
+`*/*.c`
+Excludes files and directories whose names end with .c in any top level subdirectory. For example, the source file include/rootless.c.
+
+`**/output*`
+Excludes files and directories starting with `output` from any directory.
+
+`src`
+Excludes files named src and the directory src as well as any content in it.
+
+Lines starting with ! (exclamation mark) can be used to make exceptions to
+exclusions. The following is an example .dockerignore file that uses this
+mechanism:
+```
+*.doc
+!Help.doc
+```
+
+Exclude all doc files except Help.doc from the image.
+
+This functionality is compatible with the handling of .dockerignore files described here:
+
+https://docs.docker.com/engine/reference/builder/#dockerignore-file
+
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which container registries should be consulted when completing image names which do not include a registry or domain portion.
@@ -752,6 +807,8 @@ If you are using `useradd` within your build script, you should pass the `--no-l
podman(1), buildah(1), containers-registries.conf(5), crun(8), runc(8), useradd(8)
## HISTORY
+Aug 2020, Additional options and .dockerignore added by Dan Walsh <dwalsh@redhat.com>
+
May 2018, Minor revisions added by Joe Doss <joe@solidadmin.com>
December 2017, Originally compiled by Tom Sweeney <tsweeney@redhat.com>
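The `--omit-timestamp` option added above targets reproducible builds: with the created timestamp pinned to epoch 0, two builds from identical sources should yield the same image digest. A hedged usage sketch (tag names are illustrative):

```bash
podman build --omit-timestamp -t repro:a .
podman build --omit-timestamp -t repro:b .
# With identical inputs, both tags are expected to point at the same IMAGE ID.
podman images repro
```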
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 976a1e681..2f59f8a09 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -89,6 +89,10 @@ The *split* option splits the current cgroup in two sub-cgroups: one for conmon
Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+**--cgroup-conf**=*KEY=VALUE*
+
+When running on cgroup v2, specify the cgroup file to write to and its value. For example **--cgroup-conf=memory.high=1073741824** sets the memory.high limit to 1GB.
+
**--cidfile**=*id*
Write the container ID to the file
@@ -648,6 +652,14 @@ Host port does not have to be specified (e.g. `podman run -p 127.0.0.1::80`).
If it is not, the container port will be randomly assigned a port on the host.
Use `podman port` to see the actual mapping: `podman port CONTAINER $CONTAINERPORT`
+**Note:** if a container will be run within a pod, it is not necessary to publish the port for
+the containers in the pod. The port must only be published by the pod itself. Pod network
+stacks act like the network stack on the host - you have a variety of containers in the pod,
+and programs in the container, all sharing a single interface and IP address, and
+associated ports. If one container binds to a port, no other container can use that port
+within the pod while it is in use. Containers in the pod can also communicate over localhost
+by having one container bind to localhost in the pod, and another connect to that port.
+
**--publish-all**, **-P**=*true|false*
Publish all exposed ports to random ports on the host interfaces. The default is *false*.
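The note on published ports added above amounts to: publish on the pod, not on individual containers, and use localhost for traffic between containers in the same pod. A small sketch of that pattern (names and images are illustrative):

```bash
# The pod itself publishes host port 8080 to pod port 80.
podman pod create --name web -p 8080:80

# Members of the pod share its network namespace: nginx is reachable on
# host port 8080 without any -p flag, and the second container can reach
# it over localhost.
podman create --pod web --name httpd nginx
podman create --pod web --name probe curlimages/curl curl -s http://localhost:80/
```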
diff --git a/docs/source/markdown/podman-remote.1.md b/docs/source/markdown/podman-remote.1.md
index 23ccaf0e6..3dcfae606 100644
--- a/docs/source/markdown/podman-remote.1.md
+++ b/docs/source/markdown/podman-remote.1.md
@@ -17,7 +17,9 @@ Podman uses Buildah(1) internally to create container images. Both tools share i
(not container) storage, hence each can use or manipulate images (but not containers)
created by the other.
-Podman-remote provides a local client interacting with a Podman backend node through a RESTful API tunneled through a ssh connection. In this context, a Podman node is a Linux system with Podman installed on it and the API service activated. Credentials for this session can be passed in using flags, environment variables, or in `podman-remote.conf`
+Podman-remote provides a local client interacting with a Podman backend node through a RESTful API tunneled through an ssh connection. In this context, a Podman node is a Linux system with Podman installed on it and the API service activated. Credentials for this session can be passed in using flags, environment variables, or in `containers.conf`.
+
+The `containers.conf` file should be placed under `$HOME/.config/containers/containers.conf` on Linux and Mac and `%APPDATA%\containers\containers.conf` on Windows.
**podman [GLOBAL OPTIONS]**
@@ -31,29 +33,19 @@ Remote connection name
Print usage statement
-**--log-level**=*level*
-
-Log messages above specified level: debug, info, warn, error (default), fatal or panic
-
-**--port**=*integer*
-
-Use an alternative port for the ssh connections. The default port is 22
+**--identity**=*path*
-**--remote-config-path**=*path*
+Path to ssh identity file. If the identity file has been encrypted, Podman prompts the user for the passphrase.
+If no identity file is provided and no user is given, Podman defaults to the user running the podman command.
+Podman prompts for the login password on the remote server.
-Alternate path for configuration file
-
-**--remote-host**=*ip*
-
-Remote host IP
-
-**--syslog**
+**--log-level**=*level*
-Output logging information to syslog as well as the console
+Log messages above specified level: debug, info, warn, error (default), fatal or panic
-**--username**=*string*
+**--url**=*value*
-Username on the remote host (defaults to current username)
+URL to access Podman service (default from `containers.conf`, rootless "unix://run/user/$UID/podman/podman.sock" or as root "unix://run/podman/podman.sock").
**--version**
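Taken together, a remote invocation addresses the service with `--url` and, over ssh, authenticates with `--identity`, falling back to `containers.conf` for anything not given on the command line. An illustrative call (host, user, and socket path are assumptions, not defaults from this page):

```bash
podman-remote --url ssh://core@buildhost:22/run/user/1000/podman/podman.sock \
              --identity ~/.ssh/podman_ed25519 \
              ps -a
```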
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index b6c1fab17..c86090167 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -104,6 +104,10 @@ The **split** option splits the current cgroup in two sub-cgroups: one for conmo
Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+**--cgroup-conf**=*KEY=VALUE*
+
+When running on cgroup v2, specify the cgroup file to write to and its value. For example **--cgroup-conf=memory.high=1073741824** sets the memory.high limit to 1GB.
+
**--cidfile**=*file*
Write the container ID to *file*.
@@ -662,6 +666,14 @@ If it is not, the container port will be randomly assigned a port on the host.
Use **podman port** to see the actual mapping: **podman port $CONTAINER $CONTAINERPORT**.
+**Note:** if a container will be run within a pod, it is not necessary to publish the port for
+the containers in the pod. The port must only be published by the pod itself. Pod network
+stacks act like the network stack on the host - you have a variety of containers in the pod,
+and programs in the container, all sharing a single interface and IP address, and
+associated ports. If one container binds to a port, no other container can use that port
+within the pod while it is in use. Containers in the pod can also communicate over localhost
+by having one container bind to localhost in the pod, and another connect to that port.
+
**--publish-all**, **-P**=**true**|**false**
Publish all exposed ports to random ports on the host interfaces. The default is **false**.
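As a concrete, hedged illustration of the `--cgroup-conf` option documented above: on a cgroup v2 host the given value is written into the container's cgroup, so the 1GB `memory.high` example from the text can be inspected from inside the container (the image is illustrative):

```bash
podman run --rm --cgroup-conf memory.high=1073741824 fedora \
    cat /sys/fs/cgroup/memory.high   # expected to print 1073741824 on cgroup v2
```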
diff --git a/go.mod b/go.mod
index 3a3173418..1f12a0f1e 100644
--- a/go.mod
+++ b/go.mod
@@ -8,14 +8,14 @@ require (
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
- github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
- github.com/containernetworking/plugins v0.8.6
- github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c
- github.com/containers/common v0.18.0
+ github.com/containernetworking/cni v0.8.0
+ github.com/containernetworking/plugins v0.8.7
+ github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c
+ github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3
github.com/containers/conmon v2.0.19+incompatible
github.com/containers/image/v5 v5.5.2
github.com/containers/psgo v1.5.1
- github.com/containers/storage v1.23.0
+ github.com/containers/storage v1.23.2
github.com/coreos/go-systemd/v22 v22.1.0
github.com/cri-o/ocicni v0.2.0
github.com/cyphar/filepath-securejoin v0.2.2
@@ -30,7 +30,7 @@ require (
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
- github.com/gorilla/schema v1.1.0
+ github.com/gorilla/schema v1.2.0
github.com/hashicorp/go-multierror v1.1.0
github.com/hpcloud/tail v1.0.0
github.com/json-iterator/go v1.1.10
@@ -40,14 +40,13 @@ require (
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b
- github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
- github.com/opencontainers/runtime-tools v0.9.1-0.20200714183735-07406c5828aa
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df
+ github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.6.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
github.com/rootless-containers/rootlesskit v0.10.0
- github.com/seccomp/containers-golang v0.5.0
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
@@ -58,11 +57,11 @@ require (
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
github.com/vishvananda/netlink v1.1.0
go.etcd.io/bbolt v1.3.5
- golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
- golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+ golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
- golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
k8s.io/api v0.18.8
- k8s.io/apimachinery v0.18.8
+ k8s.io/apimachinery v0.19.0
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
)
diff --git a/go.sum b/go.sum
index d2a3f91f1..4bc7272d9 100644
--- a/go.sum
+++ b/go.sum
@@ -64,20 +64,18 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 h1:eUMd8hlGasYcg1tBqETZtxaW3a7EIxqY7Z1g65gcKQg=
-github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6 h1:npZTLiMa4CRn6m5P9+1Dz4O1j0UeFbm8VYN6dlsw568=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c h1:+V9RQOhg1LyhyHHU33OVjO+Uan1MoVbkjufH8E/BeLU=
-github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c/go.mod h1:XVOKQHd1sP/7tFpCXIaNsUJZdTNCwVZ7YZiLnnEfrVg=
-github.com/containers/common v0.15.2/go.mod h1:rhpXuGLTEKsk/xX/x0iKGHjRadMHpBd2ZiNDugwXPEM=
-github.com/containers/common v0.18.0 h1:pZB6f17N5QV43TcT06gtx1lb0rxd/4StFdVhP9CtgQg=
-github.com/containers/common v0.18.0/go.mod h1:H2Wqvx6wkqdzT4RcTCqIG4W0HSOZwUbbNiUTX1+VohU=
+github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.7 h1:bU7QieuAp+sACI2vCzESJ3FoT860urYP+lThyZkb/2M=
+github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0=
+github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c h1:elGbJcB3UjBdk7fBxfAzUNS3IT288U1Dzm0gmhgsnB8=
+github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c/go.mod h1:+IklBLPix5wxPEWn26aDay5f5q4A5VtmNjkdyK5YVsI=
+github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
+github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3 h1:rTSiIMOH3fbCBN+2L8Xr9BJ19AejEIaBQvzkAXZCz/k=
+github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3/go.mod h1:z5HJtHWU8sopAHO0Od5s9EpVkXPrLIcNszVvN1Fc3fQ=
github.com/containers/conmon v2.0.19+incompatible h1:1bDVRvHy2MUNTUT/SW6LlHsJHQBTSwXvnKNdcB/a1vQ=
github.com/containers/conmon v2.0.19+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
@@ -89,10 +87,10 @@ github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQ
github.com/containers/psgo v1.5.1 h1:MQNb7FLbXqBdqz6u4lI2QWizVz4RSTzs1+Nk9XT1iVA=
github.com/containers/psgo v1.5.1/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
-github.com/containers/storage v1.21.2 h1:bf9IqA+g6ClBviqVG5lVCp5tTH9lvWwjYws7mVYSti0=
-github.com/containers/storage v1.21.2/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
github.com/containers/storage v1.23.0 h1:gYyNkBiihC2FvGiHOjOjpnfojYwgxpLVooTUlmD6pxs=
github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
+github.com/containers/storage v1.23.2 h1:GPZ8PXYezML1gmZ/uFaXQpyps7AH645lmdvvOJwJYNc=
+github.com/containers/storage v1.23.2/go.mod h1:AyTMMiE5ANvZJiqvatQgSZ85wAl5yHucY3NDN/kemr4=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -140,6 +138,7 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
@@ -149,6 +148,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -162,6 +162,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
@@ -182,17 +184,20 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -213,11 +218,12 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/schema v1.1.0 h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY=
-github.com/gorilla/schema v1.1.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
+github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc=
+github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -262,6 +268,8 @@ github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
+github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -269,6 +277,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJ
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -316,7 +325,6 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@@ -342,13 +350,13 @@ github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT5
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b h1:wjSgG2Z5xWv1wpAI7JbwKR9aJH0p4HJ+ROZ7ViKh9qU=
github.com/opencontainers/runc v1.0.0-rc91.0.20200708210054-ce54a9d4d79b/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df h1:5AW5dMFSXVH4Mg3WYe4z7ui64bK8n66IoWK8i6T4QZ8=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/runtime-tools v0.9.1-0.20200714183735-07406c5828aa h1:iyj+fFHVBn0xOalz9UChYzSU1K0HJ+d75b4YqShBRhI=
-github.com/opencontainers/runtime-tools v0.9.1-0.20200714183735-07406c5828aa/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
@@ -400,11 +408,12 @@ github.com/rootless-containers/rootlesskit v0.10.0/go.mod h1:OZQfuRPb+2MA1p+hmjH
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
-github.com/seccomp/containers-golang v0.5.0 h1:uUMOZIz/7TUiEO6h4ursAJY5JT55AzYiN/X5GOj9rvY=
-github.com/seccomp/containers-golang v0.5.0/go.mod h1:5fP9lgyYyklJ8fg8Geq193G1QLe0ikf34z+hZKIjmnE=
+github.com/seccomp/containers-golang v0.6.0 h1:VWPMMIDr8pAtNjCX0WvLEEK9EQi5lAm4HtJbDtAtFvQ=
+github.com/seccomp/containers-golang v0.6.0/go.mod h1:Dd9mONHvW4YdbSzdm23yf2CFw0iqvqLhO0mEFvPIvm4=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v0.0.0-20190403091019-9b3cdde74fbe/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -501,6 +510,8 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -525,6 +536,8 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -553,7 +566,6 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -564,8 +576,11 @@ golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -595,12 +610,15 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -608,8 +626,12 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -647,21 +669,30 @@ k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
+k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ=
+k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/libpod/container_api.go b/libpod/container_api.go
index c44e89042..0d7bbacd0 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -1,18 +1,14 @@
package libpod
import (
- "bufio"
"context"
"io/ioutil"
- "net"
+ "net/http"
"os"
- "strings"
- "sync"
"time"
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/libpod/events"
- "github.com/containers/podman/v2/libpod/logs"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -267,15 +263,10 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
// over the socket; if this is not set, but streamLogs is, only the logs will be
// sent.
// At least one of streamAttach and streamLogs must be set.
-func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, streamAttach, streamLogs bool) (deferredErr error) {
- isTerminal := false
- if c.config.Spec.Process != nil {
- isTerminal = c.config.Spec.Process.Terminal
- }
- // Ensure our contract of writing errors to and closing the HTTP conn is
- // honored.
+func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, streamAttach, streamLogs bool, hijackDone chan<- bool) error {
+ // Ensure we don't leak a goroutine if we exit before hijack completes.
defer func() {
- hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf)
+ close(hijackDone)
}()
if !c.batched {
@@ -299,74 +290,8 @@ func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, stre
logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
- logSize := 0
- if streamLogs {
- // Get all logs for the container
- logChan := make(chan *logs.LogLine)
- logOpts := new(logs.LogOptions)
- logOpts.Tail = -1
- logOpts.WaitGroup = new(sync.WaitGroup)
- errChan := make(chan error)
- go func() {
- var err error
- // In non-terminal mode we need to prepend with the
- // stream header.
- logrus.Debugf("Writing logs for container %s to HTTP attach", c.ID())
- for logLine := range logChan {
- if !isTerminal {
- device := logLine.Device
- var header []byte
- headerLen := uint32(len(logLine.Msg))
- logSize += len(logLine.Msg)
- switch strings.ToLower(device) {
- case "stdin":
- header = makeHTTPAttachHeader(0, headerLen)
- case "stdout":
- header = makeHTTPAttachHeader(1, headerLen)
- case "stderr":
- header = makeHTTPAttachHeader(2, headerLen)
- default:
- logrus.Errorf("Unknown device for log line: %s", device)
- header = makeHTTPAttachHeader(1, headerLen)
- }
- _, err = httpBuf.Write(header)
- if err != nil {
- break
- }
- }
- _, err = httpBuf.Write([]byte(logLine.Msg))
- if err != nil {
- break
- }
- _, err = httpBuf.Write([]byte("\n"))
- if err != nil {
- break
- }
- err = httpBuf.Flush()
- if err != nil {
- break
- }
- }
- errChan <- err
- }()
- go func() {
- logOpts.WaitGroup.Wait()
- close(logChan)
- }()
- if err := c.ReadLog(context.Background(), logOpts, logChan); err != nil {
- return err
- }
- logrus.Debugf("Done reading logs for container %s, %d bytes", c.ID(), logSize)
- if err := <-errChan; err != nil {
- return err
- }
- }
- if !streamAttach {
- return nil
- }
-
c.newContainerEvent(events.Attach)
- return c.ociRuntime.HTTPAttach(c, httpCon, httpBuf, streams, detachKeys, cancel)
+ return c.ociRuntime.HTTPAttach(c, r, w, streams, detachKeys, cancel, hijackDone, streamAttach, streamLogs)
}
// AttachResize resizes the container's terminal, which is displayed by Attach
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index bfeae0a11..2a852ab81 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -1,9 +1,8 @@
package libpod
import (
- "bufio"
"io/ioutil"
- "net"
+ "net/http"
"os"
"path/filepath"
"strconv"
@@ -373,17 +372,12 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS
}
// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
-func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) (deferredErr error) {
+func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool) error {
// TODO: How do we combine streams with the default streams set in the exec session?
- // The flow here is somewhat strange, because we need to determine if
- // there's a terminal ASAP (for error handling).
- // Until we know, assume it's true (don't add standard stream headers).
- // Add a defer to ensure our invariant (HTTP session is closed) is
- // maintained.
- isTerminal := true
+ // Ensure that we don't leak a goroutine here
defer func() {
- hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf)
+ close(hijackDone)
}()
if !c.batched {
@@ -399,8 +393,6 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, h
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
- // We can now finally get the real value of isTerminal.
- isTerminal = session.Config.Terminal
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
@@ -432,7 +424,13 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, httpCon net.Conn, h
streams.Stderr = session.Config.AttachStderr
}
- pid, attachChan, err := c.ociRuntime.ExecContainerHTTP(c, session.ID(), execOpts, httpCon, httpBuf, streams, cancel)
+ holdConnOpen := make(chan bool)
+
+ defer func() {
+ close(holdConnOpen)
+ }()
+
+ pid, attachChan, err := c.ociRuntime.ExecContainerHTTP(c, session.ID(), execOpts, r, w, streams, cancel, hijackDone, holdConnOpen)
if err != nil {
session.State = define.ExecStateStopped
session.ExitCode = define.TranslateExecErrorToExitCode(define.ExecErrorCodeGeneric, err)
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 437729c2d..835dccd71 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -465,6 +465,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
if ctrSpec.Linux.Resources.Pids != nil {
hostConfig.PidsLimit = ctrSpec.Linux.Resources.Pids.Limit
}
+ hostConfig.CgroupConf = ctrSpec.Linux.Resources.Unified
if ctrSpec.Linux.Resources.BlockIO != nil {
if ctrSpec.Linux.Resources.BlockIO.Weight != nil {
hostConfig.BlkioWeight = *ctrSpec.Linux.Resources.BlockIO.Weight
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f3f11f945..c41d81a2b 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -483,12 +483,13 @@ func (c *Container) setupStorage(ctx context.Context) error {
// Set the default Entrypoint and Command
if containerInfo.Config != nil {
+ // Set CMD in the container to the default configuration only if ENTRYPOINT is not set by the user.
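+ // (This mirrors Docker semantics: overriding ENTRYPOINT alone discards the image's default CMD.)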
+ if c.config.Entrypoint == nil && c.config.Command == nil {
+ c.config.Command = containerInfo.Config.Config.Cmd
+ }
if c.config.Entrypoint == nil {
c.config.Entrypoint = containerInfo.Config.Config.Entrypoint
}
- if c.config.Command == nil {
- c.config.Command = containerInfo.Config.Config.Cmd
- }
}
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index ea4340e00..31dbee572 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -385,7 +385,8 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err != nil {
return nil, errors.Wrapf(err, "Invalid Umask Value")
}
- g.SetProcessUmask(uint32(decVal))
+ umask := uint32(decVal)
+ g.Config.Process.User.Umask = &umask
}
// Add addition groups if c.config.GroupAdd is not empty
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 8adf3c077..44c3d515b 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -518,6 +518,8 @@ type InspectContainerHostConfig struct {
IOMaximumIOps uint64 `json:"IOMaximumIOps"`
// IOMaximumBandwidth is Windows-only and not presently implemented.
IOMaximumBandwidth uint64 `json:"IOMaximumBandwidth"`
+ // CgroupConf is the configuration for cgroup v2.
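+ // It reports the key-value pairs from the spec's Linux.Resources.Unified map (set via the `--cgroup-conf` option).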
+ CgroupConf map[string]string `json:"CgroupConf"`
}
// InspectBasicNetworkConfig holds basic configuration information (e.g. IP
diff --git a/libpod/oci.go b/libpod/oci.go
index 89850affc..924c32510 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -1,8 +1,7 @@
package libpod
import (
- "bufio"
- "net"
+ "net/http"
"github.com/containers/podman/v2/libpod/define"
"k8s.io/client-go/tools/remotecommand"
@@ -63,7 +62,7 @@ type OCIRuntime interface {
// used instead. Detach keys of "" will disable detaching via keyboard.
// The streams parameter will determine which streams to forward to the
// client.
- HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error
+ HTTPAttach(ctr *Container, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error
// AttachResize resizes the terminal in use by the given container.
AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error
@@ -80,7 +79,7 @@ type OCIRuntime interface {
// Maintains the same invariants as ExecContainer (returns on session
// start, with a goroutine running in the background to handle attach).
// The HTTP attach itself maintains the same invariants as HTTPAttach.
- ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error)
+ ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool) (int, chan error, error)
// ExecContainerDetached executes a command in a running container, but
// does not attach to it. Returns the PID of the exec session and an
// error (if starting the exec session failed)
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index cfe3745fa..c18da68fe 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -1,9 +1,9 @@
package libpod
import (
- "bufio"
"fmt"
"net"
+ "net/http"
"os"
"os/exec"
"path/filepath"
@@ -80,7 +80,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
// ExecContainerHTTP executes a new command in an existing container and
// forwards its standard streams over an attach
-func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
+func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool) (int, chan error, error) {
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
@@ -129,7 +129,7 @@ func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, o
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
- attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel)
+ attachChan <- attachExecHTTP(ctr, sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen)
close(attachChan)
}()
@@ -496,7 +496,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
}
// Attach to a container over HTTP
-func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error {
+func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool) (deferredErr error) {
if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
}
@@ -549,6 +549,37 @@ func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, s
attachStdin = streams.Stdin
}
+ // Perform hijack
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ return errors.Errorf("unable to hijack connection")
+ }
+
+ httpCon, httpBuf, err := hijacker.Hijack()
+ if err != nil {
+ return errors.Wrapf(err, "error hijacking connection")
+ }
+
+ hijackDone <- true
+
+ // Write a header to let the client know what happened
+ writeHijackHeader(r, httpBuf)
+
+ // Force a flush after the header is written.
+ if err := httpBuf.Flush(); err != nil {
+ return errors.Wrapf(err, "error flushing HTTP hijack header")
+ }
+
+ go func() {
+ // We need to hold the connection open until the complete exec
+ // function has finished. This channel will be closed in a defer
+ // in that function, so we can wait for it here.
+ // Can't be a defer, because this would block the function from
+ // returning.
+ <-holdConnOpen
+ hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf)
+ }()
+
// Next, STDIN. Avoid entirely if attachStdin unset.
if attachStdin {
go func() {
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 67593a68b..f66835771 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -5,16 +5,19 @@ package libpod
import (
"bufio"
"bytes"
+ "context"
"fmt"
"io"
"io/ioutil"
"net"
+ "net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
+ "sync"
"syscall"
"text/template"
"time"
@@ -22,6 +25,7 @@ import (
"github.com/containers/common/pkg/config"
conmonConfig "github.com/containers/conmon/runner/config"
"github.com/containers/podman/v2/libpod/define"
+ "github.com/containers/podman/v2/libpod/logs"
"github.com/containers/podman/v2/pkg/cgroups"
"github.com/containers/podman/v2/pkg/errorhandling"
"github.com/containers/podman/v2/pkg/lookup"
@@ -116,7 +120,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
if os.IsNotExist(err) {
continue
}
- return nil, errors.Wrapf(err, "cannot stat %s", path)
+ return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path %q", name, path)
}
if !stat.Mode().IsRegular() {
continue
@@ -503,7 +507,9 @@ func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error {
// this function returns.
// If this is a container with a terminal, we will stream raw. If it is not, we
// will stream with an 8-byte header to multiplex STDOUT and STDERR.
-func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) (deferredErr error) {
+// Returns any error that occurred; whether the connection was successfully
+// hijacked before that error is signaled on the hijackDone channel.
+func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) (deferredErr error) {
isTerminal := false
if ctr.config.Spec.Process != nil {
isTerminal = ctr.config.Spec.Process.Terminal
@@ -521,17 +527,21 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf
}
socketPath := buildSocketPath(attachSock)
- conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
- if err != nil {
- return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
- }
- defer func() {
- if err := conn.Close(); err != nil {
- logrus.Errorf("unable to close container %s attach socket: %q", ctr.ID(), err)
+ var conn *net.UnixConn
+ if streamAttach {
+ newConn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
+ if err != nil {
+ return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
}
- }()
+ conn = newConn
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logrus.Errorf("unable to close container %s attach socket: %q", ctr.ID(), err)
+ }
+ }()
- logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath)
+ logrus.Debugf("Successfully connected to container %s attach socket %s", ctr.ID(), socketPath)
+ }
detachString := ctr.runtime.config.Engine.DetachKeys
if detachKeys != nil {
@@ -554,6 +564,111 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf
attachStdin = streams.Stdin
}
+ logrus.Debugf("Going to hijack container %s attach connection", ctr.ID())
+
+ // Alright, let's hijack.
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ return errors.Errorf("unable to hijack connection")
+ }
+
+ httpCon, httpBuf, err := hijacker.Hijack()
+ if err != nil {
+ return errors.Wrapf(err, "error hijacking connection")
+ }
+
+ hijackDone <- true
+
+ writeHijackHeader(req, httpBuf)
+
+ // Force a flush after the header is written.
+ if err := httpBuf.Flush(); err != nil {
+ return errors.Wrapf(err, "error flushing HTTP hijack header")
+ }
+
+ defer func() {
+ hijackWriteErrorAndClose(deferredErr, ctr.ID(), isTerminal, httpCon, httpBuf)
+ }()
+
+ logrus.Debugf("Hijack for container %s attach session done, ready to stream", ctr.ID())
+
+ // TODO: This is gross. Really, really gross.
+ // I want to say we should read all the logs into an array before
+ // calling this, in container_api.go, but that could take a lot of
+ // memory...
+ // On the whole, we need to figure out a better way of doing this,
+ // though.
+ logSize := 0
+ if streamLogs {
+ logrus.Debugf("Will stream logs for container %s attach session", ctr.ID())
+
+ // Get all logs for the container
+ logChan := make(chan *logs.LogLine)
+ logOpts := new(logs.LogOptions)
+ logOpts.Tail = -1
+ logOpts.WaitGroup = new(sync.WaitGroup)
+ errChan := make(chan error)
+ go func() {
+ var err error
+ // In non-terminal mode we need to prepend with the
+ // stream header.
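+ // The header is the 8-byte Docker attach frame: one byte for the stream (0=stdin, 1=stdout, 2=stderr), three zero bytes, then a 4-byte big-endian payload length.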
+ logrus.Debugf("Writing logs for container %s to HTTP attach", ctr.ID())
+ for logLine := range logChan {
+ if !isTerminal {
+ device := logLine.Device
+ var header []byte
+ headerLen := uint32(len(logLine.Msg))
+ logSize += len(logLine.Msg)
+ switch strings.ToLower(device) {
+ case "stdin":
+ header = makeHTTPAttachHeader(0, headerLen)
+ case "stdout":
+ header = makeHTTPAttachHeader(1, headerLen)
+ case "stderr":
+ header = makeHTTPAttachHeader(2, headerLen)
+ default:
+ logrus.Errorf("Unknown device for log line: %s", device)
+ header = makeHTTPAttachHeader(1, headerLen)
+ }
+ _, err = httpBuf.Write(header)
+ if err != nil {
+ break
+ }
+ }
+ _, err = httpBuf.Write([]byte(logLine.Msg))
+ if err != nil {
+ break
+ }
+ _, err = httpBuf.Write([]byte("\n"))
+ if err != nil {
+ break
+ }
+ err = httpBuf.Flush()
+ if err != nil {
+ break
+ }
+ }
+ errChan <- err
+ }()
+ go func() {
+ logOpts.WaitGroup.Wait()
+ close(logChan)
+ }()
+ if err := ctr.ReadLog(context.Background(), logOpts, logChan); err != nil {
+ return err
+ }
+ logrus.Debugf("Done reading logs for container %s, %d bytes", ctr.ID(), logSize)
+ if err := <-errChan; err != nil {
+ return err
+ }
+ }
+ if !streamAttach {
+ logrus.Debugf("Done streaming logs for container %s attach, exiting as attach streaming not requested", ctr.ID())
+ return nil
+ }
+
+ logrus.Debugf("Forwarding attach output for container %s", ctr.ID())
+
// Handle STDOUT/STDERR
go func() {
var err error
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 83a6aaf90..9d12972d4 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -1,9 +1,8 @@
package libpod
import (
- "bufio"
"fmt"
- "net"
+ "net/http"
"path/filepath"
"sync"
@@ -111,7 +110,7 @@ func (r *MissingRuntime) UnpauseContainer(ctr *Container) error {
}
// HTTPAttach is not available as the runtime is missing
-func (r *MissingRuntime) HTTPAttach(ctr *Container, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool) error {
+func (r *MissingRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error {
return r.printError()
}
@@ -126,7 +125,7 @@ func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options
}
// ExecContainerHTTP is not available as the runtime is missing
-func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
+func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, req *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool) (int, chan error, error) {
return -1, nil, r.printError()
}
diff --git a/libpod/util.go b/libpod/util.go
index c93ba7919..585b07aca 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -5,6 +5,7 @@ import (
"encoding/binary"
"fmt"
"io"
+ "net/http"
"os"
"os/exec"
"path/filepath"
@@ -257,6 +258,27 @@ func makeHTTPAttachHeader(stream byte, length uint32) []byte {
return header
}
+// writeHijackHeader writes a header appropriate for the type of HTTP Hijack
+// that occurred in a hijacked HTTP connection used for attach.
+func writeHijackHeader(r *http.Request, conn io.Writer) {
+ // AttachHeader is the literal header sent for upgraded/hijacked connections for
+ // attach, sourced from Docker at:
+ // https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go
+ // Using literally to ensure compatibility with existing clients.
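+ // A plain attach request gets "HTTP/1.1 200 OK" with the raw-stream content type;
+ // a request that asked to upgrade gets "HTTP/1.1 101 UPGRADED" echoing the requested protocol.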
+ c := r.Header.Get("Connection")
+ proto := r.Header.Get("Upgrade")
+ if len(proto) == 0 || !strings.EqualFold(c, "Upgrade") {
+ // OK - can't upgrade if not requested or protocol is not specified
+ fmt.Fprintf(conn,
+ "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+ } else {
+ // Upgraded
+ fmt.Fprintf(conn,
+ "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: %s\r\n\r\n",
+ proto)
+ }
+}
+
// Convert OCICNI port bindings into Inspect-formatted port bindings.
func makeInspectPortBindings(bindings []ocicni.PortMapping) map[string][]define.InspectHostPort {
portBindings := make(map[string][]define.InspectHostPort)
diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
index 976284ed4..6ef89ff82 100644
--- a/nix/nixpkgs.json
+++ b/nix/nixpkgs.json
@@ -1,7 +1,7 @@
{
"url": "https://github.com/nixos/nixpkgs",
- "rev": "d6a445fe821052861b379d9b6c02d21623c25464",
- "date": "2020-08-11T04:28:16+01:00",
- "sha256": "064scwaxg8qg4xbmq07hag57saa4bhsb4pgg5h5vfs4nhhwvchg9",
+ "rev": "5f212d693fe1c82f9c7e20cd57bc69802b36a321",
+ "date": "2020-08-22T01:42:23+02:00",
+ "sha256": "1h3819ppllcpw07j884bjh07sma07vrrk1md92sf93cg43nmzncf",
"fetchSubmodules": false
}
diff --git a/pkg/api/handlers/compat/containers.go b/pkg/api/handlers/compat/containers.go
index 6943b15ff..1ae6a990b 100644
--- a/pkg/api/handlers/compat/containers.go
+++ b/pkg/api/handlers/compat/containers.go
@@ -319,6 +319,14 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
SizeRootFs: &inspect.SizeRootFs,
}
+ // set Path and Args
+ processArgs := l.Config().Spec.Process.Args
+ if len(processArgs) > 0 {
+ cb.Path = processArgs[0]
+ }
+ if len(processArgs) > 1 {
+ cb.Args = processArgs[1:]
+ }
stopTimeout := int(l.StopTimeout())
exposedPorts := make(nat.PortSet)
@@ -346,7 +354,7 @@ func LibpodToContainerJSON(l *libpod.Container, sz bool) (*types.ContainerJSON,
OpenStdin: inspect.Config.OpenStdin,
StdinOnce: inspect.Config.StdinOnce,
Env: inspect.Config.Env,
- Cmd: inspect.Config.Cmd,
+ Cmd: l.Command(),
Healthcheck: nil,
ArgsEscaped: false,
Image: imageName,
diff --git a/pkg/api/handlers/compat/containers_attach.go b/pkg/api/handlers/compat/containers_attach.go
index 2d63ac56d..e20d48d86 100644
--- a/pkg/api/handlers/compat/containers_attach.go
+++ b/pkg/api/handlers/compat/containers_attach.go
@@ -1,12 +1,7 @@
package compat
import (
- "bufio"
- "fmt"
- "io"
- "net"
"net/http"
- "strings"
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/define"
@@ -97,75 +92,30 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
return
}
- connection, buffer, err := AttachConnection(w, r)
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
- logrus.Debugf("Hijack for attach of container %s successful", ctr.ID())
+ idleTracker := r.Context().Value("idletracker").(*idletracker.IdleTracker)
+ hijackChan := make(chan bool, 1)
// Perform HTTP attach.
// HTTPAttach will handle everything about the connection from here on
// (including closing it and writing errors to it).
- if err := ctr.HTTPAttach(connection, buffer, streams, detachKeys, nil, query.Stream, query.Logs); err != nil {
+ if err := ctr.HTTPAttach(r, w, streams, detachKeys, nil, query.Stream, query.Logs, hijackChan); err != nil {
+ hijackComplete := <-hijackChan
+
// We can't really do anything about errors anymore. HTTPAttach
// should be writing them to the connection.
logrus.Errorf("Error attaching to container %s: %v", ctr.ID(), err)
- }
- logrus.Debugf("Attach for container %s completed successfully", ctr.ID())
-}
-
-type HijackedConnection struct {
- net.Conn // Connection
- idleTracker *idletracker.IdleTracker // Connection tracker
-}
-
-func (c HijackedConnection) Close() error {
- logrus.Debugf("Hijacked connection closed")
-
- c.idleTracker.TrackHijackedClosed()
- return c.Conn.Close()
-}
-
-func AttachConnection(w http.ResponseWriter, r *http.Request) (net.Conn, *bufio.ReadWriter, error) {
- idleTracker := r.Context().Value("idletracker").(*idletracker.IdleTracker)
-
- // Hijack the connection
- hijacker, ok := w.(http.Hijacker)
- if !ok {
- return nil, nil, errors.Errorf("unable to hijack connection")
- }
-
- connection, buffer, err := hijacker.Hijack()
- if err != nil {
- return nil, nil, errors.Wrapf(err, "error hijacking connection")
- }
- trackedConnection := HijackedConnection{
- Conn: connection,
- idleTracker: idleTracker,
+ if hijackComplete {
+ // We do need to tell the idle tracker that the
+ // connection has been closed, though. We can guarantee
+ // that is true after HTTPAttach exits.
+ idleTracker.TrackHijackedClosed()
+ } else {
+ // A hijack was not successfully completed. We need to
+ // report the error normally.
+ utils.InternalServerError(w, err)
+ }
}
- WriteAttachHeaders(r, trackedConnection)
-
- return trackedConnection, buffer, nil
-}
-
-func WriteAttachHeaders(r *http.Request, connection io.Writer) {
- // AttachHeader is the literal header sent for upgraded/hijacked connections for
- // attach, sourced from Docker at:
- // https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go
- // Using literally to ensure compatibility with existing clients.
- c := r.Header.Get("Connection")
- proto := r.Header.Get("Upgrade")
- if len(proto) == 0 || !strings.EqualFold(c, "Upgrade") {
- // OK - can't upgrade if not requested or protocol is not specified
- fmt.Fprintf(connection,
- "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
- } else {
- // Upraded
- fmt.Fprintf(connection,
- "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: %s\r\n\r\n",
- proto)
- }
+ logrus.Debugf("Attach for container %s completed successfully", ctr.ID())
}
diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go
index 8238d2d93..93e4fe540 100644
--- a/pkg/api/handlers/compat/containers_create.go
+++ b/pkg/api/handlers/compat/containers_create.go
@@ -87,20 +87,21 @@ func makeCreateConfig(ctx context.Context, containerConfig *config.Config, input
workDir = input.WorkingDir
}
- if input.Entrypoint == nil {
- entrypointSlice, err := newImage.Entrypoint(ctx)
+ // Only use image's Cmd when the user does not set the entrypoint
+ if input.Entrypoint == nil && len(input.Cmd) == 0 {
+ cmdSlice, err := newImage.Cmd(ctx)
if err != nil {
return createconfig.CreateConfig{}, err
}
- input.Entrypoint = entrypointSlice
+ input.Cmd = cmdSlice
}
- if len(input.Cmd) == 0 {
- cmdSlice, err := newImage.Cmd(ctx)
+ if input.Entrypoint == nil {
+ entrypointSlice, err := newImage.Entrypoint(ctx)
if err != nil {
return createconfig.CreateConfig{}, err
}
- input.Cmd = cmdSlice
+ input.Entrypoint = entrypointSlice
}
stopTimeout := containerConfig.Engine.StopTimeout
diff --git a/pkg/api/handlers/compat/exec.go b/pkg/api/handlers/compat/exec.go
index 7a62a2b58..1db950f85 100644
--- a/pkg/api/handlers/compat/exec.go
+++ b/pkg/api/handlers/compat/exec.go
@@ -10,6 +10,7 @@ import (
"github.com/containers/podman/v2/libpod/define"
"github.com/containers/podman/v2/pkg/api/handlers"
"github.com/containers/podman/v2/pkg/api/handlers/utils"
+ "github.com/containers/podman/v2/pkg/api/server/idletracker"
"github.com/containers/podman/v2/pkg/specgen/generate"
"github.com/gorilla/mux"
"github.com/pkg/errors"
@@ -173,15 +174,24 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
return
}
- connection, buffer, err := AttachConnection(w, r)
- if err != nil {
- utils.InternalServerError(w, err)
- return
- }
- logrus.Debugf("Hijack for attach of container %s exec session %s successful", sessionCtr.ID(), sessionID)
+ idleTracker := r.Context().Value("idletracker").(*idletracker.IdleTracker)
+ hijackChan := make(chan bool, 1)
+
+ if err := sessionCtr.ExecHTTPStartAndAttach(sessionID, r, w, nil, nil, nil, hijackChan); err != nil {
+ hijackComplete := <-hijackChan
- if err := sessionCtr.ExecHTTPStartAndAttach(sessionID, connection, buffer, nil, nil, nil); err != nil {
logrus.Errorf("Error attaching to container %s exec session %s: %v", sessionCtr.ID(), sessionID, err)
+
+ if hijackComplete {
+ // We do need to tell the idle tracker that the
+ // connection has been closed, though. We can guarantee
+ // that is true after ExecHTTPStartAndAttach exits.
+ idleTracker.TrackHijackedClosed()
+ } else {
+ // A hijack was not successfully completed. We need to
+ // report the error normally.
+ utils.InternalServerError(w, err)
+ }
}
logrus.Debugf("Attach for container %s exec session %s completed successfully", sessionCtr.ID(), sessionID)
diff --git a/pkg/bindings/containers/attach.go b/pkg/bindings/containers/attach.go
index c035b6391..3bd85fbae 100644
--- a/pkg/bindings/containers/attach.go
+++ b/pkg/bindings/containers/attach.go
@@ -46,6 +46,8 @@ func Attach(ctx context.Context, nameOrID string, detachKeys *string, logs, stre
stderr = (io.Writer)(nil)
}
+ logrus.Infof("Going to attach to container %q", nameOrID)
+
conn, err := bindings.GetClient(ctx)
if err != nil {
return err
diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go
index c1eb23233..981912665 100644
--- a/pkg/bindings/containers/containers.go
+++ b/pkg/bindings/containers/containers.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/podman/v2/pkg/bindings"
"github.com/containers/podman/v2/pkg/domain/entities"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
var (
@@ -180,6 +181,7 @@ func Restart(ctx context.Context, nameOrID string, timeout *int) error {
// or a partial/full ID. The optional parameter for detach keys are to override the default
// detach key sequence.
func Start(ctx context.Context, nameOrID string, detachKeys *string) error {
+ logrus.Infof("Going to start container %q", nameOrID)
conn, err := bindings.GetClient(ctx)
if err != nil {
return err
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index 478fac1d5..ff1052d86 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -8,6 +8,7 @@ import (
"os/exec"
"path/filepath"
"strconv"
+ "strings"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v2/libpod/define"
@@ -73,7 +74,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command)
initCommand, err := ioutil.ReadFile("/proc/1/comm")
// On errors, default to systemd
- runsUnderSystemd := err != nil || string(initCommand) == "systemd"
+ runsUnderSystemd := err != nil || strings.TrimRight(string(initCommand), "\n") == "systemd"
unitName := fmt.Sprintf("podman-%d.scope", os.Getpid())
if runsUnderSystemd || conf.Engine.CgroupManager == config.SystemdCgroupsManager {
diff --git a/pkg/spec/config_linux_cgo.go b/pkg/spec/config_linux_cgo.go
index 186a3a788..da92f511f 100644
--- a/pkg/spec/config_linux_cgo.go
+++ b/pkg/spec/config_linux_cgo.go
@@ -5,10 +5,10 @@ package createconfig
import (
"io/ioutil"
+ goSeccomp "github.com/containers/common/pkg/seccomp"
"github.com/containers/podman/v2/pkg/seccomp"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- goSeccomp "github.com/seccomp/containers-golang"
"github.com/sirupsen/logrus"
)
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index c49d51fc5..e0c875fe9 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -31,12 +31,13 @@ const (
type CreateResourceConfig struct {
BlkioWeight uint16 // blkio-weight
BlkioWeightDevice []string // blkio-weight-device
- CPUPeriod uint64 // cpu-period
- CPUQuota int64 // cpu-quota
- CPURtPeriod uint64 // cpu-rt-period
- CPURtRuntime int64 // cpu-rt-runtime
- CPUShares uint64 // cpu-shares
- CPUs float64 // cpus
+ CgroupConf map[string]string
+ CPUPeriod uint64 // cpu-period
+ CPUQuota int64 // cpu-quota
+ CPURtPeriod uint64 // cpu-rt-period
+ CPURtRuntime int64 // cpu-rt-runtime
+ CPUShares uint64 // cpu-shares
+ CPUs float64 // cpus
CPUsetCPUs string
CPUsetMems string // cpuset-mems
DeviceCgroupRules []string //device-cgroup-rule
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index 893ae3cab..5e97620cc 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -180,7 +180,16 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
g.AddMount(cgroupMnt)
}
g.SetProcessCwd(config.WorkDir)
- g.SetProcessArgs(config.Command)
+
+ ProcessArgs := make([]string, 0)
+ if len(config.Entrypoint) > 0 {
+ ProcessArgs = config.Entrypoint
+ }
+ if len(config.Command) > 0 {
+ ProcessArgs = append(ProcessArgs, config.Command...)
+ }
+ g.SetProcessArgs(ProcessArgs)
+
g.SetProcessTerminal(config.Tty)
for key, val := range config.Annotations {
diff --git a/pkg/specgen/generate/config_linux.go b/pkg/specgen/generate/config_linux.go
index 35508c023..1d5dcd8e7 100644
--- a/pkg/specgen/generate/config_linux.go
+++ b/pkg/specgen/generate/config_linux.go
@@ -90,7 +90,7 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error {
}
st, err := os.Stat(resolvedDevicePath)
if err != nil {
- return errors.Wrapf(err, "cannot stat %s", devicePath)
+ return errors.Wrapf(err, "cannot stat device path %s", devicePath)
}
if st.IsDir() {
found := false
diff --git a/pkg/specgen/generate/config_linux_cgo.go b/pkg/specgen/generate/config_linux_cgo.go
index f35d56750..21a1c910d 100644
--- a/pkg/specgen/generate/config_linux_cgo.go
+++ b/pkg/specgen/generate/config_linux_cgo.go
@@ -6,12 +6,12 @@ import (
"context"
"io/ioutil"
+ goSeccomp "github.com/containers/common/pkg/seccomp"
"github.com/containers/podman/v2/libpod/image"
"github.com/containers/podman/v2/pkg/seccomp"
"github.com/containers/podman/v2/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
- goSeccomp "github.com/seccomp/containers-golang"
"github.com/sirupsen/logrus"
)
diff --git a/pkg/specgen/generate/security.go b/pkg/specgen/generate/security.go
index 5e4cc3399..d3e3d9278 100644
--- a/pkg/specgen/generate/security.go
+++ b/pkg/specgen/generate/security.go
@@ -112,7 +112,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
// Pass capRequiredRequested in CapAdd field to normalize capabilities names
capsRequired, err := capabilities.MergeCapabilities(nil, capsRequiredRequested, nil)
if err != nil {
- logrus.Errorf("capabilities requested by user or image are not valid: %q", strings.Join(capsRequired, ","))
+ return errors.Wrapf(err, "capabilities requested by user or image are not valid: %q", strings.Join(capsRequired, ","))
} else {
// Verify all capRequiered are in the capList
for _, cap := range capsRequired {
@@ -129,12 +129,6 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
}
}
- g.SetProcessNoNewPrivileges(s.NoNewPrivileges)
-
- if err := setupApparmor(s, rtc, g); err != nil {
- return err
- }
-
configSpec := g.Config
configSpec.Process.Capabilities.Bounding = caplist
@@ -142,13 +136,21 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
configSpec.Process.Capabilities.Effective = caplist
configSpec.Process.Capabilities.Permitted = caplist
configSpec.Process.Capabilities.Inheritable = caplist
- configSpec.Process.Capabilities.Ambient = caplist
} else {
- configSpec.Process.Capabilities.Effective = []string{}
- configSpec.Process.Capabilities.Permitted = []string{}
- configSpec.Process.Capabilities.Inheritable = []string{}
- configSpec.Process.Capabilities.Ambient = []string{}
+ userCaps, err := capabilities.NormalizeCapabilities(s.CapAdd)
+ if err != nil {
+ return errors.Wrapf(err, "capabilities requested by user are not valid: %q", strings.Join(s.CapAdd, ","))
+ }
+ configSpec.Process.Capabilities.Effective = userCaps
+ configSpec.Process.Capabilities.Permitted = userCaps
}
+
+ g.SetProcessNoNewPrivileges(s.NoNewPrivileges)
+
+ if err := setupApparmor(s, rtc, g); err != nil {
+ return err
+ }
+
// HANDLE SECCOMP
if s.SeccompProfilePath != "unconfined" {
seccompConfig, err := getSeccompConfig(s, configSpec, newImage)
diff --git a/pkg/specgen/generate/validate.go b/pkg/specgen/generate/validate.go
index dca45cc0e..ed337321b 100644
--- a/pkg/specgen/generate/validate.go
+++ b/pkg/specgen/generate/validate.go
@@ -23,6 +23,12 @@ func verifyContainerResources(s *specgen.SpecGenerator) ([]string, error) {
return warnings, nil
}
+ if s.ResourceLimits.Unified != nil {
+ if !cgroup2 {
+ return nil, errors.New("Cannot use --cgroup-conf without cgroup v2")
+ }
+ }
+
// Memory checks
if s.ResourceLimits.Memory != nil {
memory := s.ResourceLimits.Memory
diff --git a/pkg/specgen/specgen.go b/pkg/specgen/specgen.go
index a9161071b..a52225f87 100644
--- a/pkg/specgen/specgen.go
+++ b/pkg/specgen/specgen.go
@@ -415,6 +415,10 @@ type ContainerResourceConfig struct {
ThrottleReadIOPSDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"`
// IO write rate limit per cgroup per device, IO per second
ThrottleWriteIOPSDevice map[string]spec.LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"`
+ // CgroupConf are key-value options passed into the container runtime
+ // that are used to configure cgroup v2.
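+ // For example, "memory.high": "1073741824" sets the cgroup v2 memory.high limit.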
+ // Optional.
+ CgroupConf map[string]string `json:"unified,omitempty"`
}
// ContainerHealthCheckConfig describes a container healthcheck with attributes
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index ed333d382..9ea3cb7ed 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -151,4 +151,31 @@ t DELETE images/localhost/newrepo:v2?force=true 200
t DELETE libpod/containers/$cid 204
t DELETE libpod/containers/myctr 204
+
+# test apiv2 create container with correct entrypoint and cmd
+# --data '{"Image":"quay.io/libpod/alpine_labels:latest","Entrypoint":["echo"],"Cmd":["param1","param2"]}'
+t POST containers/create '"Image":"'$IMAGE'","Entrypoint":["echo"],"Cmd":["param1","param2"]' 201 \
+ .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+t GET containers/$cid/json 200 \
+ .Config.Entrypoint[0]="echo" \
+ .Config.Cmd[0]="param1" \
+ .Config.Cmd[1]="param2" \
+ .Path="echo" \
+ .Args[0]="param1" \
+ .Args[1]="param2"
+t DELETE containers/$cid 204
+
+# test setting only the entrypoint; Cmd should be []
+t POST containers/create '"Image":"'$IMAGE'","Entrypoint":["echo","param1"]' 201 \
+ .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+t GET containers/$cid/json 200 \
+ .Config.Entrypoint[0]="echo" \
+ .Config.Entrypoint[1]="param1" \
+ .Config.Cmd='[]' \
+ .Path="echo" \
+ .Args[0]="param1"
+t DELETE containers/$cid 204
+
# vim: filetype=sh
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 157b7d3d7..91b0d3e48 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -1204,4 +1204,16 @@ WORKDIR /madethis`
// nonprintables seem to work their way in.
Expect(session.OutputToString()).To(Not(ContainSubstring("/bin/sh")))
})
+
+ It("podman run a container with log-level (lower case)", func() {
+ session := podmanTest.Podman([]string{"--log-level=info", "run", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
+
+ It("podman run a container with log-level (upper case)", func() {
+ session := podmanTest.Podman([]string{"--log-level=INFO", "run", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/system/030-run.bats b/test/system/030-run.bats
index 34afd5bae..198c8881d 100644
--- a/test/system/030-run.bats
+++ b/test/system/030-run.bats
@@ -294,11 +294,22 @@ echo $rand | 0 | $rand
run_podman run -d --userns=keep-id $IMAGE sh -c 'while ! test -e /stop; do sleep 0.1; done'
cid="$output"
+ # Assign a UID that is (a) not in our image /etc/passwd and (b) not
+ # the same as that of the user running the test script; this guarantees
+ # that the added passwd entry will be what we expect.
+ #
+ # For GID, we have to use one that already exists in the container. And
+ # unfortunately, 'adduser' requires a string name. We use 999:ping
+ local uid=4242
+ if [[ $uid == $(id -u) ]]; then
+ uid=4343
+ fi
+
gecos="$(random_string 6) $(random_string 8)"
- run_podman exec --user root $cid adduser -D -g "$gecos" -s /bin/sh newuser3
+ run_podman exec --user root $cid adduser -u $uid -G ping -D -g "$gecos" -s /bin/sh newuser3
is "$output" "" "output from adduser"
run_podman exec $cid tail -1 /etc/passwd
- is "$output" "newuser3:x:1000:1000:$gecos:/home/newuser3:/bin/sh" \
+ is "$output" "newuser3:x:$uid:999:$gecos:/home/newuser3:/bin/sh" \
"newuser3 added to /etc/passwd in container"
run_podman exec $cid touch /stop
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 0e6e97d40..997699ecb 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -12,7 +12,7 @@ load helpers
rand_content=$(random_string 50)
tmpdir=$PODMAN_TMPDIR/build-test
- run mkdir -p $tmpdir || die "Could not mkdir $tmpdir"
+ mkdir -p $tmpdir
dockerfile=$tmpdir/Dockerfile
cat >$dockerfile <<EOF
FROM $IMAGE
diff --git a/test/system/075-exec.bats b/test/system/075-exec.bats
index 5f71e2acb..e9db8c27e 100644
--- a/test/system/075-exec.bats
+++ b/test/system/075-exec.bats
@@ -90,7 +90,6 @@ load helpers
}
# #6829 : add username to /etc/passwd inside container if --userns=keep-id
-# #6593 : doesn't actually work with podman exec
@test "podman exec - with keep-id" {
run_podman run -d --userns=keep-id $IMAGE sh -c \
"echo READY;while [ ! -f /stop ]; do sleep 1; done"
@@ -100,8 +99,6 @@ load helpers
run_podman exec $cid id -un
is "$output" "$(id -un)" "container is running as current user"
- # Until #6593 gets fixed, this just hangs. The server process barfs with:
- # unable to find user <username>: no matching entries in passwd file
run_podman exec --user=$(id -un) $cid touch /stop
run_podman wait $cid
run_podman rm $cid
diff --git a/troubleshooting.md b/troubleshooting.md
index 4c452404c..7e8f9bcb0 100644
--- a/troubleshooting.md
+++ b/troubleshooting.md
@@ -558,3 +558,37 @@ _eof
In order to effect root running containers and all users, modify the system
wide defaults in /etc/containers/containers.conf
+
+
+### 23) Container with exposed ports won't run in a pod
+
+A container whose ports have been published with the `--publish` or `-p` option
+cannot be run within a pod.
+
+#### Symptom
+
+```
+$ podman pod create --name srcview -p 127.0.0.1:3434:3434 -p 127.0.0.1:7080:7080 -p 127.0.0.1:3370:3370 4b2f4611fa2cbd60b3899b936368c2b3f4f0f68bc8e6593416e0ab8ecb0a3f1d
+
+$ podman run --pod srcview --name src-expose -p 3434:3434 -v "${PWD}:/var/opt/localrepo":Z,ro sourcegraph/src-expose:latest serve /var/opt/localrepo
+Error: cannot set port bindings on an existing container network namespace
+```
+
+#### Solution
+
+This is a known limitation. If a container will be run within a pod, its ports do not
+need to be published individually; only the pod itself needs to publish them. A pod's
+network stack behaves like the network stack on the host: all of the containers in the
+pod, and the programs running inside them, share a single interface, IP address, and set
+of ports. If one container binds to a port, no other container in the pod can use that
+port while it is in use. Containers in the pod can also communicate over localhost, with
+one container binding to a port and another connecting to it.
+
+In the example from the symptom section, dropping the `-p 3434:3434` would allow the
+`podman run` command to complete, and the container as part of the pod would still have
+access to that port. For example:
+
+```
+$ podman run --pod srcview --name src-expose -v "${PWD}:/var/opt/localrepo":Z,ro sourcegraph/src-expose:latest serve /var/opt/localrepo
+```
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
index 4f89a5dda..5ab5cc885 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
@@ -21,6 +21,8 @@ import (
"fmt"
"io"
"os/exec"
+ "strings"
+ "time"
"github.com/containernetworking/cni/pkg/types"
)
@@ -31,34 +33,54 @@ type RawExec struct {
func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
c := exec.CommandContext(ctx, pluginPath)
c.Env = environ
c.Stdin = bytes.NewBuffer(stdinData)
c.Stdout = stdout
- c.Stderr = e.Stderr
- if err := c.Run(); err != nil {
- return nil, pluginErr(err, stdout.Bytes())
+ c.Stderr = stderr
+
+ // Retry the command on "text file busy" errors
+ for i := 0; i <= 5; i++ {
+ err := c.Run()
+
+ // Command succeeded
+ if err == nil {
+ break
+ }
+
+ // If the plugin binary is still being written to disk, wait a
+ // second and try again
+ if strings.Contains(err.Error(), "text file busy") {
+ time.Sleep(time.Second)
+ continue
+ }
+
+ // Any other error is returned to the caller immediately
+ return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes())
}
+ // Copy stderr to caller's buffer in case plugin printed to both
+ // stdout and stderr for some reason. Ignore failures as stderr is
+ // only informational.
+ if e.Stderr != nil && stderr.Len() > 0 {
+ _, _ = stderr.WriteTo(e.Stderr)
+ }
return stdout.Bytes(), nil
}
-func pluginErr(err error, output []byte) error {
- if exitError, ok := err.(*exec.ExitError); ok {
- emsg := types.Error{}
- if len(output) == 0 {
- if len(exitError.Stderr) == 0 {
- emsg.Msg = "netplugin failed with no error message"
- } else {
- emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(exitError.Stderr))
- }
- } else if perr := json.Unmarshal(output, &emsg); perr != nil {
- emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr)
+func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error {
+ emsg := types.Error{}
+ if len(stdout) == 0 {
+ if len(stderr) == 0 {
+ emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err)
+ } else {
+ emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr))
}
- return &emsg
+ } else if perr := json.Unmarshal(stdout, &emsg); perr != nil {
+ emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr)
}
-
- return err
+ return &emsg
}
func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) {
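The retry loop added to `ExecPlugin` above matches on the error string; the sketch below shows the same retry-on-"text file busy" pattern in a standalone form. It is only an illustration against the standard library, not the CNI API: `runWithBusyRetry` and its callback are made up here, and a fresh `exec.Cmd` is built per attempt because a started `Cmd` cannot be re-run.

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// runWithBusyRetry runs the command produced by newCmd, retrying a few times
// when the kernel reports ETXTBSY ("text file busy"), which can happen while
// the binary is still being written to disk. Any other error is returned
// immediately.
func runWithBusyRetry(newCmd func() *exec.Cmd, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		// Build a fresh Cmd each time: a started exec.Cmd cannot be reused.
		err = newCmd().Run()
		if err == nil {
			return nil
		}
		if strings.Contains(err.Error(), "text file busy") {
			time.Sleep(time.Second)
			continue
		}
		return err
	}
	return err
}

func main() {
	err := runWithBusyRetry(func() *exec.Cmd { return exec.Command("true") }, 5)
	fmt.Println("result:", err)
}
```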
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
index a34f97170..3b745d491 100644
--- a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
@@ -26,6 +26,11 @@ import (
// Returns an object representing the current OS thread's network namespace
func GetCurrentNS() (NetNS, error) {
+ // Lock the thread so that no other goroutine can run on it and change its
+ // network namespace after getCurrentThreadNetNSPath() returns; otherwise
+ // GetNS might open an unexpected network namespace.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
return GetNS(getCurrentThreadNetNSPath())
}
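The comment added to `GetCurrentNS` above describes a scheduling race; the following standalone sketch (Linux-only, standard library only, with made-up names) shows the same lock-the-thread-before-reading-its-namespace pattern:

```go
package main

import (
	"fmt"
	"os"
	"runtime"
	"syscall"
)

// currentThreadNetNS opens the calling thread's network namespace. The OS
// thread is locked for the duration so no other goroutine can be scheduled
// onto it and switch its namespace between building the /proc path and the
// open; the returned file descriptor then pins the namespace.
func currentThreadNetNS() (*os.File, error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	path := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid())
	return os.Open(path)
}

func main() {
	f, err := currentThreadNetNS()
	if err != nil {
		fmt.Println("open netns:", err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}
```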
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index a47a48453..8fd652ce1 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -32,7 +32,8 @@ env:
PRIOR_FEDORA_NAME: "fedora-31"
UBUNTU_NAME: "ubuntu-20"
PRIOR_UBUNTU_NAME: "ubuntu-19"
- _BUILT_IMAGE_SUFFIX: "libpod-6508632441356288"
+
+ _BUILT_IMAGE_SUFFIX: "podman-6530021898584064"
FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 8a96ed931..f5be7efbd 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -310,6 +310,9 @@ type CommonBuildOptions struct {
// LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
// Recognized field names are "role", "type", and "level".
LabelOpts []string
+ // OmitTimestamp forces the created timestamp to epoch 0 to allow for
+ // deterministic, content-addressable builds.
+ OmitTimestamp bool
// SeccompProfilePath is the pathname of a seccomp profile.
SeccompProfilePath string
// ApparmorProfile is the name of an apparmor profile.
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 8616c4cac..7a83a73a3 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -206,6 +206,11 @@ func runUsingChrootMain() {
os.Exit(1)
}
+ if options.Spec == nil {
+ fmt.Fprintf(os.Stderr, "invalid options spec in runUsingChrootMain\n")
+ os.Exit(1)
+ }
+
// Prepare to shuttle stdio back and forth.
rootUID32, rootGID32, err := util.GetHostRootIDs(options.Spec)
if err != nil {
@@ -657,7 +662,12 @@ func runUsingChrootExecMain() {
// Set the hostname. We're already in a distinct UTS namespace and are admins in the user
// namespace which created it, so we shouldn't get a permissions error, but seccomp policy
// might deny our attempt to call sethostname() anyway, so log a debug message for that.
- if options.Spec != nil && options.Spec.Hostname != "" {
+ if options.Spec == nil {
+ fmt.Fprintf(os.Stderr, "invalid options spec passed in\n")
+ os.Exit(1)
+ }
+
+ if options.Spec.Hostname != "" {
if err := unix.Sethostname([]byte(options.Spec.Hostname)); err != nil {
logrus.Debugf("failed to set hostname %q for process: %v", options.Spec.Hostname, err)
}
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index c4d70e795..9e692546b 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,17 +4,17 @@ go 1.12
require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
- github.com/containers/common v0.15.2
+ github.com/containers/common v0.19.0
github.com/containers/image/v5 v5.5.1
github.com/containers/ocicrypt v1.0.3
- github.com/containers/storage v1.20.2
+ github.com/containers/storage v1.23.0
github.com/cyphar/filepath-securejoin v0.2.2
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/fsouza/go-dockerclient v1.6.5
github.com/ghodss/yaml v1.0.0
- github.com/hashicorp/go-multierror v1.0.0
+ github.com/hashicorp/go-multierror v1.1.0
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/mattn/go-shellwords v1.0.10
github.com/onsi/ginkgo v1.14.0
@@ -22,12 +22,12 @@ require (
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc91
- github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.6.0
github.com/openshift/imagebuilder v1.1.6
github.com/pkg/errors v0.9.1
- github.com/seccomp/containers-golang v0.5.0
+ github.com/seccomp/containers-golang v0.6.0
github.com/seccomp/libseccomp-golang v0.9.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
@@ -37,7 +37,7 @@ require (
go.etcd.io/bbolt v1.3.5
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
- golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
golang.org/x/text v0.3.3 // indirect
k8s.io/klog v1.0.0 // indirect
)
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 1ea944af7..e7d10f739 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -36,6 +36,7 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmY
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -51,8 +52,8 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.15.2 h1:KNNnSxeWRlghZPTVu07pjMWCRKvDObWykglf4ZFVDVI=
-github.com/containers/common v0.15.2/go.mod h1:rhpXuGLTEKsk/xX/x0iKGHjRadMHpBd2ZiNDugwXPEM=
+github.com/containers/common v0.19.0 h1:nya/Fh51kiyV0cAO31ejoNwvRAeYreymsO820yjfc3Y=
+github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
@@ -63,6 +64,8 @@ github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6Gz
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
+github.com/containers/storage v1.23.0 h1:gYyNkBiihC2FvGiHOjOjpnfojYwgxpLVooTUlmD6pxs=
+github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -72,6 +75,7 @@ github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQa
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@@ -158,6 +162,8 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
@@ -179,6 +185,8 @@ github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqy
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
+github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -202,6 +210,7 @@ github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJd
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -222,8 +231,6 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.13.0 h1:M76yO2HkZASFjXL0HSoZJ1AYEmQxNJmY41Jx1zNUq1Y=
-github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -246,10 +253,11 @@ github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT5
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445 h1:y8cfsJRmn8g3VkM4IDpusKSgMUZEXhudm/BuYANLozE=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
@@ -294,12 +302,13 @@ github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQl
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
-github.com/seccomp/containers-golang v0.5.0 h1:uUMOZIz/7TUiEO6h4ursAJY5JT55AzYiN/X5GOj9rvY=
-github.com/seccomp/containers-golang v0.5.0/go.mod h1:5fP9lgyYyklJ8fg8Geq193G1QLe0ikf34z+hZKIjmnE=
+github.com/seccomp/containers-golang v0.6.0 h1:VWPMMIDr8pAtNjCX0WvLEEK9EQi5lAm4HtJbDtAtFvQ=
+github.com/seccomp/containers-golang v0.6.0/go.mod h1:Dd9mONHvW4YdbSzdm23yf2CFw0iqvqLhO0mEFvPIvm4=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -336,6 +345,7 @@ github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
@@ -415,7 +425,6 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -426,6 +435,9 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 57d8ecb93..8ca94924a 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -1,6 +1,7 @@
package buildah
import (
+ "archive/tar"
"bytes"
"context"
"encoding/json"
@@ -284,6 +285,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
return nil, err
}
+ omitTimestamp := i.created.Equal(time.Unix(0, 0))
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
for _, layerID := range layers {
@@ -356,7 +358,6 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
}
srcHasher := digest.Canonical.Digester()
- reader := io.TeeReader(rc, srcHasher.Hash())
// Set up to write the possibly-recompressed blob.
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
@@ -367,14 +368,40 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
counter := ioutils.NewWriteCounter(layerFile)
multiWriter := io.MultiWriter(counter, destHasher.Hash())
// Compress the layer, if we're recompressing it.
- writer, err := archive.CompressStream(multiWriter, i.compression)
+ writeCloser, err := archive.CompressStream(multiWriter, i.compression)
if err != nil {
layerFile.Close()
rc.Close()
return nil, errors.Wrapf(err, "error compressing %s", what)
}
- size, err := io.Copy(writer, reader)
- writer.Close()
+ writer := io.MultiWriter(writeCloser, srcHasher.Hash())
+ // Zero out timestamps in the layer, if we're doing that for
+ // history entries.
+ if omitTimestamp {
+ nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
+ writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
+ // Changing a zeroed field to a non-zero field
+ // can affect the format that the library uses
+ // for writing the header, so only change
+ // fields that are already set to avoid
+ // changing the format (and as a result,
+ // changing the length) of the header that we
+ // write.
+ if !hdr.ModTime.IsZero() {
+ hdr.ModTime = i.created
+ }
+ if !hdr.AccessTime.IsZero() {
+ hdr.AccessTime = i.created
+ }
+ if !hdr.ChangeTime.IsZero() {
+ hdr.ChangeTime = i.created
+ }
+ return false, false, nil
+ })
+ writer = io.Writer(writeCloser)
+ }
+ size, err := io.Copy(writer, rc)
+ writeCloser.Close()
layerFile.Close()
rc.Close()
if err != nil {
@@ -679,7 +706,7 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
}
if options.OmitTimestamp {
- created = time.Unix(0, 0)
+ created = time.Unix(0, 0).UTC()
}
parent := ""
@@ -714,5 +741,6 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
preEmptyLayers: b.PrependedEmptyLayers,
postEmptyLayers: b.AppendedEmptyLayers,
}
+
return ref, nil
}
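The `newTarFilterer` hunk above rewrites layer timestamps so that `--omit-timestamp` builds are reproducible. As a rough, standard-library-only illustration of the same idea (the helper below is hypothetical and much simpler than buildah's filterer), a tar stream can be copied with every already-set timestamp clamped to a fixed epoch:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"time"
)

// rewriteTarTimestamps copies a tar stream from r to w, clamping every
// already-set timestamp to epoch. Zero-valued fields are left alone so the
// header format (and therefore its length) does not change.
func rewriteTarTimestamps(w io.Writer, r io.Reader, epoch time.Time) error {
	tr := tar.NewReader(r)
	tw := tar.NewWriter(w)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return tw.Close()
		}
		if err != nil {
			return err
		}
		if !hdr.ModTime.IsZero() {
			hdr.ModTime = epoch
		}
		if !hdr.AccessTime.IsZero() {
			hdr.AccessTime = epoch
		}
		if !hdr.ChangeTime.IsZero() {
			hdr.ChangeTime = epoch
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return err
		}
	}
}

func main() {
	// Build a one-file archive in memory, then rewrite it to epoch 0.
	var in, out bytes.Buffer
	tw := tar.NewWriter(&in)
	body := []byte("hello\n")
	_ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: int64(len(body)), ModTime: time.Now()})
	_, _ = tw.Write(body)
	_ = tw.Close()

	if err := rewriteTarTimestamps(&out, &in, time.Unix(0, 0).UTC()); err != nil {
		fmt.Println("rewrite failed:", err)
		return
	}
	fmt.Println("rewrote", out.Len(), "bytes with zeroed timestamps")
}
```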
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 1fa276d01..185c93ad3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -168,6 +168,9 @@ type BuildOptions struct {
SignBy string
// Architecture specifies the target architecture of the image to be built.
Architecture string
+ // OmitTimestamp forces the created timestamp to epoch 0 to allow for
+ // deterministic, content-addressable builds.
+ OmitTimestamp bool
// OS specifies the operating system of the image to be built.
OS string
// MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index 943e2c8cc..f3ef584e6 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -100,6 +100,7 @@ type Executor struct {
devices []configs.Device
signBy string
architecture string
+ omitTimestamp bool
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
@@ -200,6 +201,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
devices: devices,
signBy: options.SignBy,
architecture: options.Architecture,
+ omitTimestamp: options.OmitTimestamp,
os: options.OS,
maxPullPushRetries: options.MaxPullPushRetries,
retryPullPushDelay: options.PullPushRetryDelay,
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 5b5828d01..f9cf2312a 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -296,6 +296,14 @@ func (s *StageExecutor) digestSpecifiedContent(ctx context.Context, node *parser
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
from := strings.TrimPrefix(flag, "--from=")
+
+ // If the --from value references a build argument, resolve it to the
+ // argument's value; otherwise it is used as-is.
+ var fromErr error
+ from, fromErr = imagebuilder.ProcessWord(from, s.stage.Builder.Arguments())
+ if fromErr != nil {
+ return "", errors.Wrapf(fromErr, "unable to resolve argument %q", from)
+ }
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
return "", err
}
@@ -886,6 +894,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
+
+ // If the source name in arr[1] references a build argument, resolve it
+ // to the argument's value; otherwise it is used as-is.
+ var arr1Err error
+ arr[1], arr1Err = imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
+ if arr1Err != nil {
+ return "", nil, errors.Wrapf(arr1Err, "unable to resolve argument %q", arr[1])
+ }
if isStage, err := s.executor.waitForStage(ctx, arr[1], s.stages[:s.index]); isStage && err != nil {
return "", nil, err
}
@@ -1064,6 +1080,31 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return imgID, ref, nil
}
+func historyEntriesEqual(base, derived v1.History) bool {
+ if base.CreatedBy != derived.CreatedBy {
+ return false
+ }
+ if base.Comment != derived.Comment {
+ return false
+ }
+ if base.Author != derived.Author {
+ return false
+ }
+ if base.EmptyLayer != derived.EmptyLayer {
+ return false
+ }
+ if base.Created != nil && derived.Created == nil {
+ return false
+ }
+ if base.Created == nil && derived.Created != nil {
+ return false
+ }
+ if base.Created != nil && derived.Created != nil && !base.Created.Equal(*derived.Created) {
+ return false
+ }
+ return true
+}
+
// historyMatches returns true if a candidate history matches the history of our
// base image (if we have one), plus the current instruction.
// Used to verify whether a cache of the intermediate image exists and whether
@@ -1076,25 +1117,7 @@ func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.N
return false
}
for i := range baseHistory {
- if baseHistory[i].CreatedBy != history[i].CreatedBy {
- return false
- }
- if baseHistory[i].Comment != history[i].Comment {
- return false
- }
- if baseHistory[i].Author != history[i].Author {
- return false
- }
- if baseHistory[i].EmptyLayer != history[i].EmptyLayer {
- return false
- }
- if baseHistory[i].Created != nil && history[i].Created == nil {
- return false
- }
- if baseHistory[i].Created == nil && history[i].Created != nil {
- return false
- }
- if baseHistory[i].Created != nil && history[i].Created != nil && *baseHistory[i].Created != *history[i].Created {
+ if !historyEntriesEqual(baseHistory[i], history[i]) {
return false
}
}
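The cache-matching code above now compares the `Created` timestamps with `time.Time.Equal` (via `historyEntriesEqual`) instead of dereferencing the pointers and using `!=`. A small self-contained illustration of why that matters:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Two values for the same instant, one in UTC and one in a fixed zone.
	// == compares the raw struct fields (including the location), so it
	// reports false, while Equal compares the instant and reports true.
	t1 := time.Date(2020, 8, 1, 12, 0, 0, 0, time.UTC)
	t2 := t1.In(time.FixedZone("UTC+2", 2*60*60))

	fmt.Println("==   :", t1 == t2)     // false
	fmt.Println("Equal:", t1.Equal(t2)) // true
}
```

History timestamps that have been serialized and re-parsed (for example from an image's JSON config) can carry a different location for the same instant, which is exactly the case `Equal` handles and `!=` does not.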
@@ -1290,6 +1313,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
s.builder.SetHealthcheck(nil)
}
s.builder.ClearLabels()
+ s.builder.SetLabel(buildah.BuilderIdentityAnnotation, buildah.Version)
for k, v := range config.Labels {
s.builder.SetLabel(k, v)
}
@@ -1331,6 +1355,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
+ OmitTimestamp: s.executor.omitTimestamp,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go
index 06fc09612..f0bf92ddf 100644
--- a/vendor/github.com/containers/buildah/info.go
+++ b/vendor/github.com/containers/buildah/info.go
@@ -64,12 +64,12 @@ func hostInfo() map[string]interface{} {
if err != nil {
logrus.Error(err, "err reading memory info")
info["MemTotal"] = ""
- info["MenFree"] = ""
+ info["MemFree"] = ""
info["SwapTotal"] = ""
info["SwapFree"] = ""
} else {
info["MemTotal"] = mi.MemTotal
- info["MenFree"] = mi.MemFree
+ info["MemFree"] = mi.MemFree
info["SwapTotal"] = mi.SwapTotal
info["SwapFree"] = mi.SwapFree
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 977013a39..c1751bc8c 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -65,6 +65,7 @@ type BudResults struct {
Logfile string
Loglevel int
NoCache bool
+ OmitTimestamp bool
OS string
Platform string
Pull bool
@@ -126,17 +127,12 @@ func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
// GetNameSpaceFlags returns the common flags for a namespace menu
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
- fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'container', `path` of IPC namespace to join, or 'host'")
- fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'container', `path` of network namespace to join, or 'host'")
- // TODO How do we alias net and network?
- fs.StringVar(&flags.Network, "net", "", "'container', `path` of network namespace to join, or 'host'")
- if err := fs.MarkHidden("net"); err != nil {
- panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
- }
+ fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'")
+ fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'")
fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", util.DefaultCNIConfigDir, "`directory` of CNI configuration files")
fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", util.DefaultCNIPluginPath, "`path` of CNI network plugins")
- fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "container, `path` of PID namespace to join, or 'host'")
- fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "container, :`path` of UTS namespace to join, or 'host'")
+ fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "'private', `path` of PID namespace to join, or 'host'")
+ fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "'private', `path` of UTS namespace to join, or 'host'")
return fs
}
@@ -168,6 +164,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
+ fs.BoolVar(&flags.OmitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
@@ -282,3 +279,12 @@ func VerifyFlagsArgsOrder(args []string) error {
}
return nil
}
+
+// AliasFlags handles backwards compatibility with deprecated flag names such as --net
+func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "net":
+ name = "network"
+ }
+ return pflag.NormalizedName(name)
+}
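The new `AliasFlags` helper above is a pflag name-normalization function. As an assumed usage (wired to a throwaway flag set here, not to buildah's real command wiring), registering such a function with `SetNormalizeFunc` makes the deprecated `--net` spelling resolve to `--network`:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// aliasFlags maps legacy flag names onto their current spelling.
func aliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
	if name == "net" {
		name = "network"
	}
	return pflag.NormalizedName(name)
}

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	network := fs.String("network", "", "network namespace to join")
	fs.SetNormalizeFunc(aliasFlags)

	// The deprecated spelling is accepted and lands in the same variable.
	if err := fs.Parse([]string{"--net", "host"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("network =", *network) // network = host
}
```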
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 656a7c654..f5f91d22d 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -101,7 +101,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
}
dnsOptions := []string{}
- if c.Flag("dns-search").Changed {
+ if c.Flag("dns-option").Changed {
dnsOptions, _ = c.Flags().GetStringSlice("dns-option")
if noDNS && len(dnsOptions) > 0 {
return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none")
@@ -784,11 +784,14 @@ func IDMappingOptions(c *cobra.Command, isolation buildah.Isolation) (usernsOpti
if c.Flag("userns").Changed {
how := c.Flag("userns").Value.String()
switch how {
- case "", "container":
+ case "", "container", "private":
usernsOption.Host = false
case "host":
usernsOption.Host = true
default:
+ if strings.HasPrefix(how, "ns:") {
+ how = how[3:]
+ }
if _, err := os.Stat(how); err != nil {
return nil, nil, errors.Wrapf(err, "error checking for %s namespace at %q", string(specs.UserNamespace), how)
}
@@ -798,11 +801,8 @@ func IDMappingOptions(c *cobra.Command, isolation buildah.Isolation) (usernsOpti
}
usernsOptions = buildah.NamespaceOptions{usernsOption}
- // Because --net and --network are technically two different flags, we need
- // to check each for nil and .Changed
- usernet := c.Flags().Lookup("net")
usernetwork := c.Flags().Lookup("network")
- if (usernet != nil && usernetwork != nil) && (!usernet.Changed && !usernetwork.Changed) {
+ if usernetwork != nil && !usernetwork.Changed {
usernsOptions = append(usernsOptions, buildah.NamespaceOption{
Name: string(specs.NetworkNamespace),
Host: usernsOption.Host,
@@ -851,15 +851,15 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
func NamespaceOptions(c *cobra.Command) (namespaceOptions buildah.NamespaceOptions, networkPolicy buildah.NetworkConfigurationPolicy, err error) {
options := make(buildah.NamespaceOptions, 0, 7)
policy := buildah.NetworkDefault
- for _, what := range []string{string(specs.IPCNamespace), "net", "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
+ for _, what := range []string{string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
if c.Flags().Lookup(what) != nil && c.Flag(what).Changed {
how := c.Flag(what).Value.String()
switch what {
- case "net", "network":
+ case "network":
what = string(specs.NetworkNamespace)
}
switch how {
- case "", "container":
+ case "", "container", "private":
logrus.Debugf("setting %q namespace to %q", what, "")
options.AddOrReplace(buildah.NamespaceOption{
Name: what,
@@ -890,6 +890,9 @@ func NamespaceOptions(c *cobra.Command) (namespaceOptions buildah.NamespaceOptio
break
}
}
+ if strings.HasPrefix(how, "ns:") {
+ how = how[3:]
+ }
if _, err := os.Stat(how); err != nil {
return nil, buildah.NetworkDefault, errors.Wrapf(err, "error checking for %s namespace at %q", what, how)
}
diff --git a/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go b/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go
index 5e3c6291a..a36c3eda4 100644
--- a/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go
+++ b/vendor/github.com/containers/buildah/pkg/supplemented/supplemented.go
@@ -370,11 +370,13 @@ func (s *supplementedImageSource) GetSignatures(ctx context.Context, instanceDig
func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
var src types.ImageSource
requestInstanceDigest := instanceDigest
+ errMsgDigest := ""
if instanceDigest == nil {
if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok {
src = sourceInstance
}
} else {
+ errMsgDigest = string(*instanceDigest)
if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok {
src = sourceInstance
}
@@ -396,5 +398,5 @@ func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanc
}
return blobInfos, nil
}
- return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", *instanceDigest)
+ return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", errMsgDigest)
}
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 3af9049b7..e21e3cd91 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -192,7 +192,10 @@ func (b *Builder) Run(command []string, options RunOptions) error {
if err != nil {
return err
}
- bindFiles["/etc/hosts"] = hostFile
+ // Only bind /etc/hosts if there's a network
+ if options.ConfigureNetwork != NetworkDisabled {
+ bindFiles["/etc/hosts"] = hostFile
+ }
}
if !(contains(volumes, "/etc/resolv.conf") || (len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none")) {
@@ -200,7 +203,10 @@ func (b *Builder) Run(command []string, options RunOptions) error {
if err != nil {
return err
}
- bindFiles["/etc/resolv.conf"] = resolvFile
+ // Only bind /etc/resolv.conf if there's a network
+ if options.ConfigureNetwork != NetworkDisabled {
+ bindFiles["/etc/resolv.conf"] = resolvFile
+ }
}
// Empty file, so no need to recreate if it exists
if _, ok := bindFiles["/run/.containerenv"]; !ok {
@@ -1453,9 +1459,10 @@ func runUsingRuntimeMain() {
if err := setChildProcess(); err != nil {
os.Exit(1)
}
- var ospec *specs.Spec
- if options.Spec != nil {
- ospec = options.Spec
+ ospec := options.Spec
+ if ospec == nil {
+ fmt.Fprintf(os.Stderr, "options spec not specified\n")
+ os.Exit(1)
}
// Run the container, start to finish.
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 2f923357c..f95c5ba57 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -420,3 +420,21 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
}
return nil
}
+
+// IsContainer identifies if the specified container id is a buildah container
+// in the specified store.
+func IsContainer(id string, store storage.Store) (bool, error) {
+ cdir, err := store.ContainerDirectory(id)
+ if err != nil {
+ return false, err
+ }
+ // Assume that if the stateFile exists, this is a Buildah
+ // container.
+ if _, err = os.Stat(filepath.Join(cdir, stateFile)); err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, errors.Wrapf(err, "error stating %q", filepath.Join(cdir, stateFile))
+ }
+ return true, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor.go
index 8046f45f5..146280df2 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor.go
@@ -15,7 +15,6 @@ const (
)
var (
-
// ErrApparmorUnsupported indicates that AppArmor is not supported.
ErrApparmorUnsupported = errors.New("AppArmor is not supported")
// ErrApparmorRootless indicates that AppArmor is not supported in rootless mode.
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
index 307249f3d..4f11e4ed2 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
@@ -5,7 +5,6 @@ package apparmor
import (
"bufio"
"bytes"
- "fmt"
"io"
"os"
"os/exec"
@@ -14,6 +13,7 @@ import (
"strings"
"text/template"
+ "github.com/containers/common/pkg/apparmor/internal/supported"
"github.com/containers/storage/pkg/unshare"
runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/pkg/errors"
@@ -23,12 +23,11 @@ import (
// profileDirectory is the file store for apparmor profiles and macros.
var profileDirectory = "/etc/apparmor.d"
-// IsEnabled returns true if AppArmor is enabled on the host.
+// IsEnabled returns true if AppArmor is enabled on the host. It also checks
+// for the existence of the `apparmor_parser` binary, which will be required to
+// apply profiles.
func IsEnabled() bool {
- if unshare.IsRootless() {
- return false
- }
- return runcaa.IsEnabled()
+ return supported.NewAppArmorVerifier().IsSupported() == nil
}
// profileData holds information about the given profile for generation.
@@ -44,10 +43,10 @@ type profileData struct {
}
// generateDefault creates an apparmor profile from ProfileData.
-func (p *profileData) generateDefault(out io.Writer) error {
+func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) error {
compiled, err := template.New("apparmor_profile").Parse(defaultProfileTemplate)
if err != nil {
- return err
+ return errors.Wrap(err, "create AppArmor profile from template")
}
if macroExists("tunables/global") {
@@ -60,13 +59,13 @@ func (p *profileData) generateDefault(out io.Writer) error {
p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
}
- ver, err := getAAParserVersion()
+ ver, err := getAAParserVersion(apparmorParserPath)
if err != nil {
- return err
+ return errors.Wrap(err, "get AppArmor version")
}
p.Version = ver
- return compiled.Execute(out, p)
+ return errors.Wrap(compiled.Execute(out, p), "execute compiled profile")
}
// macroExists checks whether the passed macro exists.
@@ -86,31 +85,37 @@ func InstallDefault(name string) error {
Name: name,
}
- cmd := exec.Command("apparmor_parser", "-Kr")
+ apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary()
+ if err != nil {
+ return errors.Wrap(err, "find `apparmor_parser` binary")
+ }
+
+ cmd := exec.Command(apparmorParserPath, "-Kr")
pipe, err := cmd.StdinPipe()
if err != nil {
- return err
+ return errors.Wrapf(err, "execute %s", apparmorParserPath)
}
if err := cmd.Start(); err != nil {
if pipeErr := pipe.Close(); pipeErr != nil {
- logrus.Errorf("unable to close apparmor pipe: %q", pipeErr)
+ logrus.Errorf("unable to close AppArmor pipe: %q", pipeErr)
}
- return err
+ return errors.Wrapf(err, "start %s command", apparmorParserPath)
}
- if err := p.generateDefault(pipe); err != nil {
+ if err := p.generateDefault(apparmorParserPath, pipe); err != nil {
if pipeErr := pipe.Close(); pipeErr != nil {
- logrus.Errorf("unable to close apparmor pipe: %q", pipeErr)
+ logrus.Errorf("unable to close AppArmor pipe: %q", pipeErr)
}
if cmdErr := cmd.Wait(); cmdErr != nil {
- logrus.Errorf("unable to wait for apparmor command: %q", cmdErr)
+ logrus.Errorf("unable to wait for AppArmor command: %q", cmdErr)
}
- return err
+ return errors.Wrap(err, "generate default profile into pipe")
}
if pipeErr := pipe.Close(); pipeErr != nil {
- logrus.Errorf("unable to close apparmor pipe: %q", pipeErr)
+ logrus.Errorf("unable to close AppArmor pipe: %q", pipeErr)
}
- return cmd.Wait()
+
+ return errors.Wrap(cmd.Wait(), "wait for AppArmor command")
}
// DefaultContent returns the default profile content as byte slice. The
@@ -118,11 +123,17 @@ func InstallDefault(name string) error {
// generation fails.
func DefaultContent(name string) ([]byte, error) {
p := profileData{Name: name}
- var bytes bytes.Buffer
- if err := p.generateDefault(&bytes); err != nil {
- return nil, err
+ buffer := &bytes.Buffer{}
+
+ apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary()
+ if err != nil {
+ return nil, errors.Wrap(err, "find `apparmor_parser` binary")
+ }
+
+ if err := p.generateDefault(apparmorParserPath, buffer); err != nil {
+ return nil, errors.Wrap(err, "generate default AppAmor profile")
}
- return bytes.Bytes(), nil
+ return buffer.Bytes(), nil
}
// IsLoaded checks if a profile with the given name has been loaded into the
@@ -137,7 +148,7 @@ func IsLoaded(name string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
- return false, err
+ return false, errors.Wrap(err, "open AppArmor profile path")
}
defer file.Close()
@@ -148,7 +159,7 @@ func IsLoaded(name string) (bool, error) {
break
}
if err != nil {
- return false, err
+ return false, errors.Wrap(err, "reading AppArmor profile")
}
if strings.HasPrefix(p, name+" ") {
return true, nil
@@ -159,23 +170,23 @@ func IsLoaded(name string) (bool, error) {
}
// execAAParser runs `apparmor_parser` with the passed arguments.
-func execAAParser(dir string, args ...string) (string, error) {
- c := exec.Command("apparmor_parser", args...)
+func execAAParser(apparmorParserPath, dir string, args ...string) (string, error) {
+ c := exec.Command(apparmorParserPath, args...)
c.Dir = dir
- output, err := c.CombinedOutput()
+ output, err := c.Output()
if err != nil {
- return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
+ return "", errors.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
}
return string(output), nil
}
// getAAParserVersion returns the major and minor version of apparmor_parser.
-func getAAParserVersion() (int, error) {
- output, err := execAAParser("", "--version")
+func getAAParserVersion(apparmorParserPath string) (int, error) {
+ output, err := execAAParser(apparmorParserPath, "", "--version")
if err != nil {
- return -1, err
+ return -1, errors.Wrap(err, "execute apparmor_parser")
}
return parseAAParserVersion(output)
}
@@ -194,7 +205,7 @@ func parseAAParserVersion(output string) (int, error) {
// split by major minor version
v := strings.Split(version, ".")
if len(v) == 0 || len(v) > 3 {
- return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
+ return -1, errors.Errorf("parsing version failed for output: `%s`", output)
}
// Default the versions to 0.
@@ -202,19 +213,19 @@ func parseAAParserVersion(output string) (int, error) {
majorVersion, err := strconv.Atoi(v[0])
if err != nil {
- return -1, err
+ return -1, errors.Wrap(err, "convert AppArmor major version")
}
if len(v) > 1 {
minorVersion, err = strconv.Atoi(v[1])
if err != nil {
- return -1, err
+ return -1, errors.Wrap(err, "convert AppArmor minor version")
}
}
if len(v) > 2 {
patchLevel, err = strconv.Atoi(v[2])
if err != nil {
- return -1, err
+ return -1, errors.Wrap(err, "convert AppArmor patch version")
}
}
@@ -251,7 +262,7 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
if name == "" {
return "", nil
} else {
- return "", fmt.Errorf("profile %q specified but AppArmor is disabled on the host", name)
+ return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
}
}
@@ -262,10 +273,10 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
// name.
isLoaded, err := IsLoaded(name)
if err != nil {
- return "", err
+ return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
}
if !isLoaded {
- return "", fmt.Errorf("AppArmor profile %q specified but not loaded", name)
+ return "", errors.Errorf("AppArmor profile %q specified but not loaded", name)
}
return name, nil
}
@@ -274,12 +285,12 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
// if it's loaded before installing it.
isLoaded, err := IsLoaded(name)
if err != nil {
- return "", err
+ return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
}
if !isLoaded {
err = InstallDefault(name)
if err != nil {
- return "", err
+ return "", errors.Wrapf(err, "install profile %s", name)
}
logrus.Infof("successfully loaded AppAmor profile %q", name)
} else {
diff --git a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
new file mode 100644
index 000000000..778f4e3a2
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
@@ -0,0 +1,113 @@
+package supported
+
+import (
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sync"
+
+ "github.com/containers/storage/pkg/unshare"
+ runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
+
+// ApparmorVerifier is the global struct for verifying if AppArmor is available
+// on the system.
+type ApparmorVerifier struct {
+ impl verifierImpl
+ parserBinaryPath string
+}
+
+var (
+ singleton *ApparmorVerifier
+ once sync.Once
+)
+
+// NewAppArmorVerifier can be used to retrieve a new ApparmorVerifier instance.
+func NewAppArmorVerifier() *ApparmorVerifier {
+ once.Do(func() {
+ singleton = &ApparmorVerifier{impl: &defaultVerifier{}}
+ })
+ return singleton
+}
+
+// IsSupported returns nil if AppArmor is supported by the host system.
+// The method will error if:
+// - the process runs in rootless mode
+// - AppArmor is disabled by the host system
+// - the `apparmor_parser` binary is not discoverable
+func (a *ApparmorVerifier) IsSupported() error {
+ if a.impl.UnshareIsRootless() {
+ return errors.New("AppAmor is not supported on rootless containers")
+ }
+ if !a.impl.RuncIsEnabled() {
+ return errors.New("AppArmor not supported by the host system")
+ }
+
+ _, err := a.FindAppArmorParserBinary()
+ return err
+}
+
+// FindAppArmorParserBinary returns the `apparmor_parser` binary either from
+// `/sbin` or from `$PATH`. It returns an error if the binary could not be
+// found.
+func (a *ApparmorVerifier) FindAppArmorParserBinary() (string, error) {
+ // Use the memoized path if available
+ if a.parserBinaryPath != "" {
+ logrus.Debugf("Using %s binary", a.parserBinaryPath)
+ return a.parserBinaryPath, nil
+ }
+
+ const (
+ binary = "apparmor_parser"
+ sbin = "/sbin"
+ )
+
+ // `/sbin` is not always in `$PATH`, so we check it explicitly
+ sbinBinaryPath := filepath.Join(sbin, binary)
+ if _, err := a.impl.OsStat(sbinBinaryPath); err == nil {
+ logrus.Debugf("Found %s binary in %s", binary, sbinBinaryPath)
+ a.parserBinaryPath = sbinBinaryPath
+ return sbinBinaryPath, nil
+ }
+
+ // Fallback to checking $PATH
+ if path, err := a.impl.ExecLookPath(binary); err == nil {
+ logrus.Debugf("Found %s binary in %s", binary, path)
+ a.parserBinaryPath = path
+ return path, nil
+ }
+
+ return "", errors.Errorf(
+ "%s binary neither found in %s nor $PATH", binary, sbin,
+ )
+}
+
+//counterfeiter:generate . verifierImpl
+type verifierImpl interface {
+ UnshareIsRootless() bool
+ RuncIsEnabled() bool
+ OsStat(name string) (os.FileInfo, error)
+ ExecLookPath(file string) (string, error)
+}
+
+type defaultVerifier struct{}
+
+func (d *defaultVerifier) UnshareIsRootless() bool {
+ return unshare.IsRootless()
+}
+
+func (d *defaultVerifier) RuncIsEnabled() bool {
+ return runcaa.IsEnabled()
+}
+
+func (d *defaultVerifier) OsStat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+func (d *defaultVerifier) ExecLookPath(file string) (string, error) {
+ return exec.LookPath(file)
+}
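The `FindAppArmorParserBinary` method above checks `/sbin` before `$PATH` and memoizes the result. The dependency-free sketch below (hypothetical names, standard library only) captures that lookup-plus-cache pattern with `sync.Once`:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"sync"
)

// binaryLocator resolves a binary once, preferring an explicit directory
// (such as /sbin, which is not always on $PATH) before falling back to
// exec.LookPath, and caches whatever it finds.
type binaryLocator struct {
	once sync.Once
	path string
	err  error
}

func (l *binaryLocator) find(name, preferredDir string) (string, error) {
	l.once.Do(func() {
		candidate := filepath.Join(preferredDir, name)
		if _, err := os.Stat(candidate); err == nil {
			l.path = candidate
			return
		}
		l.path, l.err = exec.LookPath(name)
	})
	return l.path, l.err
}

func main() {
	var loc binaryLocator
	path, err := loc.find("apparmor_parser", "/sbin")
	if err != nil {
		fmt.Println("apparmor_parser not found:", err)
		return
	}
	fmt.Println("found apparmor_parser at", path)
}
```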
diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go
index c52dfa01f..21b988187 100644
--- a/vendor/github.com/containers/common/pkg/auth/auth.go
+++ b/vendor/github.com/containers/common/pkg/auth/auth.go
@@ -40,8 +40,8 @@ func CheckAuthFile(authfile string) error {
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
if sys != nil {
- copy := *sys
- sys = &copy
+ sysCopy := *sys
+ sys = &sysCopy
} else {
sys = &types.SystemContext{}
}
@@ -126,7 +126,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
// Write the new credentials to the authfile
- if err = config.SetAuthentication(systemContext, server, username, password); err != nil {
+ if err := config.SetAuthentication(systemContext, server, username, password); err != nil {
return err
}
}
@@ -150,17 +150,13 @@ func getRegistryName(server string) string {
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
- if len(split) > 1 {
- return split[0]
- }
return split[0]
}
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
-func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (string, string, error) {
- var err error
+func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) {
reader := bufio.NewReader(opts.Stdin)
username := opts.Username
if username == "" {
diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go
index 941177489..ddfa53be8 100644
--- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go
+++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go
@@ -57,9 +57,9 @@ func AllCapabilities() []string {
return capabilityList
}
-// normalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet
+// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet
// present).
-func normalizeCapabilities(caps []string) ([]string, error) {
+func NormalizeCapabilities(caps []string) ([]string, error) {
normalized := make([]string, len(caps))
for i, c := range caps {
c = strings.ToUpper(c)
@@ -98,7 +98,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
var caps []string
// Normalize the base capabilities
- base, err := normalizeCapabilities(base)
+ base, err := NormalizeCapabilities(base)
if err != nil {
return nil, err
}
@@ -106,11 +106,11 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
// Nothing to tweak; we're done
return base, nil
}
- capDrop, err := normalizeCapabilities(drops)
+ capDrop, err := NormalizeCapabilities(drops)
if err != nil {
return nil, err
}
- capAdd, err := normalizeCapabilities(adds)
+ capAdd, err := NormalizeCapabilities(adds)
if err != nil {
return nil, err
}
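With normalizeCapabilities now exported as NormalizeCapabilities, callers outside the package can normalize user input before merging. A minimal usage sketch, assuming the vendored import path github.com/containers/common/pkg/capabilities:

package main

import (
    "fmt"

    "github.com/containers/common/pkg/capabilities"
)

func main() {
    // Normalize user-supplied names such as "net_admin" to "CAP_NET_ADMIN".
    caps, err := capabilities.NormalizeCapabilities([]string{"net_admin", "CAP_SYS_TIME"})
    if err != nil {
        panic(err)
    }
    fmt.Println(caps)

    // Merge a base set with --cap-add / --cap-drop style lists.
    merged, err := capabilities.MergeCapabilities(
        []string{"CAP_CHOWN", "CAP_KILL"}, // base
        []string{"net_admin"},             // adds
        []string{"kill"},                  // drops
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(merged)
}
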
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 80c478505..78811b47a 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -244,6 +244,11 @@ type EngineConfig struct {
// LockType is the type of locking to use.
LockType string `toml:"lock_type,omitempty"`
+ // MultiImageArchive - if true, the container engine allows for storing
+ // archives (e.g., of the docker-archive transport) with multiple
+ // images. By default, Podman creates single-image archives.
+ MultiImageArchive bool `toml:"multi_image_archive,omitempty"`
+
// Namespace is the engine namespace to use. Namespaces are used to create
// scopes to separate containers and pods in the state. When namespace is
// set, engine will only view containers and pods in the same namespace. All
@@ -607,11 +612,11 @@ func (c *ContainersConfig) Validate() error {
}
if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
- return fmt.Errorf("log size max should be negative or >= %d", OCIBufSize)
+ return errors.Errorf("log size max should be negative or >= %d", OCIBufSize)
}
if _, err := units.FromHumanSize(c.ShmSize); err != nil {
- return fmt.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
+ return errors.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
}
return nil
@@ -754,15 +759,13 @@ func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []s
// '/dev/sdc:/dev/xvdc"
// '/dev/sdc:/dev/xvdc:rwm"
// '/dev/sdc:rm"
-func Device(device string) (string, string, string, error) {
- src := ""
- dst := ""
- permissions := "rwm"
+func Device(device string) (src, dst, permissions string, err error) {
+ permissions = "rwm"
split := strings.Split(device, ":")
switch len(split) {
case 3:
if !IsValidDeviceMode(split[2]) {
- return "", "", "", fmt.Errorf("invalid device mode: %s", split[2])
+ return "", "", "", errors.Errorf("invalid device mode: %s", split[2])
}
permissions = split[2]
fallthrough
@@ -770,19 +773,19 @@ func Device(device string) (string, string, string, error) {
if IsValidDeviceMode(split[1]) {
permissions = split[1]
} else {
- if len(split[1]) == 0 || split[1][0] != '/' {
- return "", "", "", fmt.Errorf("invalid device mode: %s", split[1])
+ if split[1] == "" || split[1][0] != '/' {
+ return "", "", "", errors.Errorf("invalid device mode: %s", split[1])
}
dst = split[1]
}
fallthrough
case 1:
if !strings.HasPrefix(split[0], "/dev/") {
- return "", "", "", fmt.Errorf("invalid device mode: %s", split[0])
+ return "", "", "", errors.Errorf("invalid device mode: %s", split[0])
}
src = split[0]
default:
- return "", "", "", fmt.Errorf("invalid device specification: %s", device)
+ return "", "", "", errors.Errorf("invalid device specification: %s", device)
}
if dst == "" {
@@ -903,21 +906,6 @@ func Path() string {
return OverrideContainersConfig
}
-func customConfigFile() (string, error) {
- path := os.Getenv("CONTAINERS_CONF")
- if path != "" {
- return path, nil
- }
- if unshare.IsRootless() {
- path, err := rootlessConfigPath()
- if err != nil {
- return "", err
- }
- return path, nil
- }
- return OverrideContainersConfig, nil
-}
-
// ReadCustomConfig reads the custom config and only generates a config based on it
// If the custom config file does not exists, function will return an empty config
func ReadCustomConfig() (*Config, error) {
@@ -938,7 +926,7 @@ func ReadCustomConfig() (*Config, error) {
newConfig := &Config{}
if _, err := os.Stat(path); err == nil {
- if err = readConfigFromFile(path, newConfig); err != nil {
+ if err := readConfigFromFile(path, newConfig); err != nil {
return nil, err
}
} else {
@@ -985,13 +973,12 @@ func Reload() (*Config, error) {
return defConfig()
}
-func (c *Config) ActiveDestination() (string, string, error) {
+func (c *Config) ActiveDestination() (uri, identity string, err error) {
if uri, found := os.LookupEnv("CONTAINER_HOST"); found {
- var ident string
if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found {
- ident = v
+ identity = v
}
- return uri, ident, nil
+ return uri, identity, nil
}
switch {
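Device now uses named return values but keeps its behaviour. A short usage sketch of the exported helper as declared in this hunk (the defaulting of dst to src happens in the unchanged tail of the function):

package main

import (
    "fmt"

    "github.com/containers/common/pkg/config"
)

func main() {
    // "src:dst:permissions" — permissions default to "rwm" when omitted.
    src, dst, perm, err := config.Device("/dev/sdc:/dev/xvdc:rwm")
    if err != nil {
        panic(err)
    }
    fmt.Println(src, dst, perm) // /dev/sdc /dev/xvdc rwm

    // With only a device path, the destination defaults to the source.
    src, dst, perm, _ = config.Device("/dev/fuse")
    fmt.Println(src, dst, perm) // /dev/fuse /dev/fuse rwm
}
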
diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go
new file mode 100644
index 000000000..a264b1888
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go
@@ -0,0 +1,12 @@
+package config
+
+import (
+ "os"
+)
+
+func customConfigFile() (string, error) {
+ if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+ return path, nil
+ }
+ return rootlessConfigPath()
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go
index 17b862967..77e06105e 100644
--- a/vendor/github.com/containers/common/pkg/config/config_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/config_linux.go
@@ -1,7 +1,26 @@
package config
-import selinux "github.com/opencontainers/selinux/go-selinux"
+import (
+ "os"
+
+ "github.com/containers/storage/pkg/unshare"
+ selinux "github.com/opencontainers/selinux/go-selinux"
+)
func selinuxEnabled() bool {
return selinux.GetEnabled()
}
+
+func customConfigFile() (string, error) {
+ if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+ return path, nil
+ }
+ if unshare.IsRootless() {
+ path, err := rootlessConfigPath()
+ if err != nil {
+ return "", err
+ }
+ return path, nil
+ }
+ return OverrideContainersConfig, nil
+}
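The Linux customConfigFile above prefers CONTAINERS_CONF, then a rootless per-user path, then the system override. Since the function is unexported, the sketch below only mirrors that lookup order with a hypothetical helper and an approximated rootless path:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// configPath mimics the Linux customConfigFile lookup order:
// 1. $CONTAINERS_CONF, 2. a rootless per-user path, 3. the system override.
func configPath(rootless bool) string {
    if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
        return path
    }
    if rootless {
        // Roughly ~/.config/containers/containers.conf in the real code.
        home, _ := os.UserHomeDir()
        return filepath.Join(home, ".config", "containers", "containers.conf")
    }
    return "/etc/containers/containers.conf"
}

func main() {
    fmt.Println(configPath(os.Geteuid() != 0))
}
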
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
index 282eb80b7..8a27c9626 100644
--- a/vendor/github.com/containers/common/pkg/config/config_local.go
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -3,13 +3,14 @@
package config
import (
- "fmt"
"os"
"path/filepath"
"regexp"
+ "strings"
"syscall"
units "github.com/docker/go-units"
+ "github.com/pkg/errors"
)
// isDirectory tests whether the given path exists and is a directory. It
@@ -42,13 +43,13 @@ func (c *EngineConfig) validatePaths() error {
// shift between runs or even parts of the program. - The OCI runtime
// uses a different working directory than we do, for example.
if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) {
- return fmt.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
+ return errors.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
}
if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) {
- return fmt.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
+ return errors.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
}
if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) {
- return fmt.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
+ return errors.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
}
return nil
}
@@ -67,7 +68,7 @@ func (c *ContainersConfig) validateUlimits() error {
for _, u := range c.DefaultUlimits {
ul, err := units.ParseUlimit(u)
if err != nil {
- return fmt.Errorf("unrecognized ulimit %s: %v", u, err)
+ return errors.Wrapf(err, "unrecognized ulimit %s", u)
}
_, err = ul.GetRlimit()
if err != nil {
@@ -81,18 +82,30 @@ func (c *ContainersConfig) validateTZ() error {
if c.TZ == "local" {
return nil
}
- zonePath := filepath.Join("/usr/share/zoneinfo", c.TZ)
- _, err := os.Stat(zonePath)
- if err != nil {
- return fmt.Errorf("Unrecognized timezone %s", zonePath)
+
+ lookupPaths := []string{
+ "/usr/share/zoneinfo",
+ "/etc/zoneinfo",
}
- return nil
+
+ for _, paths := range lookupPaths {
+ zonePath := filepath.Join(paths, c.TZ)
+ if _, err := os.Stat(zonePath); err == nil {
+ // found zone information
+ return nil
+ }
+ }
+
+ return errors.Errorf(
+ "find timezone %s in paths: %s",
+ c.TZ, strings.Join(lookupPaths, ", "),
+ )
}
func (c *ContainersConfig) validateUmask() error {
validUmask := regexp.MustCompile(`^[0-7]{1,4}$`)
if !validUmask.MatchString(c.Umask) {
- return fmt.Errorf("Not a valid Umask %s", c.Umask)
+ return errors.Errorf("not a valid umask %s", c.Umask)
}
return nil
}
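validateTZ now checks /etc/zoneinfo in addition to /usr/share/zoneinfo. A standalone sketch of the same lookup (hypothetical helper name, paths copied from the hunk):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// validTZ reports whether a timezone name resolves to a zoneinfo file in
// either of the lookup paths used by validateTZ above.
func validTZ(tz string) bool {
    if tz == "local" {
        return true
    }
    for _, dir := range []string{"/usr/share/zoneinfo", "/etc/zoneinfo"} {
        if _, err := os.Stat(filepath.Join(dir, tz)); err == nil {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(validTZ("Europe/Berlin"))
    fmt.Println(validTZ("Not/AZone"))
}
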
diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go
new file mode 100644
index 000000000..1c9806e6b
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_windows.go
@@ -0,0 +1,10 @@
+package config
+
+import "os"
+
+func customConfigFile() (string, error) {
+ if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+ return path, nil
+ }
+ return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 780df2a22..5ee72e759 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -92,7 +92,7 @@
# Ulimits has limits for non privileged container engines.
#
# default_ulimits = [
-# "nofile"="1280:2560",
+# "nofile=1280:2560",
# ]
# List of default DNS options to be added to /etc/resolv.conf inside of the container.
@@ -116,18 +116,13 @@
#
# env = [
# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+# "TERM=xterm",
# ]
# Pass all host environment variables into the container.
#
# env_host = false
-# Path to OCI hooks directories for automatically executed hooks.
-#
-# hooks_dir = [
-# "/usr/share/containers/oci/hooks.d",
-# ]
-
# Default proxy environment variables passed into the container.
# The environment variables passed in include:
# http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
@@ -299,6 +294,12 @@
#
# events_logger = "journald"
+# Path to OCI hooks directories for automatically executed hooks.
+#
+# hooks_dir = [
+# "/usr/share/containers/oci/hooks.d",
+# ]
+
# Default transport method for pulling and pushing for images
#
# image_default_transport = "docker://"
@@ -322,6 +323,12 @@
#
# lock_type** = "shm"
+# MultiImageArchive - if true, the container engine allows for storing archives
+# (e.g., of the docker-archive transport) with multiple images. By default,
+# Podman creates single-image archives.
+#
+# multi_image_archive = "false"
+
# Default engine namespace
# If engine is joined to a namespace, it will see only containers and pods
# that were created in the same namespace, and will create new containers and
@@ -331,6 +338,10 @@
#
# namespace = ""
+# Path to the slirp4netns binary
+#
+# network_cmd_path=""
+
# Whether to use chroot instead of pivot_root in the runtime
#
# no_pivot_root = false
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 57b703f53..2c398c538 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/cgroupv2"
"github.com/containers/storage"
+ "github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/selinux/go-selinux"
"github.com/pkg/errors"
@@ -94,8 +95,8 @@ const (
_installPrefix = "/usr"
// _cniConfigDir is the directory where cni configuration is found
_cniConfigDir = "/etc/cni/net.d/"
- // _cniConfigDirRootless is the directory where cni plugins are found
- _cniConfigDirRootless = ".config/cni/net.d/"
+ // _cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins
+ _cniConfigDirRootless = "cni/net.d/"
// CgroupfsCgroupsManager represents cgroupfs native cgroup manager
CgroupfsCgroupsManager = "cgroupfs"
// DefaultApparmorProfile specifies the default apparmor profile for the container.
@@ -115,9 +116,9 @@ const (
// DefaultSignaturePolicyPath is the default value for the
// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json"
- // DefaultRootlessSignaturePolicyPath is the default value for the
- // rootless policy.json file.
- DefaultRootlessSignaturePolicyPath = ".config/containers/policy.json"
+ // DefaultRootlessSignaturePolicyPath is the location within
+ // XDG_CONFIG_HOME of the rootless policy.json file.
+ DefaultRootlessSignaturePolicyPath = "containers/policy.json"
// DefaultShmSize default value
DefaultShmSize = "65536k"
// DefaultUserNSSize default value
@@ -144,11 +145,11 @@ func DefaultConfig() (*Config, error) {
defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath
if unshare.IsRootless() {
- home, err := unshare.HomeDir()
+ configHome, err := homedir.GetConfigHome()
if err != nil {
return nil, err
}
- sigPath := filepath.Join(home, DefaultRootlessSignaturePolicyPath)
+ sigPath := filepath.Join(configHome, DefaultRootlessSignaturePolicyPath)
defaultEngineConfig.SignaturePolicyPath = sigPath
if _, err := os.Stat(sigPath); err != nil {
if _, err := os.Stat(DefaultSignaturePolicyPath); err == nil {
@@ -156,7 +157,7 @@ func DefaultConfig() (*Config, error) {
}
}
netns = "slirp4netns"
- cniConfig = filepath.Join(home, _cniConfigDirRootless)
+ cniConfig = filepath.Join(configHome, _cniConfigDirRootless)
}
cgroupNS := "host"
@@ -181,6 +182,7 @@ func DefaultConfig() (*Config, error) {
EnableLabeling: selinuxEnabled(),
Env: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "TERM=xterm",
},
EnvHost: false,
HTTPProxy: false,
@@ -222,10 +224,16 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log")
- storeOpts, err := storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
- if err != nil {
- return nil, err
+ var storeOpts storage.StoreOptions
+ if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
+ storage.ReloadConfigurationFile(path, &storeOpts)
+ } else {
+ storeOpts, err = storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
+ if err != nil {
+ return nil, err
+ }
}
+
if storeOpts.GraphRoot == "" {
logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot)
storeOpts.GraphRoot = _defaultGraphRoot
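The hunk above lets CONTAINERS_STORAGE_CONF bypass storage.DefaultStoreOptions. A hedged sketch of the same pattern outside the config package, using only the containers/storage calls that appear in this hunk:

package main

import (
    "fmt"
    "os"

    "github.com/containers/storage"
    "github.com/containers/storage/pkg/unshare"
)

func main() {
    var storeOpts storage.StoreOptions
    if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
        // An explicit storage.conf wins over the built-in defaults.
        storage.ReloadConfigurationFile(path, &storeOpts)
    } else {
        var err error
        storeOpts, err = storage.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
        if err != nil {
            panic(err)
        }
    }
    fmt.Println("graph root:", storeOpts.GraphRoot)
}
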
diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go
index e49413d7a..f61d9ba54 100644
--- a/vendor/github.com/containers/common/pkg/config/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/default_linux.go
@@ -32,10 +32,8 @@ func getDefaultProcessLimits() []string {
defaultLimits := []string{}
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
- } else {
- if err := unix.Setrlimit(unix.RLIMIT_NPROC, &oldrlim); err == nil {
- defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", oldrlim.Cur, oldrlim.Max))
- }
+ } else if err := unix.Setrlimit(unix.RLIMIT_NPROC, &oldrlim); err == nil {
+ defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", oldrlim.Cur, oldrlim.Max))
}
return defaultLimits
}
diff --git a/vendor/github.com/containers/common/pkg/config/libpodConfig.go b/vendor/github.com/containers/common/pkg/config/libpodConfig.go
index ab507e864..07dd06240 100644
--- a/vendor/github.com/containers/common/pkg/config/libpodConfig.go
+++ b/vendor/github.com/containers/common/pkg/config/libpodConfig.go
@@ -3,7 +3,6 @@ package config
/* libpodConfig.go contains deprecated functionality and should not be used any longer */
import (
- "fmt"
"os"
"os/exec"
"path/filepath"
@@ -168,7 +167,7 @@ type ConfigFromLibpod struct {
// EventsLogFilePath is where the events log is stored.
EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
- //DetachKeys is the sequence of keys used to detach a container.
+ // DetachKeys is the sequence of keys used to detach a container.
DetachKeys string `toml:"detach_keys,omitempty"`
// SDNotify tells Libpod to allow containers to notify the host systemd of
@@ -197,6 +196,10 @@ func newLibpodConfig(c *Config) error {
return errors.Wrapf(err, "error finding config on system")
}
+ if len(configs) == 0 {
+ return nil
+ }
+
for _, path := range configs {
config, err = readLibpodConfigFromFile(path, config)
if err != nil {
@@ -226,7 +229,7 @@ func newLibpodConfig(c *Config) error {
// hard code EventsLogger to "file" to match older podman versions.
if config.EventsLogger != "file" {
- logrus.Debugf("Ignoring libpod.conf EventsLogger setting %q. Use %q if you want to change this setting and remove libpod.conf files.", Path(), config.EventsLogger)
+ logrus.Warnf("Ignoring libpod.conf EventsLogger setting %q. Use %q if you want to change this setting and remove libpod.conf files.", config.EventsLogger, Path())
config.EventsLogger = "file"
}
@@ -243,7 +246,7 @@ func readLibpodConfigFromFile(path string, config *ConfigFromLibpod) (*ConfigFro
logrus.Debugf("Reading configuration file %q", path)
_, err := toml.DecodeFile(path, config)
if err != nil {
- return nil, fmt.Errorf("unable to decode configuration %v: %v", path, err)
+ return nil, errors.Wrapf(err, "decode configuration %s", path)
}
return config, err
@@ -260,9 +263,7 @@ func systemLibpodConfigs() ([]string, error) {
if err != nil {
containersConfPath = filepath.Join("$HOME", UserOverrideContainersConfig)
}
- // TODO: Raise to Warnf, when Podman is updated to
- // remove libpod.conf by default
- logrus.Debugf("Found deprecated file %s, please remove. Use %s to override defaults.\n", Path(), containersConfPath)
+ logrus.Warnf("Found deprecated file %s, please remove. Use %s to override defaults.\n", path, containersConfPath)
return []string{path}, nil
}
return nil, err
@@ -270,15 +271,11 @@ func systemLibpodConfigs() ([]string, error) {
configs := []string{}
if _, err := os.Stat(_rootConfigPath); err == nil {
- // TODO: Raise to Warnf, when Podman is updated to
- // remove libpod.conf by default
- logrus.Debugf("Found deprecated file %s, please remove. Use %s to override defaults.\n", _rootConfigPath, OverrideContainersConfig)
+ logrus.Warnf("Found deprecated file %s, please remove. Use %s to override defaults.\n", _rootConfigPath, OverrideContainersConfig)
configs = append(configs, _rootConfigPath)
}
if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
- // TODO: Raise to Warnf, when Podman is updated to
- // remove libpod.conf by default
- logrus.Debugf("Found deprecated file %s, please remove. Use %s to override defaults.\n", _rootOverrideConfigPath, OverrideContainersConfig)
+ logrus.Warnf("Found deprecated file %s, please remove. Use %s to override defaults.\n", _rootOverrideConfigPath, OverrideContainersConfig)
configs = append(configs, _rootOverrideConfigPath)
}
return configs, nil
diff --git a/vendor/github.com/containers/common/pkg/config/util_supported.go b/vendor/github.com/containers/common/pkg/config/util_supported.go
index 2cdf54549..4595716d1 100644
--- a/vendor/github.com/containers/common/pkg/config/util_supported.go
+++ b/vendor/github.com/containers/common/pkg/config/util_supported.go
@@ -49,7 +49,7 @@ func getRuntimeDir() (string, error) {
if runtimeDir == "" {
home := os.Getenv("HOME")
if home == "" {
- rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
+ rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
return
}
resolvedHome, err := filepath.EvalSymlinks(home)
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
index c20f900d8..d0ac19fb6 100644
--- a/vendor/github.com/containers/common/pkg/retry/retry.go
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -2,6 +2,7 @@ package retry
import (
"context"
+ "io"
"math"
"net"
"net/url"
@@ -17,7 +18,8 @@ import (
// RetryOptions defines the option to retry
type RetryOptions struct {
- MaxRetry int // The number of times to possibly retry
+ MaxRetry int // The number of times to possibly retry
+ Delay time.Duration // The delay to use between retries, if set
}
// RetryIfNecessary retries the operation in exponential backoff with the retryOptions
@@ -25,6 +27,9 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions
err := operation()
for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
+ if retryOptions.Delay != 0 {
+ delay = retryOptions.Delay
+ }
logrus.Infof("Warning: failed, retrying in %s ... (%d/%d)", delay, attempt+1, retryOptions.MaxRetry)
select {
case <-time.After(delay):
@@ -58,7 +63,10 @@ func isRetryable(err error) bool {
return true
case *net.OpError:
return isRetryable(e.Err)
- case *url.Error:
+ case *url.Error: // This includes errors returned by the net/http client.
+ if e.Err == io.EOF { // Happens when a server accepts an HTTP connection and sends EOF
+ return true
+ }
return isRetryable(e.Err)
case syscall.Errno:
return e != syscall.ECONNREFUSED
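A usage sketch of the new Delay field and the io.EOF-in-url.Error retry case (assumes RetryIfNecessary takes a *RetryOptions, as in the upstream package; the failing operation is a stand-in):

package main

import (
    "context"
    "fmt"
    "io"
    "net/url"
    "time"

    "github.com/containers/common/pkg/retry"
)

func main() {
    attempts := 0
    op := func() error {
        attempts++
        if attempts < 3 {
            // A url.Error wrapping io.EOF is one of the newly retryable cases.
            return &url.Error{Op: "Get", URL: "http://registry.example", Err: io.EOF}
        }
        return nil
    }

    // A fixed delay overrides the default exponential backoff between tries.
    opts := retry.RetryOptions{MaxRetry: 5, Delay: 100 * time.Millisecond}
    if err := retry.RetryIfNecessary(context.Background(), op, &opts); err != nil {
        fmt.Println("gave up:", err)
        return
    }
    fmt.Println("succeeded after", attempts, "attempts")
}
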
diff --git a/vendor/github.com/containers/common/pkg/seccomp/conversion.go b/vendor/github.com/containers/common/pkg/seccomp/conversion.go
new file mode 100644
index 000000000..79a893ba3
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/conversion.go
@@ -0,0 +1,32 @@
+package seccomp
+
+import "fmt"
+
+var goArchToSeccompArchMap = map[string]Arch{
+ "386": ArchX86,
+ "amd64": ArchX86_64,
+ "amd64p32": ArchX32,
+ "arm": ArchARM,
+ "arm64": ArchAARCH64,
+ "mips": ArchMIPS,
+ "mips64": ArchMIPS64,
+ "mips64le": ArchMIPSEL64,
+ "mips64p32": ArchMIPS64N32,
+ "mips64p32le": ArchMIPSEL64N32,
+ "mipsle": ArchMIPSEL,
+ "ppc": ArchPPC,
+ "ppc64": ArchPPC64,
+ "ppc64le": ArchPPC64LE,
+ "s390": ArchS390,
+ "s390x": ArchS390X,
+}
+
+// GoArchToSeccompArch converts a runtime.GOARCH to a seccomp `Arch`. The
+// function returns an error if the architecture conversion is not supported.
+func GoArchToSeccompArch(goArch string) (Arch, error) {
+ arch, ok := goArchToSeccompArchMap[goArch]
+ if !ok {
+ return "", fmt.Errorf("unsupported go arch provided: %s", goArch)
+ }
+ return arch, nil
+}
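A short usage sketch of GoArchToSeccompArch as declared above:

package main

import (
    "fmt"
    "runtime"

    "github.com/containers/common/pkg/seccomp"
)

func main() {
    // Map the current Go architecture onto its seccomp counterpart,
    // e.g. "amd64" -> "SCMP_ARCH_X86_64".
    arch, err := seccomp.GoArchToSeccompArch(runtime.GOARCH)
    if err != nil {
        fmt.Println("unsupported architecture:", err)
        return
    }
    fmt.Println(arch)
}
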
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
new file mode 100644
index 000000000..06b39024a
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
@@ -0,0 +1,878 @@
+{
+ "defaultAction": "SCMP_ACT_ERRNO",
+ "archMap": [
+ {
+ "architecture": "SCMP_ARCH_X86_64",
+ "subArchitectures": [
+ "SCMP_ARCH_X86",
+ "SCMP_ARCH_X32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_AARCH64",
+ "subArchitectures": [
+ "SCMP_ARCH_ARM"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPS64",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPS",
+ "SCMP_ARCH_MIPS64N32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPS64N32",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPS",
+ "SCMP_ARCH_MIPS64"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPSEL64",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPSEL",
+ "SCMP_ARCH_MIPSEL64N32"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_MIPSEL64N32",
+ "subArchitectures": [
+ "SCMP_ARCH_MIPSEL",
+ "SCMP_ARCH_MIPSEL64"
+ ]
+ },
+ {
+ "architecture": "SCMP_ARCH_S390X",
+ "subArchitectures": [
+ "SCMP_ARCH_S390"
+ ]
+ }
+ ],
+ "syscalls": [
+ {
+ "names": [
+ "_llseek",
+ "_newselect",
+ "accept",
+ "accept4",
+ "access",
+ "adjtimex",
+ "alarm",
+ "bind",
+ "brk",
+ "capget",
+ "capset",
+ "chdir",
+ "chmod",
+ "chown",
+ "chown32",
+ "clock_getres",
+ "clock_gettime",
+ "clock_nanosleep",
+ "close",
+ "connect",
+ "copy_file_range",
+ "creat",
+ "dup",
+ "dup2",
+ "dup3",
+ "epoll_create",
+ "epoll_create1",
+ "epoll_ctl",
+ "epoll_ctl_old",
+ "epoll_pwait",
+ "epoll_wait",
+ "epoll_wait_old",
+ "eventfd",
+ "eventfd2",
+ "execve",
+ "execveat",
+ "exit",
+ "exit_group",
+ "faccessat",
+ "fadvise64",
+ "fadvise64_64",
+ "fallocate",
+ "fanotify_mark",
+ "fchdir",
+ "fchmod",
+ "fchmodat",
+ "fchown",
+ "fchown32",
+ "fchownat",
+ "fcntl",
+ "fcntl64",
+ "fdatasync",
+ "fgetxattr",
+ "flistxattr",
+ "flock",
+ "fork",
+ "fremovexattr",
+ "fsetxattr",
+ "fstat",
+ "fstat64",
+ "fstatat64",
+ "fstatfs",
+ "fstatfs64",
+ "fsync",
+ "ftruncate",
+ "ftruncate64",
+ "futex",
+ "futimesat",
+ "get_robust_list",
+ "get_thread_area",
+ "getcpu",
+ "getcwd",
+ "getdents",
+ "getdents64",
+ "getegid",
+ "getegid32",
+ "geteuid",
+ "geteuid32",
+ "getgid",
+ "getgid32",
+ "getgroups",
+ "getgroups32",
+ "getitimer",
+ "getpeername",
+ "getpgid",
+ "getpgrp",
+ "getpid",
+ "getppid",
+ "getpriority",
+ "getrandom",
+ "getresgid",
+ "getresgid32",
+ "getresuid",
+ "getresuid32",
+ "getrlimit",
+ "getrusage",
+ "getsid",
+ "getsockname",
+ "getsockopt",
+ "gettid",
+ "gettimeofday",
+ "getuid",
+ "getuid32",
+ "getxattr",
+ "inotify_add_watch",
+ "inotify_init",
+ "inotify_init1",
+ "inotify_rm_watch",
+ "io_cancel",
+ "io_destroy",
+ "io_getevents",
+ "io_setup",
+ "io_submit",
+ "ioctl",
+ "ioprio_get",
+ "ioprio_set",
+ "ipc",
+ "kill",
+ "lchown",
+ "lchown32",
+ "lgetxattr",
+ "link",
+ "linkat",
+ "listen",
+ "listxattr",
+ "llistxattr",
+ "lremovexattr",
+ "lseek",
+ "lsetxattr",
+ "lstat",
+ "lstat64",
+ "madvise",
+ "memfd_create",
+ "mincore",
+ "mkdir",
+ "mkdirat",
+ "mknod",
+ "mknodat",
+ "mlock",
+ "mlock2",
+ "mlockall",
+ "mmap",
+ "mmap2",
+ "mount",
+ "mprotect",
+ "mq_getsetattr",
+ "mq_notify",
+ "mq_open",
+ "mq_timedreceive",
+ "mq_timedsend",
+ "mq_unlink",
+ "mremap",
+ "msgctl",
+ "msgget",
+ "msgrcv",
+ "msgsnd",
+ "msync",
+ "munlock",
+ "munlockall",
+ "munmap",
+ "name_to_handle_at",
+ "nanosleep",
+ "newfstatat",
+ "open",
+ "openat",
+ "pause",
+ "pipe",
+ "pipe2",
+ "poll",
+ "ppoll",
+ "prctl",
+ "pread64",
+ "preadv",
+ "preadv2",
+ "prlimit64",
+ "pselect6",
+ "pwrite64",
+ "pwritev",
+ "pwritev2",
+ "read",
+ "readahead",
+ "readlink",
+ "readlinkat",
+ "readv",
+ "reboot",
+ "recv",
+ "recvfrom",
+ "recvmmsg",
+ "recvmsg",
+ "remap_file_pages",
+ "removexattr",
+ "rename",
+ "renameat",
+ "renameat2",
+ "restart_syscall",
+ "rmdir",
+ "rt_sigaction",
+ "rt_sigpending",
+ "rt_sigprocmask",
+ "rt_sigqueueinfo",
+ "rt_sigreturn",
+ "rt_sigsuspend",
+ "rt_sigtimedwait",
+ "rt_tgsigqueueinfo",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
+ "sched_getaffinity",
+ "sched_getattr",
+ "sched_getparam",
+ "sched_getscheduler",
+ "sched_rr_get_interval",
+ "sched_setaffinity",
+ "sched_setattr",
+ "sched_setparam",
+ "sched_setscheduler",
+ "sched_yield",
+ "seccomp",
+ "select",
+ "semctl",
+ "semget",
+ "semop",
+ "semtimedop",
+ "send",
+ "sendfile",
+ "sendfile64",
+ "sendmmsg",
+ "sendmsg",
+ "sendto",
+ "set_robust_list",
+ "set_thread_area",
+ "set_tid_address",
+ "setfsgid",
+ "setfsgid32",
+ "setfsuid",
+ "setfsuid32",
+ "setgid",
+ "setgid32",
+ "setgroups",
+ "setgroups32",
+ "setitimer",
+ "setpgid",
+ "setpriority",
+ "setregid",
+ "setregid32",
+ "setresgid",
+ "setresgid32",
+ "setresuid",
+ "setresuid32",
+ "setreuid",
+ "setreuid32",
+ "setrlimit",
+ "setsid",
+ "setsockopt",
+ "setuid",
+ "setuid32",
+ "setxattr",
+ "shmat",
+ "shmctl",
+ "shmdt",
+ "shmget",
+ "shutdown",
+ "sigaltstack",
+ "signalfd",
+ "signalfd4",
+ "sigreturn",
+ "socketcall",
+ "socketpair",
+ "splice",
+ "stat",
+ "stat64",
+ "statfs",
+ "statfs64",
+ "statx",
+ "symlink",
+ "symlinkat",
+ "sync",
+ "sync_file_range",
+ "syncfs",
+ "sysinfo",
+ "syslog",
+ "tee",
+ "tgkill",
+ "time",
+ "timer_create",
+ "timer_delete",
+ "timer_getoverrun",
+ "timer_gettime",
+ "timer_settime",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
+ "times",
+ "tkill",
+ "truncate",
+ "truncate64",
+ "ugetrlimit",
+ "umask",
+ "umount",
+ "umount2",
+ "uname",
+ "unlink",
+ "unlinkat",
+ "unshare",
+ "utime",
+ "utimensat",
+ "utimes",
+ "vfork",
+ "vmsplice",
+ "wait4",
+ "waitid",
+ "waitpid",
+ "write",
+ "writev"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 0,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 8,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 131072,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 131080,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "personality"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 4294967295,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {}
+ },
+ {
+ "names": [
+ "sync_file_range2"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "ppc64le"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "arm_fadvise64_64",
+ "arm_sync_file_range",
+ "sync_file_range2",
+ "breakpoint",
+ "cacheflush",
+ "set_tls"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "arm",
+ "arm64"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "arch_prctl"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "amd64",
+ "x32"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "modify_ldt"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "amd64",
+ "x32",
+ "x86"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "s390_pci_mmio_read",
+ "s390_pci_mmio_write",
+ "s390_runtime_instr"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "open_by_handle_at"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_DAC_READ_SEARCH"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "bpf",
+ "clone",
+ "fanotify_init",
+ "lookup_dcookie",
+ "mount",
+ "name_to_handle_at",
+ "perf_event_open",
+ "quotactl",
+ "setdomainname",
+ "sethostname",
+ "setns",
+ "umount",
+ "umount2",
+ "unshare"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "clone"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 2080505856,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_MASKED_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ],
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ }
+ },
+ {
+ "names": [
+ "clone"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 1,
+ "value": 2080505856,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_MASKED_EQ"
+ }
+ ],
+ "comment": "s390 parameter ordering for clone is different",
+ "includes": {
+ "arches": [
+ "s390",
+ "s390x"
+ ]
+ },
+ "excludes": {
+ "caps": [
+ "CAP_SYS_ADMIN"
+ ]
+ }
+ },
+ {
+ "names": [
+ "reboot"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_BOOT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "chroot"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_CHROOT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "delete_module",
+ "init_module",
+ "finit_module",
+ "query_module"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_MODULE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "get_mempolicy",
+ "mbind",
+ "name_to_handle_at",
+ "set_mempolicy"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_NICE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "acct"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_PACCT"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "kcmp",
+ "process_vm_readv",
+ "process_vm_writev",
+ "ptrace"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_PTRACE"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "iopl",
+ "ioperm"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_RAWIO"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "settimeofday",
+ "stime",
+ "clock_settime"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_TIME"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "vhangup"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [],
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_SYS_TTY_CONFIG"
+ ]
+ },
+ "excludes": {}
+ },
+ {
+ "names": [
+ "socket"
+ ],
+ "action": "SCMP_ACT_ERRNO",
+ "args": [
+ {
+ "index": 0,
+ "value": 16,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ },
+ {
+ "index": 2,
+ "value": 9,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_EQ"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_AUDIT_WRITE"
+ ]
+ },
+ "errnoRet": 22
+ },
+ {
+ "names": [
+ "socket"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 2,
+ "value": 9,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_NE"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_AUDIT_WRITE"
+ ]
+ }
+ },
+ {
+ "names": [
+ "socket"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 0,
+ "value": 16,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_NE"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_AUDIT_WRITE"
+ ]
+ }
+ },
+ {
+ "names": [
+ "socket"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": [
+ {
+ "index": 2,
+ "value": 9,
+ "valueTwo": 0,
+ "op": "SCMP_CMP_NE"
+ }
+ ],
+ "comment": "",
+ "includes": {},
+ "excludes": {
+ "caps": [
+ "CAP_AUDIT_WRITE"
+ ]
+ }
+ },
+ {
+ "names": [
+ "socket"
+ ],
+ "action": "SCMP_ACT_ALLOW",
+ "args": null,
+ "comment": "",
+ "includes": {
+ "caps": [
+ "CAP_AUDIT_WRITE"
+ ]
+ },
+ "excludes": {}
+ }
+ ]
+} \ No newline at end of file
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go
new file mode 100644
index 000000000..f12cf02c9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_default_linux.go
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
+package seccomp
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func arches() []Architecture {
+ return []Architecture{
+ {
+ Arch: ArchX86_64,
+ SubArches: []Arch{ArchX86, ArchX32},
+ },
+ {
+ Arch: ArchAARCH64,
+ SubArches: []Arch{ArchARM},
+ },
+ {
+ Arch: ArchMIPS64,
+ SubArches: []Arch{ArchMIPS, ArchMIPS64N32},
+ },
+ {
+ Arch: ArchMIPS64N32,
+ SubArches: []Arch{ArchMIPS, ArchMIPS64},
+ },
+ {
+ Arch: ArchMIPSEL64,
+ SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64N32},
+ },
+ {
+ Arch: ArchMIPSEL64N32,
+ SubArches: []Arch{ArchMIPSEL, ArchMIPSEL64},
+ },
+ {
+ Arch: ArchS390X,
+ SubArches: []Arch{ArchS390},
+ },
+ }
+}
+
+// DefaultProfile defines the allowlist for the default seccomp profile.
+func DefaultProfile() *Seccomp {
+ einval := uint(syscall.EINVAL)
+
+ syscalls := []*Syscall{
+ {
+ Names: []string{
+ "_llseek",
+ "_newselect",
+ "accept",
+ "accept4",
+ "access",
+ "adjtimex",
+ "alarm",
+ "bind",
+ "brk",
+ "capget",
+ "capset",
+ "chdir",
+ "chmod",
+ "chown",
+ "chown32",
+ "clock_getres",
+ "clock_gettime",
+ "clock_nanosleep",
+ "close",
+ "connect",
+ "copy_file_range",
+ "creat",
+ "dup",
+ "dup2",
+ "dup3",
+ "epoll_create",
+ "epoll_create1",
+ "epoll_ctl",
+ "epoll_ctl_old",
+ "epoll_pwait",
+ "epoll_wait",
+ "epoll_wait_old",
+ "eventfd",
+ "eventfd2",
+ "execve",
+ "execveat",
+ "exit",
+ "exit_group",
+ "faccessat",
+ "fadvise64",
+ "fadvise64_64",
+ "fallocate",
+ "fanotify_mark",
+ "fchdir",
+ "fchmod",
+ "fchmodat",
+ "fchown",
+ "fchown32",
+ "fchownat",
+ "fcntl",
+ "fcntl64",
+ "fdatasync",
+ "fgetxattr",
+ "flistxattr",
+ "flock",
+ "fork",
+ "fremovexattr",
+ "fsetxattr",
+ "fstat",
+ "fstat64",
+ "fstatat64",
+ "fstatfs",
+ "fstatfs64",
+ "fsync",
+ "ftruncate",
+ "ftruncate64",
+ "futex",
+ "futimesat",
+ "get_robust_list",
+ "get_thread_area",
+ "getcpu",
+ "getcwd",
+ "getdents",
+ "getdents64",
+ "getegid",
+ "getegid32",
+ "geteuid",
+ "geteuid32",
+ "getgid",
+ "getgid32",
+ "getgroups",
+ "getgroups32",
+ "getitimer",
+ "getpeername",
+ "getpgid",
+ "getpgrp",
+ "getpid",
+ "getppid",
+ "getpriority",
+ "getrandom",
+ "getresgid",
+ "getresgid32",
+ "getresuid",
+ "getresuid32",
+ "getrlimit",
+ "getrusage",
+ "getsid",
+ "getsockname",
+ "getsockopt",
+ "gettid",
+ "gettimeofday",
+ "getuid",
+ "getuid32",
+ "getxattr",
+ "inotify_add_watch",
+ "inotify_init",
+ "inotify_init1",
+ "inotify_rm_watch",
+ "io_cancel",
+ "io_destroy",
+ "io_getevents",
+ "io_setup",
+ "io_submit",
+ "ioctl",
+ "ioprio_get",
+ "ioprio_set",
+ "ipc",
+ "kill",
+ "lchown",
+ "lchown32",
+ "lgetxattr",
+ "link",
+ "linkat",
+ "listen",
+ "listxattr",
+ "llistxattr",
+ "lremovexattr",
+ "lseek",
+ "lsetxattr",
+ "lstat",
+ "lstat64",
+ "madvise",
+ "memfd_create",
+ "mincore",
+ "mkdir",
+ "mkdirat",
+ "mknod",
+ "mknodat",
+ "mlock",
+ "mlock2",
+ "mlockall",
+ "mmap",
+ "mmap2",
+ "mount",
+ "mprotect",
+ "mq_getsetattr",
+ "mq_notify",
+ "mq_open",
+ "mq_timedreceive",
+ "mq_timedsend",
+ "mq_unlink",
+ "mremap",
+ "msgctl",
+ "msgget",
+ "msgrcv",
+ "msgsnd",
+ "msync",
+ "munlock",
+ "munlockall",
+ "munmap",
+ "name_to_handle_at",
+ "nanosleep",
+ "newfstatat",
+ "open",
+ "openat",
+ "pause",
+ "pipe",
+ "pipe2",
+ "poll",
+ "ppoll",
+ "prctl",
+ "pread64",
+ "preadv",
+ "preadv2",
+ "prlimit64",
+ "pselect6",
+ "pwrite64",
+ "pwritev",
+ "pwritev2",
+ "read",
+ "readahead",
+ "readlink",
+ "readlinkat",
+ "readv",
+ "reboot",
+ "recv",
+ "recvfrom",
+ "recvmmsg",
+ "recvmsg",
+ "remap_file_pages",
+ "removexattr",
+ "rename",
+ "renameat",
+ "renameat2",
+ "restart_syscall",
+ "rmdir",
+ "rt_sigaction",
+ "rt_sigpending",
+ "rt_sigprocmask",
+ "rt_sigqueueinfo",
+ "rt_sigreturn",
+ "rt_sigsuspend",
+ "rt_sigtimedwait",
+ "rt_tgsigqueueinfo",
+ "sched_get_priority_max",
+ "sched_get_priority_min",
+ "sched_getaffinity",
+ "sched_getattr",
+ "sched_getparam",
+ "sched_getscheduler",
+ "sched_rr_get_interval",
+ "sched_setaffinity",
+ "sched_setattr",
+ "sched_setparam",
+ "sched_setscheduler",
+ "sched_yield",
+ "seccomp",
+ "select",
+ "semctl",
+ "semget",
+ "semop",
+ "semtimedop",
+ "send",
+ "sendfile",
+ "sendfile64",
+ "sendmmsg",
+ "sendmsg",
+ "sendto",
+ "set_robust_list",
+ "set_thread_area",
+ "set_tid_address",
+ "setfsgid",
+ "setfsgid32",
+ "setfsuid",
+ "setfsuid32",
+ "setgid",
+ "setgid32",
+ "setgroups",
+ "setgroups32",
+ "setitimer",
+ "setpgid",
+ "setpriority",
+ "setregid",
+ "setregid32",
+ "setresgid",
+ "setresgid32",
+ "setresuid",
+ "setresuid32",
+ "setreuid",
+ "setreuid32",
+ "setrlimit",
+ "setsid",
+ "setsockopt",
+ "setuid",
+ "setuid32",
+ "setxattr",
+ "shmat",
+ "shmctl",
+ "shmdt",
+ "shmget",
+ "shutdown",
+ "sigaltstack",
+ "signalfd",
+ "signalfd4",
+ "sigreturn",
+ "socketcall",
+ "socketpair",
+ "splice",
+ "stat",
+ "stat64",
+ "statfs",
+ "statfs64",
+ "statx",
+ "symlink",
+ "symlinkat",
+ "sync",
+ "sync_file_range",
+ "syncfs",
+ "sysinfo",
+ "syslog",
+ "tee",
+ "tgkill",
+ "time",
+ "timer_create",
+ "timer_delete",
+ "timer_getoverrun",
+ "timer_gettime",
+ "timer_settime",
+ "timerfd_create",
+ "timerfd_gettime",
+ "timerfd_settime",
+ "times",
+ "tkill",
+ "truncate",
+ "truncate64",
+ "ugetrlimit",
+ "umask",
+ "umount",
+ "umount2",
+ "uname",
+ "unlink",
+ "unlinkat",
+ "unshare",
+ "utime",
+ "utimensat",
+ "utimes",
+ "vfork",
+ "vmsplice",
+ "wait4",
+ "waitid",
+ "waitpid",
+ "write",
+ "writev",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ },
+ {
+ Names: []string{"personality"},
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: 0x0,
+ Op: OpEqualTo,
+ },
+ },
+ },
+ {
+ Names: []string{"personality"},
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: 0x0008,
+ Op: OpEqualTo,
+ },
+ },
+ },
+ {
+ Names: []string{"personality"},
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: 0x20000,
+ Op: OpEqualTo,
+ },
+ },
+ },
+ {
+ Names: []string{"personality"},
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: 0x20008,
+ Op: OpEqualTo,
+ },
+ },
+ },
+ {
+ Names: []string{"personality"},
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: 0xffffffff,
+ Op: OpEqualTo,
+ },
+ },
+ },
+ {
+ Names: []string{
+ "sync_file_range2",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Arches: []string{"ppc64le"},
+ },
+ },
+ {
+ Names: []string{
+ "arm_fadvise64_64",
+ "arm_sync_file_range",
+ "sync_file_range2",
+ "breakpoint",
+ "cacheflush",
+ "set_tls",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Arches: []string{"arm", "arm64"},
+ },
+ },
+ {
+ Names: []string{
+ "arch_prctl",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Arches: []string{"amd64", "x32"},
+ },
+ },
+ {
+ Names: []string{
+ "modify_ldt",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Arches: []string{"amd64", "x32", "x86"},
+ },
+ },
+ {
+ Names: []string{
+ "s390_pci_mmio_read",
+ "s390_pci_mmio_write",
+ "s390_runtime_instr",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Arches: []string{"s390", "s390x"},
+ },
+ },
+ {
+ Names: []string{
+ "open_by_handle_at",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_DAC_READ_SEARCH"},
+ },
+ },
+ {
+ Names: []string{
+ "bpf",
+ "clone",
+ "fanotify_init",
+ "lookup_dcookie",
+ "mount",
+ "name_to_handle_at",
+ "perf_event_open",
+ "quotactl",
+ "setdomainname",
+ "sethostname",
+ "setns",
+ "umount",
+ "umount2",
+ "unshare",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_ADMIN"},
+ },
+ },
+ {
+ Names: []string{
+ "clone",
+ },
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET,
+ ValueTwo: 0,
+ Op: OpMaskedEqual,
+ },
+ },
+ Excludes: Filter{
+ Caps: []string{"CAP_SYS_ADMIN"},
+ Arches: []string{"s390", "s390x"},
+ },
+ },
+ {
+ Names: []string{
+ "clone",
+ },
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 1,
+ Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET,
+ ValueTwo: 0,
+ Op: OpMaskedEqual,
+ },
+ },
+ Comment: "s390 parameter ordering for clone is different",
+ Includes: Filter{
+ Arches: []string{"s390", "s390x"},
+ },
+ Excludes: Filter{
+ Caps: []string{"CAP_SYS_ADMIN"},
+ },
+ },
+ {
+ Names: []string{
+ "reboot",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_BOOT"},
+ },
+ },
+ {
+ Names: []string{
+ "chroot",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_CHROOT"},
+ },
+ },
+ {
+ Names: []string{
+ "delete_module",
+ "init_module",
+ "finit_module",
+ "query_module",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_MODULE"},
+ },
+ },
+ {
+ Names: []string{
+ "get_mempolicy",
+ "mbind",
+ "name_to_handle_at",
+ "set_mempolicy",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_NICE"},
+ },
+ },
+ {
+ Names: []string{
+ "acct",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_PACCT"},
+ },
+ },
+ {
+ Names: []string{
+ "kcmp",
+ "process_vm_readv",
+ "process_vm_writev",
+ "ptrace",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_PTRACE"},
+ },
+ },
+ {
+ Names: []string{
+ "iopl",
+ "ioperm",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_RAWIO"},
+ },
+ },
+ {
+ Names: []string{
+ "settimeofday",
+ "stime",
+ "clock_settime",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_TIME"},
+ },
+ },
+ {
+ Names: []string{
+ "vhangup",
+ },
+ Action: ActAllow,
+ Args: []*Arg{},
+ Includes: Filter{
+ Caps: []string{"CAP_SYS_TTY_CONFIG"},
+ },
+ },
+ {
+ Names: []string{
+ "socket",
+ },
+ Action: ActErrno,
+ ErrnoRet: &einval,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: syscall.AF_NETLINK,
+ Op: OpEqualTo,
+ },
+ {
+ Index: 2,
+ Value: syscall.NETLINK_AUDIT,
+ Op: OpEqualTo,
+ },
+ },
+ Excludes: Filter{
+ Caps: []string{"CAP_AUDIT_WRITE"},
+ },
+ },
+ {
+ Names: []string{
+ "socket",
+ },
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 2,
+ Value: syscall.NETLINK_AUDIT,
+ Op: OpNotEqual,
+ },
+ },
+ Excludes: Filter{
+ Caps: []string{"CAP_AUDIT_WRITE"},
+ },
+ },
+ {
+ Names: []string{
+ "socket",
+ },
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 0,
+ Value: syscall.AF_NETLINK,
+ Op: OpNotEqual,
+ },
+ },
+ Excludes: Filter{
+ Caps: []string{"CAP_AUDIT_WRITE"},
+ },
+ },
+ {
+ Names: []string{
+ "socket",
+ },
+ Action: ActAllow,
+ Args: []*Arg{
+ {
+ Index: 2,
+ Value: syscall.NETLINK_AUDIT,
+ Op: OpNotEqual,
+ },
+ },
+ Excludes: Filter{
+ Caps: []string{"CAP_AUDIT_WRITE"},
+ },
+ },
+ {
+ Names: []string{
+ "socket",
+ },
+ Action: ActAllow,
+ Includes: Filter{
+ Caps: []string{"CAP_AUDIT_WRITE"},
+ },
+ },
+ }
+
+ return &Seccomp{
+ DefaultAction: ActErrno,
+ ArchMap: arches(),
+ Syscalls: syscalls,
+ }
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
new file mode 100644
index 000000000..5655a7572
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go
@@ -0,0 +1,183 @@
+// +build seccomp
+
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
+package seccomp
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+)
+
+//go:generate go run -tags 'seccomp' generate.go
+
+// GetDefaultProfile returns the default seccomp profile.
+func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return setupSeccomp(DefaultProfile(), rs)
+}
+
+// LoadProfile takes a json string and decodes the seccomp profile.
+func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ var config Seccomp
+ if err := json.Unmarshal([]byte(body), &config); err != nil {
+ return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
+ }
+ return setupSeccomp(&config, rs)
+}
+
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ config := &Seccomp{}
+ if err := json.Unmarshal(body, config); err != nil {
+ return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
+ }
+ return setupSeccomp(config, rs)
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return setupSeccomp(config, specgen)
+}
+
+var nativeToSeccomp = map[string]Arch{
+ "amd64": ArchX86_64,
+ "arm64": ArchAARCH64,
+ "mips64": ArchMIPS64,
+ "mips64n32": ArchMIPS64N32,
+ "mipsel64": ArchMIPSEL64,
+ "mipsel64n32": ArchMIPSEL64N32,
+ "s390x": ArchS390X,
+}
+
+// inSlice tests whether a string is contained in a slice of strings or not.
+// Comparison is case sensitive
+func inSlice(slice []string, s string) bool {
+ for _, ss := range slice {
+ if s == ss {
+ return true
+ }
+ }
+ return false
+}
+
+func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ if config == nil {
+ return nil, nil
+ }
+
+ // No default action specified, no syscalls listed, assume seccomp disabled
+ if config.DefaultAction == "" && len(config.Syscalls) == 0 {
+ return nil, nil
+ }
+
+ newConfig := &specs.LinuxSeccomp{}
+
+ var arch string
+ var native, err = libseccomp.GetNativeArch()
+ if err == nil {
+ arch = native.String()
+ }
+
+ if len(config.Architectures) != 0 && len(config.ArchMap) != 0 {
+ return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'")
+ }
+
+ // if len(config.Architectures) == 0 then libseccomp will figure out the architecture to use
+ if len(config.Architectures) != 0 {
+ for _, a := range config.Architectures {
+ newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a))
+ }
+ }
+
+ if len(config.ArchMap) != 0 {
+ for _, a := range config.ArchMap {
+ seccompArch, ok := nativeToSeccomp[arch]
+ if ok {
+ if a.Arch == seccompArch {
+ newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch))
+ for _, sa := range a.SubArches {
+ newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa))
+ }
+ break
+ }
+ }
+ }
+ }
+
+ newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction)
+
+Loop:
+ // Loop through all syscall blocks and convert them to libcontainer format after filtering them
+ for _, call := range config.Syscalls {
+ if len(call.Excludes.Arches) > 0 {
+ if inSlice(call.Excludes.Arches, arch) {
+ continue Loop
+ }
+ }
+ if len(call.Excludes.Caps) > 0 {
+ for _, c := range call.Excludes.Caps {
+ if inSlice(rs.Process.Capabilities.Bounding, c) {
+ continue Loop
+ }
+ }
+ }
+ if len(call.Includes.Arches) > 0 {
+ if !inSlice(call.Includes.Arches, arch) {
+ continue Loop
+ }
+ }
+ if len(call.Includes.Caps) > 0 {
+ for _, c := range call.Includes.Caps {
+ if !inSlice(rs.Process.Capabilities.Bounding, c) {
+ continue Loop
+ }
+ }
+ }
+
+ if call.Name != "" && len(call.Names) != 0 {
+ return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'")
+ }
+
+ if call.Name != "" {
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet))
+ }
+
+ if len(call.Names) > 0 {
+ newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet))
+ }
+ }
+
+ return newConfig, nil
+}
+
+func createSpecsSyscall(names []string, action Action, args []*Arg, errnoRet *uint) specs.LinuxSyscall {
+ newCall := specs.LinuxSyscall{
+ Names: names,
+ Action: specs.LinuxSeccompAction(action),
+ ErrnoRet: errnoRet,
+ }
+
+ // Loop through all the arguments of the syscall and convert them
+ for _, arg := range args {
+ newArg := specs.LinuxSeccompArg{
+ Index: arg.Index,
+ Value: arg.Value,
+ ValueTwo: arg.ValueTwo,
+ Op: specs.LinuxSeccompOperator(arg.Op),
+ }
+
+ newCall.Args = append(newCall.Args, newArg)
+ }
+ return newCall
+}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+ return IsSupported()
+}
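A hedged sketch of producing a runtime-spec seccomp section from the default profile above. It assumes a build with the seccomp tag and libseccomp available; the spec is a minimal stand-in, with a bounding capability set because the cap-based include/exclude filters consult rs.Process.Capabilities.Bounding:

package main

import (
    "fmt"

    "github.com/containers/common/pkg/seccomp"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    // Minimal spec: the capability filters consult Process.Capabilities.Bounding.
    spec := &specs.Spec{
        Process: &specs.Process{
            Capabilities: &specs.LinuxCapabilities{
                Bounding: []string{"CAP_CHOWN", "CAP_SYS_CHROOT"},
            },
        },
    }

    sc, err := seccomp.GetDefaultProfile(spec)
    if err != nil {
        panic(err)
    }
    fmt.Println("default action:", sc.DefaultAction, "rules:", len(sc.Syscalls))
}
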
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
new file mode 100644
index 000000000..84a4c6ed5
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
@@ -0,0 +1,40 @@
+// +build !seccomp
+
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
+package seccomp
+
+import (
+ "errors"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+var errNotSupported = errors.New("seccomp not enabled in this build")
+
+// LoadProfile returns an error on unsupported systems
+func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, errNotSupported
+}
+
+// GetDefaultProfile returns an error on unsupported systems
+func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, errNotSupported
+}
+
+// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
+func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, errNotSupported
+}
+
+// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
+func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
+ return nil, errNotSupported
+}
+
+// IsEnabled returns true if seccomp is enabled for the host.
+func IsEnabled() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go
new file mode 100644
index 000000000..ab2a94a73
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go
@@ -0,0 +1,72 @@
+package seccomp
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "strings"
+
+ perrors "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+const statusFilePath = "/proc/self/status"
+
+// IsSupported returns true if the system has been configured to support
+// seccomp.
+func IsSupported() bool {
+ // Since Linux 3.8, the Seccomp field of the /proc/[pid]/status file
+ // provides a method of obtaining the same information, without the risk
+ // that the process is killed; see proc(5).
+ status, err := parseStatusFile(statusFilePath)
+ if err == nil {
+ _, ok := status["Seccomp"]
+ return ok
+ }
+
+ // PR_GET_SECCOMP (since Linux 2.6.23)
+ // Return (as the function result) the secure computing mode of the calling
+ // thread. If the caller is not in secure computing mode, this operation
+ // returns 0; if the caller is in strict secure computing mode, then the
+ // prctl() call will cause a SIGKILL signal to be sent to the process. If
+ // the caller is in filter mode, and this system call is allowed by the
+ // seccomp filters, it returns 2; otherwise, the process is killed with a
+ // SIGKILL signal. This operation is available only if the kernel is
+ // configured with CONFIG_SECCOMP enabled.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); !errors.Is(err, unix.EINVAL) {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); !errors.Is(err, unix.EINVAL) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// parseStatusFile reads the provided `file` into a map of strings.
+func parseStatusFile(file string) (map[string]string, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, perrors.Wrapf(err, "open status file %s", file)
+ }
+ defer f.Close()
+
+ status := make(map[string]string)
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ text := scanner.Text()
+ parts := strings.SplitN(text, ":", 2)
+
+ if len(parts) <= 1 {
+ continue
+ }
+
+ status[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, perrors.Wrapf(err, "scan status file %s", file)
+ }
+
+ return status, nil
+}
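IsSupported can be probed directly; a one-line sketch:

package main

import (
    "fmt"

    "github.com/containers/common/pkg/seccomp"
)

func main() {
    // True when /proc/self/status exposes a Seccomp field or the prctl
    // probes above indicate CONFIG_SECCOMP / CONFIG_SECCOMP_FILTER support.
    fmt.Println("seccomp supported on this host:", seccomp.IsSupported())
}
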
diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go
new file mode 100644
index 000000000..7b0436dfc
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/seccomp/types.go
@@ -0,0 +1,110 @@
+package seccomp
+
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright 2013-2018 Docker, Inc.
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Architecture is used to represent a specific architecture
+// and its sub-architectures
+type Architecture struct {
+ Arch Arch `json:"architecture"`
+ SubArches []Arch `json:"subArchitectures"`
+}
+
+// Arch used for architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchNative Arch = "SCMP_ARCH_NATIVE"
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+ ArchPARISC Arch = "SCMP_ARCH_PARISC"
+ ArchPARISC64 Arch = "SCMP_ARCH_PARISC64"
+ ArchRISCV64 Arch = "SCMP_ARCH_RISCV64"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ // ActKill results in termination of the thread that made the system call.
+ ActKill Action = "SCMP_ACT_KILL"
+ // ActKillProcess results in termination of the entire process.
+ ActKillProcess Action = "SCMP_ACT_KILL_PROCESS"
+ // ActKillThread kills the thread that violated the rule. It is the same as
+ // ActKill. All other threads from the same thread group will continue to
+ // execute.
+ ActKillThread Action = "SCMP_ACT_KILL_THREAD"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+ ActLog Action = "SCMP_ACT_LOG"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual Operator = "SCMP_CMP_GE"
+ OpGreaterThan Operator = "SCMP_CMP_GT"
+ OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op Operator `json:"op"`
+}
+
+// Filter is used to conditionally apply Seccomp rules
+type Filter struct {
+ Caps []string `json:"caps,omitempty"`
+ Arches []string `json:"arches,omitempty"`
+}
+
+// Syscall is used to match a group of syscalls in Seccomp
+type Syscall struct {
+ Name string `json:"name,omitempty"`
+ Names []string `json:"names,omitempty"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+ Comment string `json:"comment"`
+ Includes Filter `json:"includes"`
+ Excludes Filter `json:"excludes"`
+ ErrnoRet *uint `json:"errnoRet,omitempty"`
+}
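For orientation, a small hand-written profile built from these vendored types; the syscall names and actions are arbitrary examples, since real profiles are normally loaded from the shipped seccomp JSON.

package example

import "github.com/containers/common/pkg/seccomp"

// exampleProfile shows how the fields fit together; it is not a usable policy.
func exampleProfile() *seccomp.Seccomp {
	return &seccomp.Seccomp{
		DefaultAction: seccomp.ActErrno,
		Architectures: []seccomp.Arch{seccomp.ArchX86_64, seccomp.ArchX32},
		Syscalls: []*seccomp.Syscall{{
			Names:  []string{"chmod", "fchmod"},
			Action: seccomp.ActAllow,
		}},
	}
}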
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
index fcb3cab72..1935d71f1 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
@@ -1,7 +1,6 @@
package sysinfo
import (
- "fmt"
"io/ioutil"
"os"
"path"
@@ -9,6 +8,7 @@ import (
"github.com/containers/common/pkg/cgroupv2"
"github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -16,7 +16,7 @@ import (
func findCgroupMountpoints() (map[string]string, error) {
cgMounts, err := cgroups.GetCgroupMounts(false)
if err != nil {
- return nil, fmt.Errorf("failed to parse cgroup information: %v", err)
+ return nil, errors.Wrap(err, "parse cgroup information")
}
mps := make(map[string]string)
for _, m := range cgMounts {
@@ -253,8 +253,8 @@ func cgroupEnabled(mountPoint, name string) bool {
return err == nil
}
-func readProcBool(path string) bool {
- val, err := ioutil.ReadFile(path)
+func readProcBool(file string) bool {
+ val, err := ioutil.ReadFile(file)
if err != nil {
return false
}
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index 6b226eabe..ef9a947f0 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.18.0"
+const Version = "0.20.4-dev"
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml
index fe611f085..6ba46ebbb 100644
--- a/vendor/github.com/containers/storage/.cirrus.yml
+++ b/vendor/github.com/containers/storage/.cirrus.yml
@@ -106,7 +106,7 @@ lint_task:
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
- image: golang:1.13
+ image: golang:1.15
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
@@ -142,7 +142,7 @@ meta_task:
vendor_task:
container:
- image: golang:1.14
+ image: golang:1.15
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
@@ -157,6 +157,6 @@ success_task:
- meta
- vendor
container:
- image: golang:1.14
+ image: golang:1.15
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 4d1e5d262..14bee92c9 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.23.1-dev
+1.23.2
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index c00b9e47d..1e380a5ac 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -143,10 +143,6 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
dstPath := filepath.Join(dstDir, relPath)
- if err != nil {
- return err
- }
-
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go
index 72551a38d..3fc45495b 100644
--- a/vendor/github.com/containers/storage/drivers/counter.go
+++ b/vendor/github.com/containers/storage/drivers/counter.go
@@ -51,6 +51,10 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
if c.checker.IsMounted(path) {
m.count++
}
+ } else if !c.checker.IsMounted(path) {
+ // If the unmount was performed outside of this process (e.g. conmon cleanup),
+ // the ref counter would lose track of it. Check whether it is still mounted.
+ m.count = 0
}
infoOp(m)
count := m.count
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index fc7010645..1de771bb0 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -274,22 +274,28 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
- key = strings.ToLower(key)
- switch key {
- case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
+ trimkey := strings.ToLower(key)
+ trimkey = strings.TrimPrefix(trimkey, "overlay.")
+ trimkey = strings.TrimPrefix(trimkey, "overlay2.")
+ trimkey = strings.TrimPrefix(trimkey, ".")
+ switch trimkey {
+ case "override_kernel_check":
logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary")
- case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
+ case "mountopt":
o.mountOptions = val
- case ".size", "overlay.size", "overlay2.size":
+ case "size":
logrus.Debugf("overlay: size=%s", val)
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
o.quota.Size = uint64(size)
- case ".imagestore", "overlay.imagestore", "overlay2.imagestore":
+ case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
+ if val == "" {
+ continue
+ }
for _, store := range strings.Split(val, ",") {
store = filepath.Clean(store)
if !filepath.IsAbs(store) {
@@ -304,17 +310,17 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
o.imageStores = append(o.imageStores, store)
}
- case ".mount_program", "overlay.mount_program", "overlay2.mount_program":
+ case "mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
_, err := os.Stat(val)
if err != nil {
return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err)
}
o.mountProgram = val
- case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home":
+ case "skip_mount_home":
logrus.Debugf("overlay: skip_mount_home=%s", val)
o.skipMountHome, err = strconv.ParseBool(val)
- case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors":
+ case "ignore_chown_errors":
logrus.Debugf("overlay: ignore_chown_errors=%s", val)
o.ignoreChownErrors, err = strconv.ParseBool(val)
if err != nil {
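A standalone sketch of the key normalization this hunk introduces, so the accepted option spellings are easier to see; normalizeOverlayKey is an invented name, not part of the driver.

package example

import "strings"

// normalizeOverlayKey reduces ".size", "overlay.size" and "overlay2.size"
// (and likewise for the other options) to the bare key the switch matches on.
func normalizeOverlayKey(key string) string {
	k := strings.ToLower(key)
	k = strings.TrimPrefix(k, "overlay.")
	k = strings.TrimPrefix(k, "overlay2.")
	k = strings.TrimPrefix(k, ".")
	return k
}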
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 36101194e..77eef7598 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -1,3 +1,5 @@
+go 1.15
+
module github.com/containers/storage
require (
@@ -6,7 +8,7 @@ require (
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.1.0
- github.com/klauspost/compress v1.10.10
+ github.com/klauspost/compress v1.10.11
github.com/klauspost/pgzip v1.2.4
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
@@ -25,5 +27,3 @@ require (
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
gotest.tools v2.2.0+incompatible
)
-
-go 1.13
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index d9b0d6396..04d48eb4f 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -62,8 +62,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
-github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
+github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 937bf8c3a..2a34c84cc 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -2630,6 +2630,9 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
if err != nil {
return "", err
}
+
+ s.graphLock.Lock()
+ defer s.graphLock.Unlock()
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
index 8dada3eda..8dada3eda 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE
+++ b/vendor/github.com/go-logr/logr/LICENSE
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 000000000..aca17f382
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,181 @@
+# A more minimal logging API for Go
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what
+he has to say, and it largely aligns with my own experiences. Too many
+choices of levels means inconsistent logs.
+
+This package offers a purely abstract interface, based on these ideas but with
+a few twists. Code can depend on just this interface and have the actual
+logging implementation be injected from callers. Ideally only `main()` knows
+what logging implementation is being used.
+
+# Differences from Dave's ideas
+
+The main differences are:
+
+1) Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. I disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. I
+restrict the API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2) Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug". Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+This is a BETA grade API.
+
+There are implementations for the following logging libraries:
+
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger):
+ [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+
+# FAQ
+
+## Conceptual
+
+## Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+ key-value pairs, it's much easier to query your structured logs for
+ particular values by filtering on the contents of a particular key --
+ think searching request logs for error codes, Kubernetes reconcilers for
+ the name and namespace of the reconciled object, etc
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+ Similarly to searchability, if you maintain conventions around your
+ keys, it becomes easy to gather all log lines related to a particular
+ concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+ structure to your logs, you've got more precise control over how much
+ information is logged -- you might choose in a particular configuration
+ to log certain keys but not others, only log lines where a certain key
+ matches a certain value, etc, instead of just having v-levels and names
+ to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+ data that you want to log is inherently structured (think tuple-like
+ objects). Structured logs allow you to preserve that structure when
+ outputting.
+
+## Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+## Why not more named levels, like Warning?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+## Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+ regular expressions, etc
+
+- They don't store structured data well, since contents are flattened into
+ a string
+
+- They're not cross-referenceable
+
+- They don't compress easily, since the message is not constant
+
+(unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys)
+
+## Practical
+
+## Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+## What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` function to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+## But I *really* want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+ and use that as a message
+
+2. For every place you'd write a format specifier, look to the word before
+ it, and add that as a key value pair
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+ responseCode, err)` becomes `logger.Error(err, "client returned an
+ error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+ seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+ response when requesting url", "attempt", retries, "after
+ seconds", seconds, "url", url)`
+
+If you *really* must use a format string, place it as a key value, and
+call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to
+reflect over type %T")` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
+this is necessary should be few and far between.
+
+## How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack".
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs).
+
+## How do I choose my keys?
+
+- make your keys human-readable
+- constant keys are generally a good idea
+- be consistent across your codebase
+- keys should naturally match parts of the message string
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
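As a compilable counterpart to the conversion examples above, a rough sketch; logRetry, obj and the other parameter names are placeholders rather than anything defined by logr.

package example

import (
	"fmt"

	"github.com/go-logr/logr"
)

// logRetry restates the README's format-string conversions as real calls.
func logRetry(logger logr.Logger, obj interface{}, retries, seconds int, url string) {
	logger.Info("unable to reflect over type", "type", fmt.Sprintf("%T", obj))
	logger.V(4).Info("got a retry-after response when requesting url",
		"attempt", retries, "after seconds", seconds, "url", url)
}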
diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod
new file mode 100644
index 000000000..591884e91
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-logr/logr
+
+go 1.14
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 000000000..520c4fe55
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package logr defines abstract interfaces for logging. Packages can depend on
+// these interfaces and callers can implement logging in whatever way is
+// appropriate.
+//
+// This design derives from Dave Cheney's blog:
+// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+//
+// This is a BETA grade API. Until there is a significant 2nd implementation,
+// I don't really know how it will change.
+//
+// The logging specifically makes it non-trivial to use format strings, to encourage
+// attaching structured information instead of unstructured format strings.
+//
+// Usage
+//
+// Logging is done using a Logger. Loggers can have name prefixes and named
+// values attached, so that all log messages logged with that Logger have some
+// base context associated.
+//
+// The term "key" is used to refer to the name associated with a particular
+// value, to disambiguate it from the general Logger name.
+//
+// For instance, suppose we're trying to reconcile the state of an object, and
+// we want to log that we've made some decision.
+//
+// With the traditional log package, we might write:
+// log.Printf(
+// "decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
+//
+// With logr's structured logging, we'd write:
+// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
+// // and the named value target-type=Foo, for extra context.
+// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
+//
+// // later on...
+// log.Info("setting field foo on object", "value", targetValue, "object", object)
+//
+// Depending on our logging implementation, we could then make logging decisions
+// based on field values (like only logging such events for objects in a certain
+// namespace), or copy the structured information into a structured log store.
+//
+// For logging errors, Logger has a method called Error. Suppose we wanted to
+// log an error while reconciling. With the traditional log package, we might
+// write:
+// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
+//
+// With logr, we'd instead write:
+// // assuming the above setup for log
+// log.Error(err, "unable to reconcile object", "object", object)
+//
+// This functions similarly to:
+// log.Info("unable to reconcile object", "error", err, "object", object)
+//
+// However, it ensures that a standard key for the error value ("error") is used
+// across all error logging. Furthermore, certain implementations may choose to
+// attach additional information (such as stack traces) on calls to Error, so
+// it's preferred to use Error to log errors.
+//
+// Parts of a log line
+//
+// Each log message from a Logger has four types of context:
+// logger name, log verbosity, log message, and the named values.
+//
+// The Logger name consists of a series of name "segments" added by successive
+// calls to WithName. These name segments will be joined in some way by the
+// underlying implementation. It is strongly recommended that name segments
+// contain simple identifiers (letters, digits, and hyphen), and do not contain
+// characters that could muddle the log output or confuse the joining operation
+// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
+//
+// Log verbosity represents how little a log matters. Level zero, the default,
+// matters most. Increasing levels matter less and less. Try to avoid lots of
+// different verbosity levels, and instead provide useful keys, logger names,
+// and log messages for users to filter on. It's illegal to pass a log level
+// below zero.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string.
+//
+// Variable information can then be attached using named values (key/value
+// pairs). Keys are arbitrary strings, while values may be any Go value.
+//
+// Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// * be human-readable and meaningful (not auto-generated or simple ordinals)
+// * be constant (not dependent on input data)
+// * contain only printable characters
+// * not contain whitespace or punctuation
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+//
+// - `"caller"`: the calling information (file/line) of a particular log line.
+// - `"error"`: the underlying error value in the `Error` method.
+// - `"level"`: the log level.
+// - `"logger"`: the name of the associated logger.
+// - `"msg"`: the log message.
+// - `"stacktrace"`: the stack trace associated with a particular log line or
+// error (often from the `Error` message).
+// - `"ts"`: the timestamp for a log line.
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+package logr
+
+// TODO: consider adding back in format strings if they're really needed
+// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
+// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats
+
+// Logger represents the ability to log messages, both errors and not.
+type Logger interface {
+ // Enabled tests whether this Logger is enabled. For example, commandline
+ // flags might be used to set the logging verbosity and disable some info
+ // logs.
+ Enabled() bool
+
+ // Info logs a non-error message with the given key/value pairs as context.
+ //
+ // The msg argument should be used to add some constant description to
+ // the log line. The key/value pairs can then be used to add additional
+ // variable information. The key/value pairs should alternate string
+ // keys and arbitrary values.
+ Info(msg string, keysAndValues ...interface{})
+
+ // Error logs an error, with the given message and key/value pairs as context.
+ // It functions similarly to calling Info with the "error" named value, but may
+ // have unique behavior, and should be preferred for logging errors (see the
+ // package documentations for more information).
+ //
+ // The msg field should be used to add context to any underlying error,
+ // while the err field should be used to attach the actual error that
+ // triggered this log line, if present.
+ Error(err error, msg string, keysAndValues ...interface{})
+
+ // V returns a Logger value for a specific verbosity level, relative to
+ // this Logger. In other words, V values are additive. A higher verbosity
+ // level means a log message is less important. It's illegal to pass a log
+ // level less than zero.
+ V(level int) Logger
+
+ // WithValues adds some key-value pairs of context to a logger.
+ // See Info for documentation on how key/value pairs work.
+ WithValues(keysAndValues ...interface{}) Logger
+
+ // WithName adds a new element to the logger's name.
+ // Successive calls with WithName continue to append
+ // suffixes to the logger's name. It's strongly reccomended
+ // that name segments contain only letters, digits, and hyphens
+ // (see the package documentation for more information).
+ WithName(name string) Logger
+}
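A short sketch of code that depends only on the interface above, with the concrete logger injected by the caller; reconcile and doWork are invented names for illustration.

package example

import "github.com/go-logr/logr"

// doWork stands in for whatever the component actually does.
func doWork() error { return nil }

// reconcile receives a logr.Logger and never imports a concrete implementation;
// main() would construct one via an adapter such as zapr, glogr or stdr.
func reconcile(log logr.Logger, name, namespace string) {
	log = log.WithName("reconcilers").WithValues("target-type", "Foo")
	log.V(1).Info("starting reconcile", "name", name, "namespace", namespace)
	if err := doWork(); err != nil {
		log.Error(err, "unable to reconcile object", "name", name)
	}
}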
diff --git a/vendor/github.com/gorilla/schema/.travis.yml b/vendor/github.com/gorilla/schema/.travis.yml
deleted file mode 100644
index 5f51dce4e..000000000
--- a/vendor/github.com/gorilla/schema/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-sudo: false
-
-matrix:
- include:
- - go: 1.5
- - go: 1.6
- - go: 1.7
- - go: 1.8
- - go: tip
- allow_failures:
- - go: tip
-
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d .)
- - go vet $(go list ./... | grep -v /vendor/)
- - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/schema/decoder.go b/vendor/github.com/gorilla/schema/decoder.go
index 5afbd921f..025e438b5 100644
--- a/vendor/github.com/gorilla/schema/decoder.go
+++ b/vendor/github.com/gorilla/schema/decoder.go
@@ -152,9 +152,15 @@ type fieldWithPrefix struct {
func isEmptyFields(fields []fieldWithPrefix, src map[string][]string) bool {
for _, f := range fields {
for _, path := range f.paths(f.prefix) {
- if !isEmpty(f.typ, src[path]) {
+ v, ok := src[path]
+ if ok && !isEmpty(f.typ, v) {
return false
}
+ for key := range src {
+ if !isEmpty(f.typ, src[key]) && strings.HasPrefix(key, path) {
+ return false
+ }
+ }
}
}
return true
@@ -182,6 +188,17 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
}
v = v.Elem()
}
+
+ // alloc embedded structs
+ if v.Type().Kind() == reflect.Struct {
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous == true {
+ field.Set(reflect.New(field.Type().Elem()))
+ }
+ }
+ }
+
v = v.FieldByName(name)
}
// Don't even bother for unexported fields.
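To illustrate the embedded-struct allocation above, a hedged sketch of decoding into a form with an embedded pointer struct; the types, tags and helper are made up and assume gorilla/schema's usual promotion of embedded fields.

package example

import "github.com/gorilla/schema"

type Pagination struct {
	Page  int `schema:"page"`
	Limit int `schema:"limit"`
}

type SearchForm struct {
	*Pagination        // nil before the fix; now allocated on demand
	Query       string `schema:"q"`
}

// decodeForm decodes query parameters such as {"q": ..., "page": ...}.
func decodeForm(values map[string][]string) (*SearchForm, error) {
	var f SearchForm
	dec := schema.NewDecoder()
	if err := dec.Decode(&f, values); err != nil {
		return nil, err
	}
	return &f, nil
}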
diff --git a/vendor/github.com/gorilla/schema/encoder.go b/vendor/github.com/gorilla/schema/encoder.go
index bf1d511e6..f0ed63121 100644
--- a/vendor/github.com/gorilla/schema/encoder.go
+++ b/vendor/github.com/gorilla/schema/encoder.go
@@ -57,6 +57,13 @@ func isZero(v reflect.Value) bool {
}
return z
case reflect.Struct:
+ type zero interface {
+ IsZero() bool
+ }
+ if v.Type().Implements(reflect.TypeOf((*zero)(nil)).Elem()) {
+ iz := v.MethodByName("IsZero").Call([]reflect.Value{})[0]
+ return iz.Interface().(bool)
+ }
z := true
for i := 0; i < v.NumField(); i++ {
z = z && isZero(v.Field(i))
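A small sketch of the IsZero detection the encoder change relies on; zeroer mirrors the anonymous interface declared in the patch, and time.Time is simply a convenient standard-library type that satisfies it.

package main

import (
	"fmt"
	"time"
)

// zeroer mirrors the interface the patched isZero probes for.
type zeroer interface{ IsZero() bool }

func main() {
	// time.Time implements IsZero, so the encoder can now treat an unset
	// timestamp as zero instead of recursing into its unexported fields.
	var t time.Time
	var z zeroer = t
	fmt.Println(z.IsZero()) // true
}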
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index bda4021ef..6bce4e87d 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -43,6 +43,11 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
enc := ct[symbol]
b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
+ if false {
+ if enc.nBits == 0 {
+ panic("nbits 0")
+ }
+ }
b.nBits += enc.nBits
}
@@ -54,6 +59,14 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
sh := b.nBits & 63
combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
b.bitContainer |= combined << sh
+ if false {
+ if encA.nBits == 0 {
+ panic("nbitsA 0")
+ }
+ if encB.nBits == 0 {
+ panic("nbitsB 0")
+ }
+ }
b.nBits += encA.nBits + encB.nBits
}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 0843cb014..f9ed5f830 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -77,8 +77,11 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
// Each symbol present maximum once or too well distributed.
return nil, false, ErrIncompressible
}
-
- if s.Reuse == ReusePolicyPrefer && canReuse {
+ if s.Reuse == ReusePolicyMust && !canReuse {
+ // We must reuse, but we can't.
+ return nil, false, ErrIncompressible
+ }
+ if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
keepTable := s.cTable
keepTL := s.actualTableLog
s.cTable = s.prevTable
@@ -90,6 +93,9 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
s.OutData = s.Out
return s.Out, true, nil
}
+ if s.Reuse == ReusePolicyMust {
+ return nil, false, ErrIncompressible
+ }
// Do not attempt to re-use later.
s.prevTable = s.prevTable[:0]
}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index a03b2634a..41703bba4 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -32,7 +32,7 @@ const use8BitTables = true
// The size of the input may be larger than the table definition.
// Any content remaining after the table definition will be returned.
// If no Scratch is provided a new one is allocated.
-// The returned Scratch can be used for decoding input using this table.
+// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s, err = s.prepare(in)
if err != nil {
@@ -58,8 +58,8 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s.symbolLen = uint16(oSize)
in = in[iSize:]
} else {
- if len(in) <= int(iSize) {
- return s, nil, errors.New("input too small for table")
+ if len(in) < int(iSize) {
+ return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in))
}
// FSE compressed weights
s.fse.DecompressLimit = 255
@@ -138,15 +138,33 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
if len(s.dt.single) != tSize {
s.dt.single = make([]dEntrySingle, tSize)
}
+ cTable := s.prevTable
+ if cap(cTable) < maxSymbolValue+1 {
+ cTable = make([]cTableEntry, 0, maxSymbolValue+1)
+ }
+ cTable = cTable[:maxSymbolValue+1]
+ s.prevTable = cTable[:s.symbolLen]
+ s.prevTableLog = s.actualTableLog
+
for n, w := range s.huffWeight[:s.symbolLen] {
if w == 0 {
+ cTable[n] = cTableEntry{
+ val: 0,
+ nBits: 0,
+ }
continue
}
length := (uint32(1) << w) >> 1
d := dEntrySingle{
entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
}
+
rank := &rankStats[w]
+ cTable[n] = cTableEntry{
+ val: uint16(*rank >> (w - 1)),
+ nBits: uint8(d.entry),
+ }
+
single := s.dt.single[*rank : *rank+length]
for i := range single {
single[i] = d
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 177d6c4ea..5dd66854b 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -55,6 +55,9 @@ const (
// ReusePolicyNone will disable re-use of tables.
// This is slightly faster than ReusePolicyAllow but may produce larger output.
ReusePolicyNone
+
+ // ReusePolicyMust must allow reuse and produce smaller output.
+ ReusePolicyMust
)
type Scratch struct {
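Roughly how a caller might exercise the new ReusePolicyMust policy; with a fresh Scratch there is no previous table to reuse, so the call is expected to report ErrIncompressible (normally the table would be seeded first, for example via ReadTable on an earlier block).

package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	var s huff0.Scratch
	s.Reuse = huff0.ReusePolicyMust // fail rather than emit a new table

	data := []byte("aaaaaabbbbccdd aaaaaabbbbccdd aaaaaabbbbccdd")
	out, reused, err := huff0.Compress1X(data, &s)
	if err == huff0.ErrIncompressible {
		fmt.Println("no reusable table available")
		return
	}
	fmt.Println(len(out), reused, err)
}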
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index c584f6aab..be718afd4 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -295,7 +295,7 @@ func (b *blockEnc) encodeRaw(a []byte) {
b.output = bh.appendTo(b.output[:0])
b.output = append(b.output, a...)
if debug {
- println("Adding RAW block, length", len(a))
+ println("Adding RAW block, length", len(a), "last:", b.last)
}
}
@@ -308,7 +308,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
dst = bh.appendTo(dst)
dst = append(dst, src...)
if debug {
- println("Adding RAW block, length", len(src))
+ println("Adding RAW block, length", len(src), "last:", b.last)
}
return dst
}
@@ -322,7 +322,7 @@ func (b *blockEnc) encodeLits(raw bool) error {
// Don't compress extremely small blocks
if len(b.literals) < 32 || raw {
if debug {
- println("Adding RAW block, length", len(b.literals))
+ println("Adding RAW block, length", len(b.literals), "last:", b.last)
}
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output)
@@ -349,7 +349,7 @@ func (b *blockEnc) encodeLits(raw bool) error {
switch err {
case huff0.ErrIncompressible:
if debug {
- println("Adding RAW block, length", len(b.literals))
+ println("Adding RAW block, length", len(b.literals), "last:", b.last)
}
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output)
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index c56d2241f..95ebc3d84 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -190,6 +190,7 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling = s.filling[:0]
s.headerWritten = true
s.fullFrameWritten = true
+ s.eofWritten = true
return nil
}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
index 7b60f8bb3..5fceeb635 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -60,7 +60,7 @@ type Process struct {
SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
}
-// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process.
+// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process.
// http://man7.org/linux/man-pages/man7/capabilities.7.html
type LinuxCapabilities struct {
// Bounding is the set of capabilities checked by the kernel.
@@ -90,7 +90,7 @@ type User struct {
// GID is the group id.
GID uint32 `json:"gid" platform:"linux,solaris"`
// Umask is the umask for the init process.
- Umask uint32 `json:"umask,omitempty" platform:"linux,solaris"`
+ Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"`
// AdditionalGids are additional group ids set for the container's process.
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
// Username is the user name.
@@ -354,7 +354,7 @@ type LinuxRdma struct {
// LinuxResources has container runtime resource constraints
type LinuxResources struct {
- // Devices configures the device whitelist.
+ // Devices configures the device allowlist.
Devices []LinuxDeviceCgroup `json:"devices,omitempty"`
// Memory restriction configuration
Memory *LinuxMemory `json:"memory,omitempty"`
@@ -372,6 +372,8 @@ type LinuxResources struct {
// Limits are a set of key value pairs that define RDMA resource limits,
// where the key is device name and value is resource limits.
Rdma map[string]LinuxRdma `json:"rdma,omitempty"`
+ // Unified resources.
+ Unified map[string]string `json:"unified,omitempty"`
}
// LinuxDevice represents the mknod information for a Linux special device file
@@ -392,7 +394,8 @@ type LinuxDevice struct {
GID *uint32 `json:"gid,omitempty"`
}
-// LinuxDeviceCgroup represents a device rule for the whitelist controller
+// LinuxDeviceCgroup represents a device rule for the devices specified to
+// the device controller
type LinuxDeviceCgroup struct {
// Allow or deny
Allow bool `json:"allow"`
@@ -628,6 +631,7 @@ const (
ArchS390X Arch = "SCMP_ARCH_S390X"
ArchPARISC Arch = "SCMP_ARCH_PARISC"
ArchPARISC64 Arch = "SCMP_ARCH_PARISC64"
+ ArchRISCV64 Arch = "SCMP_ARCH_RISCV64"
)
// LinuxSeccompAction taken upon Seccomp rule match
@@ -635,12 +639,13 @@ type LinuxSeccompAction string
// Define actions for Seccomp rules
const (
- ActKill LinuxSeccompAction = "SCMP_ACT_KILL"
- ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP"
- ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
- ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
- ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
- ActLog LinuxSeccompAction = "SCMP_ACT_LOG"
+ ActKill LinuxSeccompAction = "SCMP_ACT_KILL"
+ ActKillProcess LinuxSeccompAction = "SCMP_ACT_KILL_PROCESS"
+ ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP"
+ ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
+ ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
+ ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
+ ActLog LinuxSeccompAction = "SCMP_ACT_LOG"
)
// LinuxSeccompOperator used to match syscall arguments in Seccomp
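Because Umask is now a pointer, callers can distinguish "unset" from an explicit zero by taking the address of a value; a minimal sketch:

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	umask := uint32(0o022)
	p := specs.Process{
		User: specs.User{UID: 0, GID: 0, Umask: &umask},
	}
	if p.User.Umask != nil {
		fmt.Printf("umask explicitly set to %04o\n", *p.User.Umask)
	}
}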
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
index 89dce34be..e2e64c663 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -1,5 +1,23 @@
package specs
+// ContainerState represents the state of a container.
+type ContainerState string
+
+const (
+ // StateCreating indicates that the container is being created
+ StateCreating ContainerState = "creating"
+
+ // StateCreated indicates that the runtime has finished the create operation
+ StateCreated ContainerState = "created"
+
+ // StateRunning indicates that the container process has executed the
+ // user-specified program but has not exited
+ StateRunning ContainerState = "running"
+
+ // StateStopped indicates that the container process has exited
+ StateStopped ContainerState = "stopped"
+)
+
// State holds information about the runtime state of the container.
type State struct {
// Version is the version of the specification that is supported.
@@ -7,7 +25,7 @@ type State struct {
// ID is the container ID
ID string `json:"id"`
// Status is the runtime status of the container.
- Status string `json:"status"`
+ Status ContainerState `json:"status"`
// Pid is the process ID for the container process.
Pid int `json:"pid,omitempty"`
// Bundle is the path to the container's bundle directory.
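A sketch of the typed comparison the new constants enable; isRunning is an invented helper, not part of the spec package.

package example

import specs "github.com/opencontainers/runtime-spec/specs-go"

// isRunning compares the typed Status field against a ContainerState constant.
func isRunning(st specs.State) bool {
	return st.Status == specs.StateRunning
}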
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
index c757c20e0..6d3268902 100644
--- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
@@ -29,9 +29,6 @@ var (
type Generator struct {
Config *rspec.Spec
HostSpecific bool
- // This is used to keep a cache of the ENVs added to improve
- // performance when adding a huge number of ENV variables
- envMap map[string]int
}
// ExportOptions have toggles for exporting only certain parts of the specification
@@ -239,12 +236,7 @@ func New(os string) (generator Generator, err error) {
}
}
- envCache := map[string]int{}
- if config.Process != nil {
- envCache = createEnvCacheMap(config.Process.Env)
- }
-
- return Generator{Config: &config, envMap: envCache}, nil
+ return Generator{Config: &config}, nil
}
// NewFromSpec creates a configuration Generator from a given
@@ -254,14 +246,8 @@ func New(os string) (generator Generator, err error) {
//
// generator := Generator{Config: config}
func NewFromSpec(config *rspec.Spec) Generator {
- envCache := map[string]int{}
- if config != nil && config.Process != nil {
- envCache = createEnvCacheMap(config.Process.Env)
- }
-
return Generator{
Config: config,
- envMap: envCache,
}
}
@@ -287,27 +273,11 @@ func NewFromTemplate(r io.Reader) (Generator, error) {
if err := json.NewDecoder(r).Decode(&config); err != nil {
return Generator{}, err
}
-
- envCache := map[string]int{}
- if config.Process != nil {
- envCache = createEnvCacheMap(config.Process.Env)
- }
-
return Generator{
Config: &config,
- envMap: envCache,
}, nil
}
-// createEnvCacheMap creates a hash map with the ENV variables given by the config
-func createEnvCacheMap(env []string) map[string]int {
- envMap := make(map[string]int, len(env))
- for i, val := range env {
- envMap[val] = i
- }
- return envMap
-}
-
// SetSpec sets the configuration in the Generator g.
//
// Deprecated: Replace with:
@@ -444,12 +414,6 @@ func (g *Generator) SetProcessUsername(username string) {
g.Config.Process.User.Username = username
}
-// SetProcessUmask sets g.Config.Process.User.Umask.
-func (g *Generator) SetProcessUmask(umask uint32) {
- g.initConfigProcess()
- g.Config.Process.User.Umask = umask
-}
-
// SetProcessGID sets g.Config.Process.User.GID.
func (g *Generator) SetProcessGID(gid uint32) {
g.initConfigProcess()
@@ -492,44 +456,21 @@ func (g *Generator) ClearProcessEnv() {
return
}
g.Config.Process.Env = []string{}
- // Clear out the env cache map as well
- g.envMap = map[string]int{}
}
// AddProcessEnv adds name=value into g.Config.Process.Env, or replaces an
// existing entry with the given name.
func (g *Generator) AddProcessEnv(name, value string) {
- if name == "" {
- return
- }
-
- g.initConfigProcess()
- g.addEnv(fmt.Sprintf("%s=%s", name, value), name)
-}
-
-// AddMultipleProcessEnv adds multiple name=value into g.Config.Process.Env, or replaces
-// existing entries with the given name.
-func (g *Generator) AddMultipleProcessEnv(envs []string) {
g.initConfigProcess()
- for _, val := range envs {
- split := strings.SplitN(val, "=", 2)
- g.addEnv(val, split[0])
- }
-}
-
-// addEnv looks through adds ENV to the Process and checks envMap for
-// any duplicates
-// This is called by both AddMultipleProcessEnv and AddProcessEnv
-func (g *Generator) addEnv(env, key string) {
- if idx, ok := g.envMap[key]; ok {
- // The ENV exists in the cache, so change its value in g.Config.Process.Env
- g.Config.Process.Env[idx] = env
- } else {
- // else the env doesn't exist, so add it and add it's index to g.envMap
- g.Config.Process.Env = append(g.Config.Process.Env, env)
- g.envMap[key] = len(g.Config.Process.Env) - 1
+ env := fmt.Sprintf("%s=%s", name, value)
+ for idx := range g.Config.Process.Env {
+ if strings.HasPrefix(g.Config.Process.Env[idx], name+"=") {
+ g.Config.Process.Env[idx] = env
+ return
+ }
}
+ g.Config.Process.Env = append(g.Config.Process.Env, env)
}
// AddProcessRlimits adds rlimit into g.Config.Process.Rlimits.
@@ -1502,7 +1443,7 @@ func (g *Generator) AddDevice(device rspec.LinuxDevice) {
return
}
if dev.Type == device.Type && dev.Major == device.Major && dev.Minor == device.Minor {
- fmt.Fprintf(os.Stderr, "WARNING: Creating device %q with same type, major and minor as existing %q.\n", device.Path, dev.Path)
+ fmt.Fprintln(os.Stderr, "WARNING: The same type, major and minor should not be used for multiple devices.")
}
}
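The rewritten AddProcessEnv boils down to a replace-or-append pass over the env slice; a standalone sketch with an invented setEnv helper:

package example

import (
	"fmt"
	"strings"
)

// setEnv replaces an existing NAME=... entry in place, otherwise appends one.
func setEnv(envs []string, name, value string) []string {
	entry := fmt.Sprintf("%s=%s", name, value)
	for i := range envs {
		if strings.HasPrefix(envs[i], name+"=") {
			envs[i] = entry
			return envs
		}
	}
	return append(envs, entry)
}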
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go
index 8a8dc3970..5fee5a3b2 100644
--- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default.go
@@ -566,20 +566,6 @@ func DefaultProfile(rs *specs.Spec) *rspec.LinuxSeccomp {
},
}...)
/* Flags parameter of the clone syscall is the 2nd on s390 */
- syscalls = append(syscalls, []rspec.LinuxSyscall{
- {
- Names: []string{"clone"},
- Action: rspec.ActAllow,
- Args: []rspec.LinuxSeccompArg{
- {
- Index: 1,
- Value: 2080505856,
- ValueTwo: 0,
- Op: rspec.OpMaskedEqual,
- },
- },
- },
- }...)
}
return &rspec.LinuxSeccomp{
diff --git a/vendor/github.com/seccomp/containers-golang/conversion.go b/vendor/github.com/seccomp/containers-golang/conversion.go
new file mode 100644
index 000000000..05564487b
--- /dev/null
+++ b/vendor/github.com/seccomp/containers-golang/conversion.go
@@ -0,0 +1,32 @@
+package seccomp // import "github.com/seccomp/containers-golang"
+
+import "fmt"
+
+var goArchToSeccompArchMap = map[string]Arch{
+ "386": ArchX86,
+ "amd64": ArchX86_64,
+ "amd64p32": ArchX32,
+ "arm": ArchARM,
+ "arm64": ArchAARCH64,
+ "mips": ArchMIPS,
+ "mips64": ArchMIPS64,
+ "mips64le": ArchMIPSEL64,
+ "mips64p32": ArchMIPS64N32,
+ "mips64p32le": ArchMIPSEL64N32,
+ "mipsle": ArchMIPSEL,
+ "ppc": ArchPPC,
+ "ppc64": ArchPPC64,
+ "ppc64le": ArchPPC64LE,
+ "s390": ArchS390,
+ "s390x": ArchS390X,
+}
+
+// GoArchToSeccompArch converts a runtime.GOARCH to a seccomp `Arch`. The
+// function returns an error if the architecture conversion is not supported.
+func GoArchToSeccompArch(goArch string) (Arch, error) {
+ arch, ok := goArchToSeccompArchMap[goArch]
+ if !ok {
+ return "", fmt.Errorf("unsupported go arch provided: %s", goArch)
+ }
+ return arch, nil
+}
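A hedged usage sketch for the new converter, mapping the running binary's runtime.GOARCH to its seccomp architecture:

package main

import (
	"fmt"
	"runtime"

	seccomp "github.com/seccomp/containers-golang"
)

func main() {
	arch, err := seccomp.GoArchToSeccompArch(runtime.GOARCH)
	if err != nil {
		fmt.Println("no seccomp mapping for this architecture:", err)
		return
	}
	fmt.Println("seccomp arch:", arch)
}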
diff --git a/vendor/github.com/seccomp/containers-golang/go.mod b/vendor/github.com/seccomp/containers-golang/go.mod
index 2b56d46fd..8e21f0f99 100644
--- a/vendor/github.com/seccomp/containers-golang/go.mod
+++ b/vendor/github.com/seccomp/containers-golang/go.mod
@@ -1,16 +1,16 @@
module github.com/seccomp/containers-golang
-go 1.13
+go 1.14
require (
github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/hashicorp/go-multierror v1.0.0 // indirect
- github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+ github.com/hashicorp/go-multierror v1.1.0 // indirect
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445
github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.3.0 // indirect
+ github.com/opencontainers/selinux v1.6.0 // indirect
github.com/seccomp/libseccomp-golang v0.9.1
- github.com/sirupsen/logrus v1.4.2 // indirect
+ github.com/sirupsen/logrus v1.6.0 // indirect
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
- golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc
+ golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666
)
diff --git a/vendor/github.com/seccomp/containers-golang/go.sum b/vendor/github.com/seccomp/containers-golang/go.sum
index ba00acd09..d7fc538c0 100644
--- a/vendor/github.com/seccomp/containers-golang/go.sum
+++ b/vendor/github.com/seccomp/containers-golang/go.sum
@@ -1,3 +1,4 @@
+github.com/blang/semver v1.1.0 h1:ol1rO7QQB5uy7umSNV7VAmLugfLRD+17sYJujRNYPhg=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -6,8 +7,12 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
@@ -16,23 +21,33 @@ github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777 h1:7
github.com/opencontainers/runtime-spec v1.0.2-0.20191007145322-19e92ca81777/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445 h1:y8cfsJRmn8g3VkM4IDpusKSgMUZEXhudm/BuYANLozE=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@@ -46,3 +61,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc h1:EinpED/Eb9JUgDi6pkoFjw+tz69c3lHUZr2+Va84S0w=
golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666 h1:gVCS+QOncANNPlmlO1AhlU3oxs4V9z+gTtPwIk3p2N8=
+golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
index 2e3e337ac..86c73bf99 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go
@@ -45,7 +45,7 @@ func arches() []Architecture {
}
}
-// DefaultProfile defines the whitelist for the default seccomp profile.
+// DefaultProfile defines the allowlist for the default seccomp profile.
func DefaultProfile() *Seccomp {
einval := uint(syscall.EINVAL)
diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
index 936a9a641..763f22982 100644
--- a/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
+++ b/vendor/github.com/seccomp/containers-golang/seccomp_unsupported.go
@@ -7,11 +7,13 @@
package seccomp // import "github.com/seccomp/containers-golang"
import (
- "fmt"
+ "errors"
"github.com/opencontainers/runtime-spec/specs-go"
)
+var errNotSupported = errors.New("seccomp not enabled in this build")
+
// DefaultProfile returns a nil pointer on unsupported systems.
func DefaultProfile() *Seccomp {
return nil
@@ -19,22 +21,22 @@ func DefaultProfile() *Seccomp {
// LoadProfile returns an error on unsupported systems
func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, fmt.Errorf("Seccomp not supported on this platform")
+ return nil, errNotSupported
}
// GetDefaultProfile returns an error on unsupported systems
func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, fmt.Errorf("Seccomp not supported on this platform")
+ return nil, errNotSupported
}
// LoadProfileFromBytes takes a byte slice and decodes the seccomp profile.
func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, fmt.Errorf("Seccomp not supported on this platform")
+ return nil, errNotSupported
}
// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp
func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) {
- return nil, fmt.Errorf("Seccomp not supported on this platform")
+ return nil, errNotSupported
}
// IsEnabled returns true if seccomp is enabled for the host.
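A minimal sketch (not part of the diff) of the sentinel-error pattern this hunk adopts: a single package-level error value replaces per-call fmt.Errorf formatting, so callers inside the package can compare against it cheaply. The loadProfile wrapper and main function below are hypothetical; only the variable mirrors the change.

```go
package main

import (
	"errors"
	"fmt"
)

// errNotSupported mirrors the sentinel added above: one fixed error value
// instead of a freshly formatted string on every call.
var errNotSupported = errors.New("seccomp not enabled in this build")

// loadProfile is a hypothetical stand-in for the stubbed-out entry points.
func loadProfile() error {
	return errNotSupported
}

func main() {
	if err := loadProfile(); errors.Is(err, errNotSupported) {
		fmt.Println("seccomp unavailable, continuing without a filter")
	}
}
```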
diff --git a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml
new file mode 100644
index 000000000..feef144d1
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml
@@ -0,0 +1,37 @@
+# Travis CI configuration for libseccomp-golang
+
+# https://docs.travis-ci.com/user/reference/bionic
+# https://wiki.ubuntu.com/Releases
+
+dist: bionic
+sudo: false
+
+notifications:
+ email:
+ on_success: always
+ on_failure: always
+
+arch:
+ - amd64
+
+os:
+ - linux
+
+language: go
+
+addons:
+ apt:
+ packages:
+ - build-essential
+ # TODO: use the main libseccomp git repo instead of a distro package
+ - libseccomp2
+ - libseccomp-dev
+
+install:
+ - go get -u golang.org/x/lint/golint
+
+# run all of the tests independently, fail if any of the tests error
+script:
+ - make check-syntax
+ - make lint
+ - make check
diff --git a/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
index 744e5cd64..d6862cbd5 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/SUBMITTING_PATCHES
+++ b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md
@@ -8,11 +8,11 @@ to the rules described here, but by following the instructions below you
should have a much easier time getting your work merged with the upstream
project.
-* Test Your Code
+## Test Your Code Using Existing Tests
-There are two possible tests you can run to verify your code. The first test
-is used to check the formatting and coding style of your changes, you can run
-the test with the following command:
+There are two possible tests you can run to verify your code. The first
+test is used to check the formatting and coding style of your changes; you
+can run the test with the following command:
# make check-syntax
@@ -27,30 +27,13 @@ with the following command:
... if there are any faults or errors they will be displayed.
-* Generate the Patch(es)
+## Add New Tests for New Functionality
-Depending on how you decided to work with the libseccomp code base and what
-tools you are using there are different ways to generate your patch(es).
-However, regardless of what tools you use, you should always generate your
-patches using the "unified" diff/patch format and the patches should always
-apply to the libseccomp source tree using the following command from the top
-directory of the libseccomp sources:
+Any submissions which add functionality, or significantly change the existing
+code, should include additional tests to verify the proper operation of the
+proposed changes.
- # patch -p1 < changes.patch
-
-If you are not using git, stacked git (stgit), or some other tool which can
-generate patch files for you automatically, you may find the following command
-helpful in generating patches, where "libseccomp.orig/" is the unmodified
-source code directory and "libseccomp/" is the source code directory with your
-changes:
-
- # diff -purN libseccomp-golang.orig/ libseccomp-golang/
-
-When in doubt please generate your patch and try applying it to an unmodified
-copy of the libseccomp sources; if it fails for you, it will fail for the rest
-of us.
-
-* Explain Your Work
+## Explain Your Work
At the top of every patch you should include a description of the problem you
are trying to solve, how you solved it, and why you chose the solution you
@@ -59,7 +42,7 @@ if you can describe/include a reproducer for the problem in the description as
well as instructions on how to test for the bug and verify that it has been
fixed.
-* Sign Your Work
+## Sign Your Work
The sign-off is a simple line at the end of the patch description, which
certifies that you wrote it or otherwise have the right to pass it on as an
@@ -97,16 +80,49 @@ your real name, saying:
Signed-off-by: Random J Developer <random@developer.example.org>
-* Email Your Patch(es)
+You can add this to your commit description in `git` with `git commit -s`
+
+## Post Your Patches Upstream
+
+The libseccomp project accepts both GitHub pull requests and patches sent via
+the mailing list. GitHub pull requests are preferred. The sections below
+explain how to contribute via either method. Please read each step and perform
+all steps that apply to your chosen contribution method.
+
+### Submitting via Email
+
+Depending on how you decided to work with the libseccomp code base and what
+tools you are using there are different ways to generate your patch(es).
+However, regardless of what tools you use, you should always generate your
+patches using the "unified" diff/patch format and the patches should always
+apply to the libseccomp source tree using the following command from the top
+directory of the libseccomp sources:
+
+ # patch -p1 < changes.patch
+
+If you are not using git, stacked git (stgit), or some other tool which can
+generate patch files for you automatically, you may find the following command
+helpful in generating patches, where "libseccomp.orig/" is the unmodified
+source code directory and "libseccomp/" is the source code directory with your
+changes:
+
+ # diff -purN libseccomp.orig/ libseccomp/
+
+When in doubt please generate your patch and try applying it to an unmodified
+copy of the libseccomp sources; if it fails for you, it will fail for the rest
+of us.
Finally, you will need to email your patches to the mailing list so they can
-be reviewed and potentially merged into the main libseccomp-golang repository.
-When sending patches to the mailing list it is important to send your email in
-text form, no HTML mail please, and ensure that your email client does not
-mangle your patches. It should be possible to save your raw email to disk and
-apply it directly to the libseccomp source code; if that fails then you likely
-have a problem with your email client. When in doubt try a test first by
-sending yourself an email with your patch and attempting to apply the emailed
-patch to the libseccomp-golang repository; if it fails for you, it will fail
-for the rest of us trying to test your patch and include it in the main
-libseccomp-golang repository.
+be reviewed and potentially merged into the main libseccomp repository. When
+sending patches to the mailing list it is important to send your email in text
+form, no HTML mail please, and ensure that your email client does not mangle
+your patches. It should be possible to save your raw email to disk and apply
+it directly to the libseccomp source code; if that fails then you likely have
+a problem with your email client. When in doubt try a test first by sending
+yourself an email with your patch and attempting to apply the emailed patch to
+the libseccomp repository; if it fails for you, it will fail for the rest of
+us trying to test your patch and include it in the main libseccomp repository.
+
+### Submitting via GitHub
+
+See [this guide](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request) if you've never done this before.
diff --git a/vendor/github.com/seccomp/libseccomp-golang/README b/vendor/github.com/seccomp/libseccomp-golang/README.md
index 66839a466..27423f2d9 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/README
+++ b/vendor/github.com/seccomp/libseccomp-golang/README.md
@@ -1,7 +1,8 @@
-libseccomp-golang: Go Language Bindings for the libseccomp Project
+![libseccomp Golang Bindings](https://github.com/seccomp/libseccomp-artwork/blob/main/logo/libseccomp-color_text.png)
===============================================================================
https://github.com/seccomp/libseccomp-golang
-https://github.com/seccomp/libseccomp
+
+[![Build Status](https://img.shields.io/travis/seccomp/libseccomp-golang/master.svg)](https://travis-ci.org/seccomp/libseccomp-golang)
The libseccomp library provides an easy to use, platform independent, interface
to the Linux Kernel's syscall filtering mechanism. The libseccomp API is
@@ -12,40 +13,39 @@ be familiar to, and easily adopted by, application developers.
The libseccomp-golang library provides a Go based interface to the libseccomp
library.
-* Online Resources
+## Online Resources
The library source repository currently lives on GitHub at the following URLs:
- -> https://github.com/seccomp/libseccomp-golang
- -> https://github.com/seccomp/libseccomp
+* https://github.com/seccomp/libseccomp-golang
+* https://github.com/seccomp/libseccomp
The project mailing list is currently hosted on Google Groups at the URL below,
please note that a Google account is not required to subscribe to the mailing
list.
- -> https://groups.google.com/d/forum/libseccomp
+* https://groups.google.com/d/forum/libseccomp
Documentation is also available at:
- -> https://godoc.org/github.com/seccomp/libseccomp-golang
+* https://godoc.org/github.com/seccomp/libseccomp-golang
-* Installing the package
+## Installing the package
The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4;
earlier versions may yield unpredictable results. If you meet these
requirements you can install this package using the command below:
- $ go get github.com/seccomp/libseccomp-golang
+ # go get github.com/seccomp/libseccomp-golang
-* Testing the Library
+## Testing the Library
A number of tests and lint related recipes are provided in the Makefile, if
you want to run the standard regression tests, you can execute the following:
- $ make check
+ # make check
In order to execute the 'make lint' recipe the 'golint' tool is needed, it
can be found at:
- -> https://github.com/golang/lint
-
+* https://github.com/golang/lint
diff --git a/vendor/github.com/seccomp/libseccomp-golang/go.mod b/vendor/github.com/seccomp/libseccomp-golang/go.mod
new file mode 100644
index 000000000..6384b3769
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/go.mod
@@ -0,0 +1,3 @@
+module github.com/seccomp/libseccomp-golang
+
+go 1.14
diff --git a/vendor/github.com/seccomp/libseccomp-golang/go.sum b/vendor/github.com/seccomp/libseccomp-golang/go.sum
new file mode 100644
index 000000000..72ae16111
--- /dev/null
+++ b/vendor/github.com/seccomp/libseccomp-golang/go.sum
@@ -0,0 +1,23 @@
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q=
+golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
index a3cc53822..e489b9ebd 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go
@@ -125,7 +125,8 @@ const (
// ActInvalid is a placeholder to ensure uninitialized ScmpAction
// variables are invalid
ActInvalid ScmpAction = iota
- // ActKill kills the process
+ // ActKill kills the thread that violated the rule. It is the same as ActKillThread.
+ // All other threads from the same thread group will continue to execute.
ActKill ScmpAction = iota
// ActTrap throws SIGSYS
ActTrap ScmpAction = iota
@@ -141,6 +142,14 @@ const (
// This action is only usable when libseccomp API level 3 or higher is
// supported.
ActLog ScmpAction = iota
+ // ActKillThread kills the thread that violated the rule. It is the same as ActKill.
+ // All other threads from the same thread group will continue to execute.
+ ActKillThread ScmpAction = iota
+ // ActKillProcess kills the process that violated the rule.
+ // All threads in the thread group are also terminated.
+ // This action is only usable when libseccomp API level 3 or higher is
+ // supported.
+ ActKillProcess ScmpAction = iota
)
const (
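To make the new kill semantics concrete, here is a hedged sketch of a filter whose default action terminates the whole thread group (ActKillProcess) while allowing one syscall. It assumes the usual libseccomp-golang entry points (NewFilter, GetSyscallFromName, AddRule, Load) on a Linux host with libseccomp 2.4+ and API level 3; it is an illustration, not code from this change.

```go
package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// ActKillProcess terminates every thread in the offending thread group;
	// ActKill/ActKillThread would only kill the violating thread.
	filter, err := seccomp.NewFilter(seccomp.ActKillProcess)
	if err != nil {
		log.Fatalf("creating filter: %v", err)
	}
	defer filter.Release()

	// Allow a single syscall; everything else hits the default action.
	nr, err := seccomp.GetSyscallFromName("write")
	if err != nil {
		log.Fatalf("resolving syscall: %v", err)
	}
	if err := filter.AddRule(nr, seccomp.ActAllow); err != nil {
		log.Fatalf("adding rule: %v", err)
	}

	if err := filter.Load(); err != nil {
		log.Fatalf("loading filter: %v", err)
	}
}
```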
@@ -290,8 +299,10 @@ func (a ScmpCompareOp) String() string {
// String returns a string representation of a seccomp match action
func (a ScmpAction) String() string {
switch a & 0xFFFF {
- case ActKill:
- return "Action: Kill Process"
+ case ActKill, ActKillThread:
+ return "Action: Kill thread"
+ case ActKillProcess:
+ return "Action: Kill process"
case ActTrap:
return "Action: Send SIGSYS"
case ActErrno:
@@ -334,23 +345,23 @@ func GetLibraryVersion() (major, minor, micro uint) {
return verMajor, verMinor, verMicro
}
-// GetApi returns the API level supported by the system.
+// GetAPI returns the API level supported by the system.
// Returns a positive int containing the API level, or 0 with an error if the
// API level could not be detected due to the library being older than v2.4.0.
// See the seccomp_api_get(3) man page for details on available API levels:
// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3
-func GetApi() (uint, error) {
- return getApi()
+func GetAPI() (uint, error) {
+ return getAPI()
}
-// SetApi forcibly sets the API level. General use of this function is strongly
+// SetAPI forcibly sets the API level. General use of this function is strongly
// discouraged.
// Returns an error if the API level could not be set. An error is always
// returned if the library is older than v2.4.0
// See the seccomp_api_get(3) man page for details on available API levels:
// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3
-func SetApi(api uint) error {
- return setApi(api)
+func SetAPI(api uint) error {
+ return setAPI(api)
}
// Syscall functions
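Because ActLog and ActKillProcess require API level 3 or higher, a caller would typically probe the level first. A short hedged sketch using the renamed GetAPI (assumes libseccomp 2.4.0+ is installed; older libraries return an error here):

```go
package main

import (
	"fmt"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	api, err := seccomp.GetAPI()
	if err != nil {
		// Library older than 2.4.0: API level operations are unsupported.
		fmt.Println("cannot detect API level:", err)
		return
	}
	if api >= 3 {
		fmt.Println("ActLog and ActKillProcess are available")
	} else {
		fmt.Printf("API level %d: falling back to ActKill\n", api)
	}
}
```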
@@ -552,9 +563,8 @@ func (f *ScmpFilter) Reset(defaultAction ScmpAction) error {
return errBadFilter
}
- retCode := C.seccomp_reset(f.filterCtx, defaultAction.toNative())
- if retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ if retCode := C.seccomp_reset(f.filterCtx, defaultAction.toNative()); retCode != 0 {
+ return errRc(retCode)
}
return nil
@@ -600,11 +610,12 @@ func (f *ScmpFilter) Merge(src *ScmpFilter) error {
}
// Merge the filters
- retCode := C.seccomp_merge(f.filterCtx, src.filterCtx)
- if syscall.Errno(-1*retCode) == syscall.EINVAL {
- return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
- } else if retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ if retCode := C.seccomp_merge(f.filterCtx, src.filterCtx); retCode != 0 {
+ e := errRc(retCode)
+ if e == syscall.EINVAL {
+ return fmt.Errorf("filters could not be merged due to a mismatch in attributes or invalid filter")
+ }
+ return e
}
src.valid = false
@@ -633,12 +644,13 @@ func (f *ScmpFilter) IsArchPresent(arch ScmpArch) (bool, error) {
return false, errBadFilter
}
- retCode := C.seccomp_arch_exist(f.filterCtx, arch.toNative())
- if syscall.Errno(-1*retCode) == syscall.EEXIST {
- // -EEXIST is "arch not present"
- return false, nil
- } else if retCode != 0 {
- return false, syscall.Errno(-1 * retCode)
+ if retCode := C.seccomp_arch_exist(f.filterCtx, arch.toNative()); retCode != 0 {
+ e := errRc(retCode)
+ if e == syscall.EEXIST {
+ // -EEXIST is "arch not present"
+ return false, nil
+ }
+ return false, e
}
return true, nil
@@ -661,9 +673,10 @@ func (f *ScmpFilter) AddArch(arch ScmpArch) error {
// Libseccomp returns -EEXIST if the specified architecture is already
// present. Succeed silently in this case, as it's not fatal, and the
// architecture is present already.
- retCode := C.seccomp_arch_add(f.filterCtx, arch.toNative())
- if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
- return syscall.Errno(-1 * retCode)
+ if retCode := C.seccomp_arch_add(f.filterCtx, arch.toNative()); retCode != 0 {
+ if e := errRc(retCode); e != syscall.EEXIST {
+ return e
+ }
}
return nil
@@ -686,9 +699,10 @@ func (f *ScmpFilter) RemoveArch(arch ScmpArch) error {
// Similar to AddArch, -EEXIST is returned if the arch is not present
// Succeed silently in that case, this is not fatal and the architecture
// is not present in the filter after RemoveArch
- retCode := C.seccomp_arch_remove(f.filterCtx, arch.toNative())
- if retCode != 0 && syscall.Errno(-1*retCode) != syscall.EEXIST {
- return syscall.Errno(-1 * retCode)
+ if retCode := C.seccomp_arch_remove(f.filterCtx, arch.toNative()); retCode != 0 {
+ if e := errRc(retCode); e != syscall.EEXIST {
+ return e
+ }
}
return nil
@@ -705,7 +719,7 @@ func (f *ScmpFilter) Load() error {
}
if retCode := C.seccomp_load(f.filterCtx); retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ return errRc(retCode)
}
return nil
@@ -764,7 +778,7 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) {
func (f *ScmpFilter) GetLogBit() (bool, error) {
log, err := f.getFilterAttr(filterAttrLog)
if err != nil {
- api, apiErr := getApi()
+ api, apiErr := getAPI()
if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) {
return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
}
@@ -818,7 +832,7 @@ func (f *ScmpFilter) SetLogBit(state bool) error {
err := f.setFilterAttr(filterAttrLog, toSet)
if err != nil {
- api, apiErr := getApi()
+ api, apiErr := getAPI()
if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) {
return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher")
}
@@ -842,7 +856,7 @@ func (f *ScmpFilter) SetSyscallPriority(call ScmpSyscall, priority uint8) error
if retCode := C.seccomp_syscall_priority(f.filterCtx, C.int(call),
C.uint8_t(priority)); retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ return errRc(retCode)
}
return nil
@@ -907,7 +921,7 @@ func (f *ScmpFilter) ExportPFC(file *os.File) error {
}
if retCode := C.seccomp_export_pfc(f.filterCtx, C.int(fd)); retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ return errRc(retCode)
}
return nil
@@ -928,7 +942,7 @@ func (f *ScmpFilter) ExportBPF(file *os.File) error {
}
if retCode := C.seccomp_export_bpf(f.filterCtx, C.int(fd)); retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ return errRc(retCode)
}
return nil
diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
index 4e36b27ae..0982e930f 100644
--- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
+++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go
@@ -72,7 +72,17 @@ const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X;
#define SCMP_ACT_LOG 0x7ffc0000U
#endif
+#ifndef SCMP_ACT_KILL_PROCESS
+#define SCMP_ACT_KILL_PROCESS 0x80000000U
+#endif
+
+#ifndef SCMP_ACT_KILL_THREAD
+#define SCMP_ACT_KILL_THREAD 0x00000000U
+#endif
+
const uint32_t C_ACT_KILL = SCMP_ACT_KILL;
+const uint32_t C_ACT_KILL_PROCESS = SCMP_ACT_KILL_PROCESS;
+const uint32_t C_ACT_KILL_THREAD = SCMP_ACT_KILL_THREAD;
const uint32_t C_ACT_TRAP = SCMP_ACT_TRAP;
const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0);
const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0);
@@ -203,7 +213,7 @@ const (
archEnd ScmpArch = ArchS390X
// Comparison boundaries to check for action validity
actionStart ScmpAction = ActKill
- actionEnd ScmpAction = ActLog
+ actionEnd ScmpAction = ActKillProcess
// Comparison boundaries to check for comparison operator validity
compareOpStart ScmpCompareOp = CompareNotEqual
compareOpEnd ScmpCompareOp = CompareMaskedEqual
@@ -236,7 +246,7 @@ func ensureSupportedVersion() error {
}
// Get the API level
-func getApi() (uint, error) {
+func getAPI() (uint, error) {
api := C.seccomp_api_get()
if api == 0 {
return 0, fmt.Errorf("API level operations are not supported")
@@ -246,9 +256,9 @@ func getApi() (uint, error) {
}
// Set the API level
-func setApi(api uint) error {
+func setAPI(api uint) error {
if retCode := C.seccomp_api_set(C.uint(api)); retCode != 0 {
- if syscall.Errno(-1*retCode) == syscall.EOPNOTSUPP {
+ if errRc(retCode) == syscall.EOPNOTSUPP {
return fmt.Errorf("API level operations are not supported")
}
@@ -265,6 +275,10 @@ func filterFinalizer(f *ScmpFilter) {
f.Release()
}
+func errRc(rc C.int) error {
+ return syscall.Errno(-1 * rc)
+}
+
// Get a raw filter attribute
func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
f.lock.Lock()
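The new errRc helper captures the libseccomp convention that C entry points report failure as a negative errno. A cgo-free sketch of the same mapping, with a made-up return code of -17 purely for illustration:

```go
package main

import (
	"fmt"
	"syscall"
)

// errRc mirrors the helper above, minus the cgo type: a negative
// libseccomp return code is just -errno.
func errRc(rc int) error {
	return syscall.Errno(-rc)
}

func main() {
	err := errRc(-17)                       // hypothetical return code
	fmt.Println(err, err == syscall.EEXIST) // "file exists true" on Linux
}
```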
@@ -278,7 +292,7 @@ func (f *ScmpFilter) getFilterAttr(attr scmpFilterAttr) (C.uint32_t, error) {
retCode := C.seccomp_attr_get(f.filterCtx, attr.toNative(), &attribute)
if retCode != 0 {
- return 0x0, syscall.Errno(-1 * retCode)
+ return 0x0, errRc(retCode)
}
return attribute, nil
@@ -295,7 +309,7 @@ func (f *ScmpFilter) setFilterAttr(attr scmpFilterAttr, value C.uint32_t) error
retCode := C.seccomp_attr_set(f.filterCtx, attr.toNative(), value)
if retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ return errRc(retCode)
}
return nil
@@ -316,14 +330,17 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b
retCode = C.seccomp_rule_add_array(f.filterCtx, action.toNative(), C.int(call), length, cond)
}
- if syscall.Errno(-1*retCode) == syscall.EFAULT {
- return fmt.Errorf("unrecognized syscall %#x", int32(call))
- } else if syscall.Errno(-1*retCode) == syscall.EPERM {
- return fmt.Errorf("requested action matches default action of filter")
- } else if syscall.Errno(-1*retCode) == syscall.EINVAL {
- return fmt.Errorf("two checks on same syscall argument")
- } else if retCode != 0 {
- return syscall.Errno(-1 * retCode)
+ if retCode != 0 {
+ switch e := errRc(retCode); e {
+ case syscall.EFAULT:
+ return fmt.Errorf("unrecognized syscall %#x", int32(call))
+ case syscall.EPERM:
+ return fmt.Errorf("requested action matches default action of filter")
+ case syscall.EINVAL:
+ return fmt.Errorf("two checks on same syscall argument")
+ default:
+ return e
+ }
}
return nil
@@ -517,6 +534,10 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) {
switch a & 0xFFFF0000 {
case C.C_ACT_KILL:
return ActKill, nil
+ case C.C_ACT_KILL_PROCESS:
+ return ActKillProcess, nil
+ case C.C_ACT_KILL_THREAD:
+ return ActKillThread, nil
case C.C_ACT_TRAP:
return ActTrap, nil
case C.C_ACT_ERRNO:
@@ -537,6 +558,10 @@ func (a ScmpAction) toNative() C.uint32_t {
switch a & 0xFFFF {
case ActKill:
return C.C_ACT_KILL
+ case ActKillProcess:
+ return C.C_ACT_KILL_PROCESS
+ case ActKillThread:
+ return C.C_ACT_KILL_THREAD
case ActTrap:
return C.C_ACT_TRAP
case ActErrno:
diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
index 347c8b15f..d118f30ed 100644
--- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
+++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!ppc64le gccgo purego
+// +build !amd64,!ppc64le,!s390x gccgo purego
package poly1305
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
index 3c75c2a67..9d7a6af09 100644
--- a/vendor/golang.org/x/crypto/poly1305/poly1305.go
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -26,7 +26,9 @@ const TagSize = 16
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
- sum(out, m, key)
+ h := New(key)
+ h.Write(m)
+ h.Sum(out[:0])
}
// Verify returns true if mac is a valid authenticator for m with the given key.
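The exported behaviour of Sum is unchanged by this refactor; it now simply routes through the streaming MAC (New, Write, Sum). A hedged usage sketch of the existing package-level helpers, with a random one-time key for illustration:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// A Poly1305 key must only ever be used for a single message.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("hello, poly1305")

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key) // one-shot; internally New + Write + Sum

	fmt.Println("tag verifies:", poly1305.Verify(&tag, msg, &key))
}
```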
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go
index c77ff179d..c942a6590 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go
@@ -41,7 +41,8 @@ func newMACGeneric(key *[32]byte) macGeneric {
// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
type macState struct {
// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
- // can grow larger during and after rounds.
+ // can grow larger during and after rounds. It must, however, remain below
+ // 2 * (2¹³⁰ - 5).
h [3]uint64
// r and s are the private key components.
r [2]uint64
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
deleted file mode 100644
index 2b55a29c5..000000000
--- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// At this point only s390x has an assembly implementation of sum. All other
-// platforms have assembly implementations of mac, and just define sum as using
-// that through New. Once s390x is ported, this file can be deleted and the body
-// of sum moved into Sum.
-
-// +build !go1.11 !s390x gccgo purego
-
-package poly1305
-
-func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
- h := New(key)
- h.Write(msg)
- h.Sum(out[:0])
-}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go
index 5f91ff84a..958fedc07 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build go1.11,!gccgo,!purego
+// +build !gccgo,!purego
package poly1305
@@ -10,30 +10,66 @@ import (
"golang.org/x/sys/cpu"
)
-// poly1305vx is an assembly implementation of Poly1305 that uses vector
+// updateVX is an assembly implementation of Poly1305 that uses vector
// instructions. It must only be called if the vector facility (vx) is
// available.
//go:noescape
-func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+func updateVX(state *macState, msg []byte)
-// poly1305vmsl is an assembly implementation of Poly1305 that uses vector
-// instructions, including VMSL. It must only be called if the vector facility (vx) is
-// available and if VMSL is supported.
-//go:noescape
-func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+// mac is a replacement for macGeneric that uses a larger buffer and redirects
+// calls that would have gone to updateGeneric to updateVX if the vector
+// facility is installed.
+//
+// A larger buffer is required for good performance because the vector
+// implementation has a higher fixed cost per call than the generic
+// implementation.
+type mac struct {
+ macState
+
+ buffer [16 * TagSize]byte // size must be a multiple of block size (16)
+ offset int
+}
-func sum(out *[16]byte, m []byte, key *[32]byte) {
- if cpu.S390X.HasVX {
- var mPtr *byte
- if len(m) > 0 {
- mPtr = &m[0]
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < len(h.buffer) {
+ h.offset += n
+ return nn, nil
}
- if cpu.S390X.HasVXE && len(m) > 256 {
- poly1305vmsl(out, mPtr, uint64(len(m)), key)
+ p = p[n:]
+ h.offset = 0
+ if cpu.S390X.HasVX {
+ updateVX(&h.macState, h.buffer[:])
} else {
- poly1305vx(out, mPtr, uint64(len(m)), key)
+ updateGeneric(&h.macState, h.buffer[:])
}
- } else {
- sumGeneric(out, m, key)
}
+
+ tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
+ body := len(p) - tail // number of bytes to process now
+ if body > 0 {
+ if cpu.S390X.HasVX {
+ updateVX(&h.macState, p[:body])
+ } else {
+ updateGeneric(&h.macState, p[:body])
+ }
+ }
+ h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[TagSize]byte) {
+ state := h.macState
+ remainder := h.buffer[:h.offset]
+
+ // Use the generic implementation if we have 2 or fewer blocks left
+ // to sum. The vector implementation has a higher startup time.
+ if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
+ updateVX(&state, remainder)
+ } else if len(remainder) > 0 {
+ updateGeneric(&state, remainder)
+ }
+ finalize(out, &state.h, &state.s)
}
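For readers tracing the Write method above, a small cgo/asm-free sketch of its tail/body split: whole multiples of the internal buffer are processed immediately and the remainder is carried over for the next call. The buffer size mirrors the 16*TagSize array above; the 1000-byte input length is arbitrary.

```go
package main

import "fmt"

func main() {
	const tagSize = 16
	bufLen := 16 * tagSize // matches len(h.buffer) in the code above

	p := make([]byte, 1000) // pending input once any buffered prefix is flushed

	tail := len(p) % bufLen // bytes copied into the buffer for a later call
	body := len(p) - tail   // bytes handed to updateVX/updateGeneric right away

	fmt.Println(body, tail) // 768 232
}
```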
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s
index 806d1694b..0fa9ee6e0 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s
+++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s
@@ -2,115 +2,187 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build go1.11,!gccgo,!purego
+// +build !gccgo,!purego
#include "textflag.h"
-// Implementation of Poly1305 using the vector facility (vx).
-
-// constants
-#define MOD26 V0
-#define EX0 V1
-#define EX1 V2
-#define EX2 V3
-
-// temporaries
-#define T_0 V4
-#define T_1 V5
-#define T_2 V6
-#define T_3 V7
-#define T_4 V8
-
-// key (r)
-#define R_0 V9
-#define R_1 V10
-#define R_2 V11
-#define R_3 V12
-#define R_4 V13
-#define R5_1 V14
-#define R5_2 V15
-#define R5_3 V16
-#define R5_4 V17
-#define RSAVE_0 R5
-#define RSAVE_1 R6
-#define RSAVE_2 R7
-#define RSAVE_3 R8
-#define RSAVE_4 R9
-#define R5SAVE_1 V28
-#define R5SAVE_2 V29
-#define R5SAVE_3 V30
-#define R5SAVE_4 V31
-
-// message block
-#define F_0 V18
-#define F_1 V19
-#define F_2 V20
-#define F_3 V21
-#define F_4 V22
-
-// accumulator
-#define H_0 V23
-#define H_1 V24
-#define H_2 V25
-#define H_3 V26
-#define H_4 V27
-
-GLOBL ·keyMask<>(SB), RODATA, $16
-DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
-DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
-
-GLOBL ·bswapMask<>(SB), RODATA, $16
-DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
-DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
-
-GLOBL ·constants<>(SB), RODATA, $64
-// MOD26
-DATA ·constants<>+0(SB)/8, $0x3ffffff
-DATA ·constants<>+8(SB)/8, $0x3ffffff
+// This implementation of Poly1305 uses the vector facility (vx)
+// to process up to 2 blocks (32 bytes) per iteration using an
+// algorithm based on the one described in:
+//
+// NEON crypto, Daniel J. Bernstein & Peter Schwabe
+// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
+//
+// This algorithm uses 5 26-bit limbs to represent a 130-bit
+// value. These limbs are, for the most part, zero extended and
+// placed into 64-bit vector register elements. Each vector
+// register is 128-bits wide and so holds 2 of these elements.
+// Using 26-bit limbs allows us plenty of headroom to accommodate
+// accumulations before and after multiplication without
+// overflowing either 32-bits (before multiplication) or 64-bits
+// (after multiplication).
+//
+// In order to parallelise the operations required to calculate
+// the sum we use two separate accumulators and then sum those
+// in an extra final step. For compatibility with the generic
+// implementation we perform this summation at the end of every
+// updateVX call.
+//
+// To use two accumulators we must multiply the message blocks
+// by r² rather than r. Only the final message block should be
+// multiplied by r.
+//
+// Example:
+//
+// We want to calculate the sum (h) for a 64 byte message (m):
+//
+// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
+//
+// To do this we split the calculation into the even indices
+// and odd indices of the message. These form our SIMD 'lanes':
+//
+// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
+// m[16:32]r³ + m[48:64]r <- lane 1
+//
+// To calculate this iteratively we refactor so that both lanes
+// are written in terms of r² and r:
+//
+// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
+// (m[16:32]r² + m[48:64])r <- lane 1
+// ^ ^
+// | coefficients for second iteration
+// coefficients for first iteration
+//
+// So in this case we would have two iterations. In the first
+// both lanes are multiplied by r². In the second only the
+// first lane is multiplied by r² and the second lane is
+// instead multiplied by r. This gives us the odd and even
+// powers of r that we need from the original equation.
+//
+// Notation:
+//
+// h - accumulator
+// r - key
+// m - message
+//
+// [a, b] - SIMD register holding two 64-bit values
+// [a, b, c, d] - SIMD register holding four 32-bit values
+// xᵢ[n] - limb n of variable x with bit width i
+//
+// Limbs are expressed in little endian order, so for 26-bit
+// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
+// will be the least significant limb.
+
+// masking constants
+#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
+#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
+
+// expansion constants (see EXPAND macro)
+#define EX0 V2
+#define EX1 V3
+#define EX2 V4
+
+// key (r², r or 1 depending on context)
+#define R_0 V5
+#define R_1 V6
+#define R_2 V7
+#define R_3 V8
+#define R_4 V9
+
+// precalculated coefficients (5r², 5r or 0 depending on context)
+#define R5_1 V10
+#define R5_2 V11
+#define R5_3 V12
+#define R5_4 V13
+
+// message block (m)
+#define M_0 V14
+#define M_1 V15
+#define M_2 V16
+#define M_3 V17
+#define M_4 V18
+
+// accumulator (h)
+#define H_0 V19
+#define H_1 V20
+#define H_2 V21
+#define H_3 V22
+#define H_4 V23
+
+// temporary registers (for short-lived values)
+#define T_0 V24
+#define T_1 V25
+#define T_2 V26
+#define T_3 V27
+#define T_4 V28
+
+GLOBL ·constants<>(SB), RODATA, $0x30
// EX0
-DATA ·constants<>+16(SB)/8, $0x0006050403020100
-DATA ·constants<>+24(SB)/8, $0x1016151413121110
+DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
+DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
// EX1
-DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706
-DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716
+DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
+DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
// EX2
-DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d
-DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
-
-// h = (f*g) % (2**130-5) [partial reduction]
+DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
+DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
+
+// MULTIPLY multiplies each lane of f and g, partially reduced
+// modulo 2¹³⁰ - 5. The result, h, consists of partial products
+// in each lane that need to be reduced further to produce the
+// final result.
+//
+// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
+//
+// Note that the multiplication by 5 of the high bits is
+// achieved by precalculating the multiplication of four of the
+// g coefficients by 5. These are g51-g54.
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
VMLOF f0, g0, h0 \
- VMLOF f0, g1, h1 \
- VMLOF f0, g2, h2 \
VMLOF f0, g3, h3 \
+ VMLOF f0, g1, h1 \
VMLOF f0, g4, h4 \
+ VMLOF f0, g2, h2 \
VMLOF f1, g54, T_0 \
- VMLOF f1, g0, T_1 \
- VMLOF f1, g1, T_2 \
VMLOF f1, g2, T_3 \
+ VMLOF f1, g0, T_1 \
VMLOF f1, g3, T_4 \
+ VMLOF f1, g1, T_2 \
VMALOF f2, g53, h0, h0 \
- VMALOF f2, g54, h1, h1 \
- VMALOF f2, g0, h2, h2 \
VMALOF f2, g1, h3, h3 \
+ VMALOF f2, g54, h1, h1 \
VMALOF f2, g2, h4, h4 \
+ VMALOF f2, g0, h2, h2 \
VMALOF f3, g52, T_0, T_0 \
- VMALOF f3, g53, T_1, T_1 \
- VMALOF f3, g54, T_2, T_2 \
VMALOF f3, g0, T_3, T_3 \
+ VMALOF f3, g53, T_1, T_1 \
VMALOF f3, g1, T_4, T_4 \
+ VMALOF f3, g54, T_2, T_2 \
VMALOF f4, g51, h0, h0 \
- VMALOF f4, g52, h1, h1 \
- VMALOF f4, g53, h2, h2 \
VMALOF f4, g54, h3, h3 \
+ VMALOF f4, g52, h1, h1 \
VMALOF f4, g0, h4, h4 \
+ VMALOF f4, g53, h2, h2 \
VAG T_0, h0, h0 \
- VAG T_1, h1, h1 \
- VAG T_2, h2, h2 \
VAG T_3, h3, h3 \
- VAG T_4, h4, h4
-
-// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4
+ VAG T_1, h1, h1 \
+ VAG T_4, h4, h4 \
+ VAG T_2, h2, h2
+
+// REDUCE performs the following carry operations in four
+// stages, as specified in Bernstein & Schwabe:
+//
+// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
+// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
+// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
+// 4: h₂₆[3]->h₂₆[4]
+//
+// The result is that all of the limbs are limited to 26-bits
+// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
+//
+// Note that although each limb is aligned at 26-bit intervals
+// they may contain values that exceed 2²⁶ - 1, hence the need
+// to carry the excess bits in each limb.
#define REDUCE(h0, h1, h2, h3, h4) \
VESRLG $26, h0, T_0 \
VESRLG $26, h3, T_1 \
@@ -136,144 +208,155 @@ DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
VN MOD26, h3, h3 \
VAG T_2, h4, h4
-// expand in0 into d[0] and in1 into d[1]
+// EXPAND splits the 128-bit little-endian values in0 and in1
+// into 26-bit big-endian limbs and places the results into
+// the first and second lane of d₂₆[0:4] respectively.
+//
+// The EX0, EX1 and EX2 constants are arrays of byte indices
+// for permutation. The permutation both reverses the bytes
+// in the input and ensures the bytes are copied into the
+// destination limb ready to be shifted into their final
+// position.
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
- VGBM $0x0707, d1 \ // d1=tmp
- VPERM in0, in1, EX2, d4 \
VPERM in0, in1, EX0, d0 \
VPERM in0, in1, EX1, d2 \
- VN d1, d4, d4 \
+ VPERM in0, in1, EX2, d4 \
VESRLG $26, d0, d1 \
VESRLG $30, d2, d3 \
VESRLG $4, d2, d2 \
- VN MOD26, d0, d0 \
- VN MOD26, d1, d1 \
- VN MOD26, d2, d2 \
- VN MOD26, d3, d3
-
-// pack h4:h0 into h1:h0 (no carry)
-#define PACK(h0, h1, h2, h3, h4) \
- VESLG $26, h1, h1 \
- VESLG $26, h3, h3 \
- VO h0, h1, h0 \
- VO h2, h3, h2 \
- VESLG $4, h2, h2 \
- VLEIB $7, $48, h1 \
- VSLB h1, h2, h2 \
- VO h0, h2, h0 \
- VLEIB $7, $104, h1 \
- VSLB h1, h4, h3 \
- VO h3, h0, h0 \
- VLEIB $7, $24, h1 \
- VSRLB h1, h4, h1
-
-// if h > 2**130-5 then h -= 2**130-5
-#define MOD(h0, h1, t0, t1, t2) \
- VZERO t0 \
- VLEIG $1, $5, t0 \
- VACCQ h0, t0, t1 \
- VAQ h0, t0, t0 \
- VONE t2 \
- VLEIG $1, $-4, t2 \
- VAQ t2, t1, t1 \
- VACCQ h1, t1, t1 \
- VONE t2 \
- VAQ t2, t1, t1 \
- VN h0, t1, t2 \
- VNC t0, t1, t1 \
- VO t1, t2, h0
-
-// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key)
-TEXT ·poly1305vx(SB), $0-32
- // This code processes up to 2 blocks (32 bytes) per iteration
- // using the algorithm described in:
- // NEON crypto, Daniel J. Bernstein & Peter Schwabe
- // https://cryptojedi.org/papers/neoncrypto-20120320.pdf
- LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
-
- // load MOD26, EX0, EX1 and EX2
+ VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
+ VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
+ VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
+ VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
+ VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
+
+// func updateVX(state *macState, msg []byte)
+TEXT ·updateVX(SB), NOSPLIT, $0
+ MOVD state+0(FP), R1
+ LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
+
+ // load EX0, EX1 and EX2
MOVD $·constants<>(SB), R5
- VLM (R5), MOD26, EX2
-
- // setup r
- VL (R4), T_0
- MOVD $·keyMask<>(SB), R6
- VL (R6), T_1
- VN T_0, T_1, T_0
- EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4)
-
- // setup r*5
- VLEIG $0, $5, T_0
- VLEIG $1, $5, T_0
-
- // store r (for final block)
- VMLOF T_0, R_1, R5SAVE_1
- VMLOF T_0, R_2, R5SAVE_2
- VMLOF T_0, R_3, R5SAVE_3
- VMLOF T_0, R_4, R5SAVE_4
- VLGVG $0, R_0, RSAVE_0
- VLGVG $0, R_1, RSAVE_1
- VLGVG $0, R_2, RSAVE_2
- VLGVG $0, R_3, RSAVE_3
- VLGVG $0, R_4, RSAVE_4
-
- // skip r**2 calculation
+ VLM (R5), EX0, EX2
+
+ // generate masks
+ VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
+ VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
+
+ // load h (accumulator) and r (key) from state
+ VZERO T_1 // [0, 0]
+ VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
+ VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
+ VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
+ VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
+ VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
+
+ // unpack h and r into 26-bit limbs
+ // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
+ VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
+ VZERO H_1 // [0, 0]
+ VZERO H_3 // [0, 0]
+ VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
+ VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
+ VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
+ VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
+ VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[3], r₂₆[3]]
+ VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
+ VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
+ VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
+
+ // replicate r across all 4 vector elements
+ VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
+ VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
+ VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
+ VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
+ VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
+
+ // zero out lane 1 of h
+ VLEIG $1, $0, H_0 // [h₂₆[0], 0]
+ VLEIG $1, $0, H_1 // [h₂₆[1], 0]
+ VLEIG $1, $0, H_2 // [h₂₆[2], 0]
+ VLEIG $1, $0, H_3 // [h₂₆[3], 0]
+ VLEIG $1, $0, H_4 // [h₂₆[4], 0]
+
+ // calculate 5r (ignore least significant limb)
+ VREPIF $5, T_0
+ VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
+ VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
+ VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
+ VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
+
+ // skip r² calculation if we are only calculating one block
CMPBLE R3, $16, skip
- // calculate r**2
- MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4)
- REDUCE(H_0, H_1, H_2, H_3, H_4)
- VLEIG $0, $5, T_0
- VLEIG $1, $5, T_0
- VMLOF T_0, H_1, R5_1
- VMLOF T_0, H_2, R5_2
- VMLOF T_0, H_3, R5_3
- VMLOF T_0, H_4, R5_4
- VLR H_0, R_0
- VLR H_1, R_1
- VLR H_2, R_2
- VLR H_3, R_3
- VLR H_4, R_4
-
- // initialize h
- VZERO H_0
- VZERO H_1
- VZERO H_2
- VZERO H_3
- VZERO H_4
+ // calculate r²
+ MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
+ REDUCE(M_0, M_1, M_2, M_3, M_4)
+ VGBM $0x0f0f, T_0
+ VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
+ VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
+ VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
+ VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
+ VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
+
+ // calculate 5r² (ignore least significant limb)
+ VREPIF $5, T_0
+ VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
+ VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
+ VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
+ VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
loop:
- CMPBLE R3, $32, b2
- VLM (R2), T_0, T_1
- SUB $32, R3
- MOVD $32(R2), R2
- EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
- VLEIB $4, $1, F_4
- VLEIB $12, $1, F_4
+ CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
+
+ // load next 2 blocks from message
+ VLM (R2), T_0, T_1
+
+ // update message slice
+ SUB $32, R3
+ MOVD $32(R2), R2
+
+ // unpack message blocks into 26-bit big-endian limbs
+ EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+ // add 2¹²⁸ to each message block value
+ VLEIB $4, $1, M_4
+ VLEIB $12, $1, M_4
multiply:
- VAG H_0, F_0, F_0
- VAG H_1, F_1, F_1
- VAG H_2, F_2, F_2
- VAG H_3, F_3, F_3
- VAG H_4, F_4, F_4
- MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
+ // accumulate the incoming message
+ VAG H_0, M_0, M_0
+ VAG H_3, M_3, M_3
+ VAG H_1, M_1, M_1
+ VAG H_4, M_4, M_4
+ VAG H_2, M_2, M_2
+
+ // multiply the accumulator by the key coefficient
+ MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
+
+ // carry and partially reduce the partial products
REDUCE(H_0, H_1, H_2, H_3, H_4)
+
CMPBNE R3, $0, loop
finish:
- // sum vectors
+ // sum lane 0 and lane 1 and put the result in lane 1
VZERO T_0
VSUMQG H_0, T_0, H_0
- VSUMQG H_1, T_0, H_1
- VSUMQG H_2, T_0, H_2
VSUMQG H_3, T_0, H_3
+ VSUMQG H_1, T_0, H_1
VSUMQG H_4, T_0, H_4
+ VSUMQG H_2, T_0, H_2
- // h may be >= 2*(2**130-5) so we need to reduce it again
+ // reduce again after summation
+ // TODO(mundaym): there might be a more efficient way to do this
+ // now that we only have 1 active lane. For example, we could
+ // simultaneously pack the values as we reduce them.
REDUCE(H_0, H_1, H_2, H_3, H_4)
- // carry h1->h4
+ // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
+ // TODO(mundaym): in testing this final carry was unnecessary.
+ // Needs a proof before it can be removed though.
VESRLG $26, H_1, T_1
VN MOD26, H_1, H_1
VAQ T_1, H_2, H_2
@@ -284,95 +367,137 @@ finish:
VN MOD26, H_3, H_3
VAQ T_3, H_4, H_4
- // h is now < 2*(2**130-5)
- // pack h into h1 (hi) and h0 (lo)
- PACK(H_0, H_1, H_2, H_3, H_4)
-
- // if h > 2**130-5 then h -= 2**130-5
- MOD(H_0, H_1, T_0, T_1, T_2)
-
- // h += s
- MOVD $·bswapMask<>(SB), R5
- VL (R5), T_1
- VL 16(R4), T_0
- VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
- VAQ T_0, H_0, H_0
- VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little)
- VST H_0, (R1)
-
+ // h is now < 2(2¹³⁰ - 5)
+ // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
+ VESLG $26, H_1, H_1
+ VESLG $26, H_3, H_3
+ VO H_0, H_1, H_0
+ VO H_2, H_3, H_2
+ VESLG $4, H_2, H_2
+ VLEIB $7, $48, H_1
+ VSLB H_1, H_2, H_2
+ VO H_0, H_2, H_0
+ VLEIB $7, $104, H_1
+ VSLB H_1, H_4, H_3
+ VO H_3, H_0, H_0
+ VLEIB $7, $24, H_1
+ VSRLB H_1, H_4, H_1
+
+ // update state
+ VSTEG $1, H_0, 0(R1)
+ VSTEG $0, H_0, 8(R1)
+ VSTEG $1, H_1, 16(R1)
RET
-b2:
+b2: // 2 or fewer blocks remaining
CMPBLE R3, $16, b1
- // 2 blocks remaining
- SUB $17, R3
- VL (R2), T_0
- VLL R3, 16(R2), T_1
- ADD $1, R3
+ // Load the 2 remaining blocks (17-32 bytes remaining).
+ MOVD $-17(R3), R0 // index of final byte to load modulo 16
+ VL (R2), T_0 // load full 16 byte block
+ VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
+
+ // The Poly1305 algorithm requires that a 1 bit be appended to
+ // each message block. If the final block is less than 16 bytes
+ // long then it is easiest to insert the 1 before the message
+ // block is split into 26-bit limbs. If, on the other hand, the
+ // final message block is 16 bytes long then we append the 1 bit
+ // after expansion as normal.
MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, T_1
- EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
+ MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
+ CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
+ VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
+
+ // Split both blocks into 26-bit limbs in the appropriate lanes.
+ EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+ // Append a 1 byte to the end of the second to last block.
+ VLEIB $4, $1, M_4
+
+ // Append a 1 byte to the end of the last block only if it is a
+ // full 16 byte block.
CMPBNE R3, $16, 2(PC)
- VLEIB $12, $1, F_4
- VLEIB $4, $1, F_4
-
- // setup [r²,r]
- VLVGG $1, RSAVE_0, R_0
- VLVGG $1, RSAVE_1, R_1
- VLVGG $1, RSAVE_2, R_2
- VLVGG $1, RSAVE_3, R_3
- VLVGG $1, RSAVE_4, R_4
- VPDI $0, R5_1, R5SAVE_1, R5_1
- VPDI $0, R5_2, R5SAVE_2, R5_2
- VPDI $0, R5_3, R5SAVE_3, R5_3
- VPDI $0, R5_4, R5SAVE_4, R5_4
+ VLEIB $12, $1, M_4
+
+ // Finally, set up the coefficients for the final multiplication.
+ // We have previously saved r and 5r in the 32-bit even indexes
+ // of the R_[0-4] and R5_[1-4] coefficient registers.
+ //
+ // We want lane 0 to be multiplied by r² so it can be kept the
+ // same. We want lane 1 to be multiplied by r so we need to move
+ // the saved r value into the 32-bit odd index in lane 1 by
+ // rotating the 64-bit lane by 32.
+ VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
+ VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
+ VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
+ VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
+ VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
+ VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
+ VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
+ VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
+ VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
+ VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
MOVD $0, R3
BR multiply
skip:
- VZERO H_0
- VZERO H_1
- VZERO H_2
- VZERO H_3
- VZERO H_4
-
CMPBEQ R3, $0, finish
-b1:
- // 1 block remaining
- SUB $1, R3
- VLL R3, (R2), T_0
- ADD $1, R3
+b1: // 1 block remaining
+
+ // Load the final block (1-16 bytes). This will be placed into
+ // lane 0.
+ MOVD $-1(R3), R0
+ VLL R0, (R2), T_0 // pad to 16 bytes with zeros
+
+ // The Poly1305 algorithm requires that a 1 bit be appended to
+ // each message block. If the final block is less than 16 bytes
+ // long then it is easiest to insert the 1 before the message
+ // block is split into 26-bit limbs. If, on the other hand, the
+ // final message block is 16 bytes long then we append the 1 bit
+ // after expansion as normal.
MOVBZ $1, R0
CMPBEQ R3, $16, 2(PC)
VLVGB R3, R0, T_0
- VZERO T_1
- EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
+
+ // Set the message block in lane 1 to the value 0 so that it
+ // can be accumulated without affecting the final result.
+ VZERO T_1
+
+ // Split the final message block into 26-bit limbs in lane 0.
+ // Lane 1 will contain 0.
+ EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+ // Append a 1 byte to the end of the last block only if it is a
+ // full 16 byte block.
CMPBNE R3, $16, 2(PC)
- VLEIB $4, $1, F_4
- VLEIG $1, $1, R_0
- VZERO R_1
- VZERO R_2
- VZERO R_3
- VZERO R_4
- VZERO R5_1
- VZERO R5_2
- VZERO R5_3
- VZERO R5_4
-
- // setup [r, 1]
- VLVGG $0, RSAVE_0, R_0
- VLVGG $0, RSAVE_1, R_1
- VLVGG $0, RSAVE_2, R_2
- VLVGG $0, RSAVE_3, R_3
- VLVGG $0, RSAVE_4, R_4
- VPDI $0, R5SAVE_1, R5_1, R5_1
- VPDI $0, R5SAVE_2, R5_2, R5_2
- VPDI $0, R5SAVE_3, R5_3, R5_3
- VPDI $0, R5SAVE_4, R5_4, R5_4
+ VLEIB $4, $1, M_4
+
+ // We have previously saved r and 5r in the 32-bit even indexes
+ // of the R_[0-4] and R5_[1-4] coefficient registers.
+ //
+ // We want lane 0 to be multiplied by r so we need to move the
+ // saved r value into the 32-bit odd index in lane 0. We want
+ // lane 1 to be set to the value 1. This makes multiplication
+ // a no-op. We do this by setting lane 1 in every register to 0
+ // and then just setting the 32-bit index 3 in R_0 to 1.
+ VZERO T_0
+ MOVD $0, R0
+ MOVD $0x10111213, R12
+ VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
+ VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0]
+ VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0]
+ VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0]
+ VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0]
+ VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0]
+ VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
+ VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
+ VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
+ VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
+
+ // Set the value of lane 1 to be 1.
+ VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
MOVD $0, R3
BR multiply
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s
deleted file mode 100644
index b439af936..000000000
--- a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s
+++ /dev/null
@@ -1,909 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.11,!gccgo,!purego
-
-#include "textflag.h"
-
-// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
-
-// constants
-#define EX0 V1
-#define EX1 V2
-#define EX2 V3
-
-// temporaries
-#define T_0 V4
-#define T_1 V5
-#define T_2 V6
-#define T_3 V7
-#define T_4 V8
-#define T_5 V9
-#define T_6 V10
-#define T_7 V11
-#define T_8 V12
-#define T_9 V13
-#define T_10 V14
-
-// r**2 & r**4
-#define R_0 V15
-#define R_1 V16
-#define R_2 V17
-#define R5_1 V18
-#define R5_2 V19
-// key (r)
-#define RSAVE_0 R7
-#define RSAVE_1 R8
-#define RSAVE_2 R9
-#define R5SAVE_1 R10
-#define R5SAVE_2 R11
-
-// message block
-#define M0 V20
-#define M1 V21
-#define M2 V22
-#define M3 V23
-#define M4 V24
-#define M5 V25
-
-// accumulator
-#define H0_0 V26
-#define H1_0 V27
-#define H2_0 V28
-#define H0_1 V29
-#define H1_1 V30
-#define H2_1 V31
-
-GLOBL ·keyMask<>(SB), RODATA, $16
-DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
-DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
-
-GLOBL ·bswapMask<>(SB), RODATA, $16
-DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
-DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
-
-GLOBL ·constants<>(SB), RODATA, $48
-// EX0
-DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
-DATA ·constants<>+8(SB)/8, $0x0000050403020100
-// EX1
-DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
-DATA ·constants<>+24(SB)/8, $0x00000a0908070605
-// EX2
-DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
-DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b
-
-GLOBL ·c<>(SB), RODATA, $48
-// EX0
-DATA ·c<>+0(SB)/8, $0x0000050403020100
-DATA ·c<>+8(SB)/8, $0x0000151413121110
-// EX1
-DATA ·c<>+16(SB)/8, $0x00000a0908070605
-DATA ·c<>+24(SB)/8, $0x00001a1918171615
-// EX2
-DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
-DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b
-
-GLOBL ·reduce<>(SB), RODATA, $32
-// 44 bit
-DATA ·reduce<>+0(SB)/8, $0x0
-DATA ·reduce<>+8(SB)/8, $0xfffffffffff
-// 42 bit
-DATA ·reduce<>+16(SB)/8, $0x0
-DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
-
-// h = (f*g) % (2**130-5) [partial reduction]
-// uses T_0...T_9 temporary registers
-// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
-// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
-// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
-#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
- \ // Eliminate the dependency for the last 2 VMSLs
- VMSLG m02_0, r_2, m4_2, m4_2 \
- VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
- VMSLG m02_0, r_0, m4_0, m4_0 \
- VMSLG m02_1, r5_2, V0, T_0 \
- VMSLG m02_0, r_1, m4_1, m4_1 \
- VMSLG m02_1, r_0, V0, T_1 \
- VMSLG m02_1, r_1, V0, T_2 \
- VMSLG m02_2, r5_1, V0, T_3 \
- VMSLG m02_2, r5_2, V0, T_4 \
- VMSLG m13_0, r_0, m5_0, m5_0 \
- VMSLG m13_1, r5_2, V0, T_5 \
- VMSLG m13_0, r_1, m5_1, m5_1 \
- VMSLG m13_1, r_0, V0, T_6 \
- VMSLG m13_1, r_1, V0, T_7 \
- VMSLG m13_2, r5_1, V0, T_8 \
- VMSLG m13_2, r5_2, V0, T_9 \
- VMSLG m02_2, r_0, m4_2, m4_2 \
- VMSLG m13_2, r_0, m5_2, m5_2 \
- VAQ m4_0, T_0, m02_0 \
- VAQ m4_1, T_1, m02_1 \
- VAQ m5_0, T_5, m13_0 \
- VAQ m5_1, T_6, m13_1 \
- VAQ m02_0, T_3, m02_0 \
- VAQ m02_1, T_4, m02_1 \
- VAQ m13_0, T_8, m13_0 \
- VAQ m13_1, T_9, m13_1 \
- VAQ m4_2, T_2, m02_2 \
- VAQ m5_2, T_7, m13_2 \
-
-// SQUARE uses three limbs of r and r_2*5 to output square of r
-// uses T_1, T_5 and T_7 temporary registers
-// input: r_0, r_1, r_2, r5_2
-// temp: TEMP0, TEMP1, TEMP2
-// output: p0, p1, p2
-#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
- VMSLG r_0, r_0, p0, p0 \
- VMSLG r_1, r5_2, V0, TEMP0 \
- VMSLG r_2, r5_2, p1, p1 \
- VMSLG r_0, r_1, V0, TEMP1 \
- VMSLG r_1, r_1, p2, p2 \
- VMSLG r_0, r_2, V0, TEMP2 \
- VAQ TEMP0, p0, p0 \
- VAQ TEMP1, p1, p1 \
- VAQ TEMP2, p2, p2 \
- VAQ TEMP0, p0, p0 \
- VAQ TEMP1, p1, p1 \
- VAQ TEMP2, p2, p2 \
-
-// carry h0->h1->h2->h0 || h3->h4->h5->h3
-// uses T_2, T_4, T_5, T_7, T_8, T_9
-// t6, t7, t8, t9, t10, t11
-// input: h0, h1, h2, h3, h4, h5
-// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
-// output: h0, h1, h2, h3, h4, h5
-#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
- VLM (R12), t6, t7 \ // 44 and 42 bit clear mask
- VLEIB $7, $0x28, t10 \ // 5 byte shift mask
- VREPIB $4, t8 \ // 4 bit shift mask
- VREPIB $2, t11 \ // 2 bit shift mask
- VSRLB t10, h0, t0 \ // h0 byte shift
- VSRLB t10, h1, t1 \ // h1 byte shift
- VSRLB t10, h2, t2 \ // h2 byte shift
- VSRLB t10, h3, t3 \ // h3 byte shift
- VSRLB t10, h4, t4 \ // h4 byte shift
- VSRLB t10, h5, t5 \ // h5 byte shift
- VSRL t8, t0, t0 \ // h0 bit shift
- VSRL t8, t1, t1 \ // h2 bit shift
- VSRL t11, t2, t2 \ // h2 bit shift
- VSRL t8, t3, t3 \ // h3 bit shift
- VSRL t8, t4, t4 \ // h4 bit shift
- VESLG $2, t2, t9 \ // h2 carry x5
- VSRL t11, t5, t5 \ // h5 bit shift
- VN t6, h0, h0 \ // h0 clear carry
- VAQ t2, t9, t2 \ // h2 carry x5
- VESLG $2, t5, t9 \ // h5 carry x5
- VN t6, h1, h1 \ // h1 clear carry
- VN t7, h2, h2 \ // h2 clear carry
- VAQ t5, t9, t5 \ // h5 carry x5
- VN t6, h3, h3 \ // h3 clear carry
- VN t6, h4, h4 \ // h4 clear carry
- VN t7, h5, h5 \ // h5 clear carry
- VAQ t0, h1, h1 \ // h0->h1
- VAQ t3, h4, h4 \ // h3->h4
- VAQ t1, h2, h2 \ // h1->h2
- VAQ t4, h5, h5 \ // h4->h5
- VAQ t2, h0, h0 \ // h2->h0
- VAQ t5, h3, h3 \ // h5->h3
- VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
- VREPG $1, t7, t7 \
- VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
- VSLDB $8, h1, h1, h1 \
- VSLDB $8, h2, h2, h2 \
- VO h0, h3, h3 \
- VO h1, h4, h4 \
- VO h2, h5, h5 \
- VESRLG $44, h3, t0 \ // 44 bit shift right
- VESRLG $44, h4, t1 \
- VESRLG $42, h5, t2 \
- VN t6, h3, h3 \ // clear carry bits
- VN t6, h4, h4 \
- VN t7, h5, h5 \
- VESLG $2, t2, t9 \ // multiply carry by 5
- VAQ t9, t2, t2 \
- VAQ t0, h4, h4 \
- VAQ t1, h5, h5 \
- VAQ t2, h3, h3 \
-
-// carry h0->h1->h2->h0
-// input: h0, h1, h2
-// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
-// output: h0, h1, h2
-#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
- VLEIB $7, $0x28, t3 \ // 5 byte shift mask
- VREPIB $4, t4 \ // 4 bit shift mask
- VREPIB $2, t7 \ // 2 bit shift mask
- VGBM $0x003F, t5 \ // mask to clear carry bits
- VSRLB t3, h0, t0 \
- VSRLB t3, h1, t1 \
- VSRLB t3, h2, t2 \
- VESRLG $4, t5, t5 \ // 44 bit clear mask
- VSRL t4, t0, t0 \
- VSRL t4, t1, t1 \
- VSRL t7, t2, t2 \
- VESRLG $2, t5, t6 \ // 42 bit clear mask
- VESLG $2, t2, t8 \
- VAQ t8, t2, t2 \
- VN t5, h0, h0 \
- VN t5, h1, h1 \
- VN t6, h2, h2 \
- VAQ t0, h1, h1 \
- VAQ t1, h2, h2 \
- VAQ t2, h0, h0 \
- VSRLB t3, h0, t0 \
- VSRLB t3, h1, t1 \
- VSRLB t3, h2, t2 \
- VSRL t4, t0, t0 \
- VSRL t4, t1, t1 \
- VSRL t7, t2, t2 \
- VN t5, h0, h0 \
- VN t5, h1, h1 \
- VESLG $2, t2, t8 \
- VN t6, h2, h2 \
- VAQ t0, h1, h1 \
- VAQ t8, t2, t2 \
- VAQ t1, h2, h2 \
- VAQ t2, h0, h0 \
-
-// expands two message blocks into the lower halves of the d registers
-// moves the contents of the d registers into the upper halves
-// input: in1, in2, d0, d1, d2, d3, d4, d5
-// temp: TEMP0, TEMP1, TEMP2, TEMP3
-// output: d0, d1, d2, d3, d4, d5
-#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
- VGBM $0xff3f, TEMP0 \
- VGBM $0xff1f, TEMP1 \
- VESLG $4, d1, TEMP2 \
- VESLG $4, d4, TEMP3 \
- VESRLG $4, TEMP0, TEMP0 \
- VPERM in1, d0, EX0, d0 \
- VPERM in2, d3, EX0, d3 \
- VPERM in1, d2, EX2, d2 \
- VPERM in2, d5, EX2, d5 \
- VPERM in1, TEMP2, EX1, d1 \
- VPERM in2, TEMP3, EX1, d4 \
- VN TEMP0, d0, d0 \
- VN TEMP0, d3, d3 \
- VESRLG $4, d1, d1 \
- VESRLG $4, d4, d4 \
- VN TEMP1, d2, d2 \
- VN TEMP1, d5, d5 \
- VN TEMP0, d1, d1 \
- VN TEMP0, d4, d4 \
-
-// expands one message block into the lower halves of the d registers
-// moves the contents of the d registers into the upper halves
-// input: in, d0, d1, d2
-// temp: TEMP0, TEMP1, TEMP2
-// output: d0, d1, d2
-#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
- VGBM $0xff3f, TEMP0 \
- VESLG $4, d1, TEMP2 \
- VGBM $0xff1f, TEMP1 \
- VPERM in, d0, EX0, d0 \
- VESRLG $4, TEMP0, TEMP0 \
- VPERM in, d2, EX2, d2 \
- VPERM in, TEMP2, EX1, d1 \
- VN TEMP0, d0, d0 \
- VN TEMP1, d2, d2 \
- VESRLG $4, d1, d1 \
- VN TEMP0, d1, d1 \
-
-// pack h2:h0 into h1:h0 (no carry)
-// input: h0, h1, h2
-// output: h0, h1, h2
-#define PACK(h0, h1, h2) \
- VMRLG h1, h2, h2 \ // copy h1 to upper half h2
- VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20
- VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
- VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
- VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1
- VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
- VLEIG $0, $0, h2 \ // clear upper half of h2
- VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
- VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
- VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
- VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1
- VLEIG $0, $0, h1 \ // clear upper half of h1
-
-// if h > 2**130-5 then h -= 2**130-5
-// input: h0, h1
-// temp: t0, t1, t2
-// output: h0
-#define MOD(h0, h1, t0, t1, t2) \
- VZERO t0 \
- VLEIG $1, $5, t0 \
- VACCQ h0, t0, t1 \
- VAQ h0, t0, t0 \
- VONE t2 \
- VLEIG $1, $-4, t2 \
- VAQ t2, t1, t1 \
- VACCQ h1, t1, t1 \
- VONE t2 \
- VAQ t2, t1, t1 \
- VN h0, t1, t2 \
- VNC t0, t1, t1 \
- VO t1, t2, h0 \
-
-// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key)
-TEXT ·poly1305vmsl(SB), $0-32
- // This code processes 6 + up to 4 blocks (32 bytes) per iteration
- // using the algorithm described in:
- // NEON crypto, Daniel J. Bernstein & Peter Schwabe
- // https://cryptojedi.org/papers/neoncrypto-20120320.pdf
-	// And as modified for VMSL as described in
- // Accelerating Poly1305 Cryptographic Message Authentication on the z14
- // O'Farrell et al, CASCON 2017, p48-55
- // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht
-
- LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
- VZERO V0 // c
-
- // load EX0, EX1 and EX2
- MOVD $·constants<>(SB), R5
- VLM (R5), EX0, EX2 // c
-
- // setup r
- VL (R4), T_0
- MOVD $·keyMask<>(SB), R6
- VL (R6), T_1
- VN T_0, T_1, T_0
- VZERO T_2 // limbs for r
- VZERO T_3
- VZERO T_4
- EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)
-
- // T_2, T_3, T_4: [0, r]
-
- // setup r*20
- VLEIG $0, $0, T_0
- VLEIG $1, $20, T_0 // T_0: [0, 20]
- VZERO T_5
- VZERO T_6
- VMSLG T_0, T_3, T_5, T_5
- VMSLG T_0, T_4, T_6, T_6
-
- // store r for final block in GR
- VLGVG $1, T_2, RSAVE_0 // c
- VLGVG $1, T_3, RSAVE_1 // c
- VLGVG $1, T_4, RSAVE_2 // c
- VLGVG $1, T_5, R5SAVE_1 // c
- VLGVG $1, T_6, R5SAVE_2 // c
-
- // initialize h
- VZERO H0_0
- VZERO H1_0
- VZERO H2_0
- VZERO H0_1
- VZERO H1_1
- VZERO H2_1
-
- // initialize pointer for reduce constants
- MOVD $·reduce<>(SB), R12
-
- // calculate r**2 and 20*(r**2)
- VZERO R_0
- VZERO R_1
- VZERO R_2
- SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
- REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)
- VZERO R5_1
- VZERO R5_2
- VMSLG T_0, R_1, R5_1, R5_1
- VMSLG T_0, R_2, R5_2, R5_2
-
- // skip r**4 calculation if 3 blocks or less
- CMPBLE R3, $48, b4
-
- // calculate r**4 and 20*(r**4)
- VZERO T_8
- VZERO T_9
- VZERO T_10
- SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
- REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)
- VZERO T_2
- VZERO T_3
- VMSLG T_0, T_9, T_2, T_2
- VMSLG T_0, T_10, T_3, T_3
-
- // put r**2 to the right and r**4 to the left of R_0, R_1, R_2
- VSLDB $8, T_8, T_8, T_8
- VSLDB $8, T_9, T_9, T_9
- VSLDB $8, T_10, T_10, T_10
- VSLDB $8, T_2, T_2, T_2
- VSLDB $8, T_3, T_3, T_3
-
- VO T_8, R_0, R_0
- VO T_9, R_1, R_1
- VO T_10, R_2, R_2
- VO T_2, R5_1, R5_1
- VO T_3, R5_2, R5_2
-
- CMPBLE R3, $80, load // less than or equal to 5 blocks in message
-
- // 6(or 5+1) blocks
- SUB $81, R3
- VLM (R2), M0, M4
- VLL R3, 80(R2), M5
- ADD $1, R3
- MOVBZ $1, R0
- CMPBGE R3, $16, 2(PC)
- VLVGB R3, R0, M5
- MOVD $96(R2), R2
- EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
- EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
- VLEIB $2, $1, H2_0
- VLEIB $2, $1, H2_1
- VLEIB $10, $1, H2_0
- VLEIB $10, $1, H2_1
-
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO T_4
- VZERO T_10
- EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)
- VLR T_4, M4
- VLEIB $10, $1, M2
- CMPBLT R3, $16, 2(PC)
- VLEIB $10, $1, T_10
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
- VMRHG V0, H0_1, H0_0
- VMRHG V0, H1_1, H1_0
- VMRHG V0, H2_1, H2_0
- VMRLG V0, H0_1, H0_1
- VMRLG V0, H1_1, H1_1
- VMRLG V0, H2_1, H2_1
-
- SUB $16, R3
- CMPBLE R3, $0, square
-
-load:
- // load EX0, EX1 and EX2
- MOVD $·c<>(SB), R5
- VLM (R5), EX0, EX2
-
-loop:
- CMPBLE R3, $64, add // b4 // last 4 or less blocks left
-
- // next 4 full blocks
- VLM (R2), M2, M5
- SUB $64, R3
- MOVD $64(R2), R2
- REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)
-
- // expacc in-lined to create [m2, m3] limbs
- VGBM $0x3f3f, T_0 // 44 bit clear mask
- VGBM $0x1f1f, T_1 // 40 bit clear mask
- VPERM M2, M3, EX0, T_3
- VESRLG $4, T_0, T_0 // 44 bit clear mask ready
- VPERM M2, M3, EX1, T_4
- VPERM M2, M3, EX2, T_5
- VN T_0, T_3, T_3
- VESRLG $4, T_4, T_4
- VN T_1, T_5, T_5
- VN T_0, T_4, T_4
- VMRHG H0_1, T_3, H0_0
- VMRHG H1_1, T_4, H1_0
- VMRHG H2_1, T_5, H2_0
- VMRLG H0_1, T_3, H0_1
- VMRLG H1_1, T_4, H1_1
- VMRLG H2_1, T_5, H2_1
- VLEIB $10, $1, H2_0
- VLEIB $10, $1, H2_1
- VPERM M4, M5, EX0, T_3
- VPERM M4, M5, EX1, T_4
- VPERM M4, M5, EX2, T_5
- VN T_0, T_3, T_3
- VESRLG $4, T_4, T_4
- VN T_1, T_5, T_5
- VN T_0, T_4, T_4
- VMRHG V0, T_3, M0
- VMRHG V0, T_4, M1
- VMRHG V0, T_5, M2
- VMRLG V0, T_3, M3
- VMRLG V0, T_4, M4
- VMRLG V0, T_5, M5
- VLEIB $10, $1, M2
- VLEIB $10, $1, M5
-
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- CMPBNE R3, $0, loop
- REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
- VMRHG V0, H0_1, H0_0
- VMRHG V0, H1_1, H1_0
- VMRHG V0, H2_1, H2_0
- VMRLG V0, H0_1, H0_1
- VMRLG V0, H1_1, H1_1
- VMRLG V0, H2_1, H2_1
-
- // load EX0, EX1, EX2
- MOVD $·constants<>(SB), R5
- VLM (R5), EX0, EX2
-
- // sum vectors
- VAQ H0_0, H0_1, H0_0
- VAQ H1_0, H1_1, H1_0
- VAQ H2_0, H2_1, H2_0
-
- // h may be >= 2*(2**130-5) so we need to reduce it again
- // M0...M4 are used as temps here
- REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
-
-next: // carry h1->h2
- VLEIB $7, $0x28, T_1
- VREPIB $4, T_2
- VGBM $0x003F, T_3
- VESRLG $4, T_3
-
- // byte shift
- VSRLB T_1, H1_0, T_4
-
- // bit shift
- VSRL T_2, T_4, T_4
-
- // clear h1 carry bits
- VN T_3, H1_0, H1_0
-
- // add carry
- VAQ T_4, H2_0, H2_0
-
- // h is now < 2*(2**130-5)
- // pack h into h1 (hi) and h0 (lo)
- PACK(H0_0, H1_0, H2_0)
-
- // if h > 2**130-5 then h -= 2**130-5
- MOD(H0_0, H1_0, T_0, T_1, T_2)
-
- // h += s
- MOVD $·bswapMask<>(SB), R5
- VL (R5), T_1
- VL 16(R4), T_0
- VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
- VAQ T_0, H0_0, H0_0
- VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little)
- VST H0_0, (R1)
- RET
-
-add:
- // load EX0, EX1, EX2
- MOVD $·constants<>(SB), R5
- VLM (R5), EX0, EX2
-
- REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
- VMRHG V0, H0_1, H0_0
- VMRHG V0, H1_1, H1_0
- VMRHG V0, H2_1, H2_0
- VMRLG V0, H0_1, H0_1
- VMRLG V0, H1_1, H1_1
- VMRLG V0, H2_1, H2_1
- CMPBLE R3, $64, b4
-
-b4:
- CMPBLE R3, $48, b3 // 3 blocks or less
-
- // 4(3+1) blocks remaining
- SUB $49, R3
- VLM (R2), M0, M2
- VLL R3, 48(R2), M3
- ADD $1, R3
- MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, M3
- MOVD $64(R2), R2
- EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
- VLEIB $10, $1, H2_0
- VLEIB $10, $1, H2_1
- VZERO M0
- VZERO M1
- VZERO M4
- VZERO M5
- VZERO T_4
- VZERO T_10
- EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)
- VLR T_4, M2
- VLEIB $10, $1, M4
- CMPBNE R3, $16, 2(PC)
- VLEIB $10, $1, T_10
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
- VMRHG V0, H0_1, H0_0
- VMRHG V0, H1_1, H1_0
- VMRHG V0, H2_1, H2_0
- VMRLG V0, H0_1, H0_1
- VMRLG V0, H1_1, H1_1
- VMRLG V0, H2_1, H2_1
- SUB $16, R3
- CMPBLE R3, $0, square // this condition must always hold true!
-
-b3:
- CMPBLE R3, $32, b2
-
- // 3 blocks remaining
-
- // setup [r²,r]
- VSLDB $8, R_0, R_0, R_0
- VSLDB $8, R_1, R_1, R_1
- VSLDB $8, R_2, R_2, R_2
- VSLDB $8, R5_1, R5_1, R5_1
- VSLDB $8, R5_2, R5_2, R5_2
-
- VLVGG $1, RSAVE_0, R_0
- VLVGG $1, RSAVE_1, R_1
- VLVGG $1, RSAVE_2, R_2
- VLVGG $1, R5SAVE_1, R5_1
- VLVGG $1, R5SAVE_2, R5_2
-
- // setup [h0, h1]
- VSLDB $8, H0_0, H0_0, H0_0
- VSLDB $8, H1_0, H1_0, H1_0
- VSLDB $8, H2_0, H2_0, H2_0
- VO H0_1, H0_0, H0_0
- VO H1_1, H1_0, H1_0
- VO H2_1, H2_0, H2_0
- VZERO H0_1
- VZERO H1_1
- VZERO H2_1
-
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
-
- // H*[r**2, r]
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
-
- SUB $33, R3
- VLM (R2), M0, M1
- VLL R3, 32(R2), M2
- ADD $1, R3
- MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, M2
-
- // H += m0
- VZERO T_1
- VZERO T_2
- VZERO T_3
- EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)
- VLEIB $10, $1, T_3
- VAG H0_0, T_1, H0_0
- VAG H1_0, T_2, H1_0
- VAG H2_0, T_3, H2_0
-
- VZERO M0
- VZERO M3
- VZERO M4
- VZERO M5
- VZERO T_10
-
- // (H+m0)*r
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)
-
- // H += m1
- VZERO V0
- VZERO T_1
- VZERO T_2
- VZERO T_3
- EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)
- VLEIB $10, $1, T_3
- VAQ H0_0, T_1, H0_0
- VAQ H1_0, T_2, H1_0
- VAQ H2_0, T_3, H2_0
- REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
-
- // [H, m2] * [r**2, r]
- EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
- CMPBNE R3, $16, 2(PC)
- VLEIB $10, $1, H2_0
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)
- SUB $16, R3
- CMPBLE R3, $0, next // this condition must always hold true!
-
-b2:
- CMPBLE R3, $16, b1
-
- // 2 blocks remaining
-
- // setup [r²,r]
- VSLDB $8, R_0, R_0, R_0
- VSLDB $8, R_1, R_1, R_1
- VSLDB $8, R_2, R_2, R_2
- VSLDB $8, R5_1, R5_1, R5_1
- VSLDB $8, R5_2, R5_2, R5_2
-
- VLVGG $1, RSAVE_0, R_0
- VLVGG $1, RSAVE_1, R_1
- VLVGG $1, RSAVE_2, R_2
- VLVGG $1, R5SAVE_1, R5_1
- VLVGG $1, R5SAVE_2, R5_2
-
- // setup [h0, h1]
- VSLDB $8, H0_0, H0_0, H0_0
- VSLDB $8, H1_0, H1_0, H1_0
- VSLDB $8, H2_0, H2_0, H2_0
- VO H0_1, H0_0, H0_0
- VO H1_1, H1_0, H1_0
- VO H2_1, H2_0, H2_0
- VZERO H0_1
- VZERO H1_1
- VZERO H2_1
-
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
-
- // H*[r**2, r]
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
- VMRHG V0, H0_1, H0_0
- VMRHG V0, H1_1, H1_0
- VMRHG V0, H2_1, H2_0
- VMRLG V0, H0_1, H0_1
- VMRLG V0, H1_1, H1_1
- VMRLG V0, H2_1, H2_1
-
- // move h to the left and 0s at the right
- VSLDB $8, H0_0, H0_0, H0_0
- VSLDB $8, H1_0, H1_0, H1_0
- VSLDB $8, H2_0, H2_0, H2_0
-
- // get message blocks and append 1 to start
- SUB $17, R3
- VL (R2), M0
- VLL R3, 16(R2), M1
- ADD $1, R3
- MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, M1
- VZERO T_6
- VZERO T_7
- VZERO T_8
- EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
- EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)
- VLEIB $2, $1, T_8
- CMPBNE R3, $16, 2(PC)
- VLEIB $10, $1, T_8
-
- // add [m0, m1] to h
- VAG H0_0, T_6, H0_0
- VAG H1_0, T_7, H1_0
- VAG H2_0, T_8, H2_0
-
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
- VZERO T_10
- VZERO M0
-
- // at this point R_0 .. R5_2 look like [r**2, r]
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
- SUB $16, R3, R3
- CMPBLE R3, $0, next
-
-b1:
- CMPBLE R3, $0, next
-
- // 1 block remaining
-
- // setup [r²,r]
- VSLDB $8, R_0, R_0, R_0
- VSLDB $8, R_1, R_1, R_1
- VSLDB $8, R_2, R_2, R_2
- VSLDB $8, R5_1, R5_1, R5_1
- VSLDB $8, R5_2, R5_2, R5_2
-
- VLVGG $1, RSAVE_0, R_0
- VLVGG $1, RSAVE_1, R_1
- VLVGG $1, RSAVE_2, R_2
- VLVGG $1, R5SAVE_1, R5_1
- VLVGG $1, R5SAVE_2, R5_2
-
- // setup [h0, h1]
- VSLDB $8, H0_0, H0_0, H0_0
- VSLDB $8, H1_0, H1_0, H1_0
- VSLDB $8, H2_0, H2_0, H2_0
- VO H0_1, H0_0, H0_0
- VO H1_1, H1_0, H1_0
- VO H2_1, H2_0, H2_0
- VZERO H0_1
- VZERO H1_1
- VZERO H2_1
-
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
-
- // H*[r**2, r]
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
-
- // set up [0, m0] limbs
- SUB $1, R3
- VLL R3, (R2), M0
- ADD $1, R3
- MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, M0
- VZERO T_1
- VZERO T_2
- VZERO T_3
- EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m]
- CMPBNE R3, $16, 2(PC)
- VLEIB $10, $1, T_3
-
- // h+m0
- VAQ H0_0, T_1, H0_0
- VAQ H1_0, T_2, H1_0
- VAQ H2_0, T_3, H2_0
-
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
-
- BR next
-
-square:
- // setup [r²,r]
- VSLDB $8, R_0, R_0, R_0
- VSLDB $8, R_1, R_1, R_1
- VSLDB $8, R_2, R_2, R_2
- VSLDB $8, R5_1, R5_1, R5_1
- VSLDB $8, R5_2, R5_2, R5_2
-
- VLVGG $1, RSAVE_0, R_0
- VLVGG $1, RSAVE_1, R_1
- VLVGG $1, RSAVE_2, R_2
- VLVGG $1, R5SAVE_1, R5_1
- VLVGG $1, R5SAVE_2, R5_2
-
- // setup [h0, h1]
- VSLDB $8, H0_0, H0_0, H0_0
- VSLDB $8, H1_0, H1_0, H1_0
- VSLDB $8, H2_0, H2_0, H2_0
- VO H0_1, H0_0, H0_0
- VO H1_1, H1_0, H1_0
- VO H2_1, H2_0, H2_0
- VZERO H0_1
- VZERO H1_1
- VZERO H2_1
-
- VZERO M0
- VZERO M1
- VZERO M2
- VZERO M3
- VZERO M4
- VZERO M5
-
- // (h0*r**2) + (h1*r)
- MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
- REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
- BR next
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index 0f89aec1c..916c840b6 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -414,8 +414,8 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
return nil
}
-// SignCert sets c.SignatureKey to the authority's public key and stores a
-// Signature, by authority, in the certificate.
+// SignCert signs the certificate with an authority, setting the Nonce,
+// SignatureKey, and Signature fields.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil {
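The reworded doc comment above describes SignCert as filling in the certificate's Nonce, SignatureKey and Signature. A minimal, hedged sketch of that flow against the public golang.org/x/crypto/ssh API follows; the ed25519 keys, KeyId and principal are illustrative placeholders, not taken from this diff:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Illustrative keys only: a CA key to sign with and a user key to certify.
	_, caPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	userPub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	authority, err := ssh.NewSignerFromKey(caPriv)
	if err != nil {
		log.Fatal(err)
	}
	pub, err := ssh.NewPublicKey(userPub)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             pub,
		CertType:        ssh.UserCert,
		KeyId:           "demo-user",      // placeholder
		ValidPrincipals: []string{"demo"}, // placeholder
		ValidBefore:     ssh.CertTimeInfinity,
	}

	// SignCert fills in Nonce, SignatureKey and Signature using the authority.
	if err := cert.SignCert(rand.Reader, authority); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed by:", ssh.FingerprintSHA256(cert.SignatureKey))
	fmt.Println("signature format:", cert.Signature.Format)
}
```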
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index 0590070e2..f3265655e 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -36,7 +36,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// during the authentication phase the client first attempts the "none" method
// then any untried methods suggested by the server.
- tried := make(map[string]bool)
+ var tried []string
var lastMethods []string
sessionID := c.transport.getSessionID()
@@ -49,7 +49,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
- tried[auth.method()] = true
+ if m := auth.method(); !contains(tried, m) {
+ tried = append(tried, m)
+ }
}
if methods == nil {
methods = lastMethods
@@ -61,7 +63,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
- if tried[candidateMethod] {
+ if contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@@ -72,16 +74,16 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
}
}
}
- return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
+ return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}
-func keys(m map[string]bool) []string {
- s := make([]string, 0, len(m))
-
- for key := range m {
- s = append(s, key)
+func contains(list []string, e string) bool {
+ for _, s := range list {
+ if s == e {
+ return true
+ }
}
- return s
+ return false
}
// An AuthMethod represents an instance of an RFC 4252 authentication method.
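The change above replaces the map of tried methods with an ordered slice plus a small contains helper, so the authentication error now lists the attempted methods in the order they were tried rather than in map iteration order. A hedged sketch of how that surfaces to callers; host, user and password are placeholders:

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{
			ssh.Password("not-the-real-password"),
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // for a local test only
	}

	if _, err := ssh.Dial("tcp", "127.0.0.1:22", cfg); err != nil {
		// With slice-based tracking the attempted methods appear in order,
		// e.g. "attempted methods [none password]".
		log.Printf("dial failed: %v", err)
		return
	}
	log.Println("connected")
}
```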
diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
index f19016270..9654c0186 100644
--- a/vendor/golang.org/x/crypto/ssh/mux.go
+++ b/vendor/golang.org/x/crypto/ssh/mux.go
@@ -240,7 +240,7 @@ func (m *mux) onePacket() error {
id := binary.BigEndian.Uint32(packet[1:])
ch := m.chanList.getChan(id)
if ch == nil {
- return fmt.Errorf("ssh: invalid channel %d", id)
+ return m.handleUnknownChannelPacket(id, packet)
}
return ch.handlePacket(packet)
@@ -328,3 +328,24 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
}
}
+
+func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error {
+ msg, err := decode(packet)
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ // RFC 4254 section 5.4 says unrecognized channel requests should
+ // receive a failure response.
+ case *channelRequestMsg:
+ if msg.WantReply {
+ return m.sendMessage(channelRequestFailureMsg{
+ PeersID: msg.PeersID,
+ })
+ }
+ return nil
+ default:
+ return fmt.Errorf("ssh: invalid channel %d", id)
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 54acc1e36..76a92e0ca 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -108,6 +108,19 @@ type Transport struct {
// waiting for their turn.
StrictMaxConcurrentStreams bool
+	// ReadIdleTimeout is the timeout after which a health check using a ping
+	// frame will be carried out if no frame is received on the connection.
+	// Note that a ping response is considered a received frame, so if
+ // there is no other traffic on the connection, the health check will
+ // be performed every ReadIdleTimeout interval.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to Ping is not received.
+ // Defaults to 15s.
+ PingTimeout time.Duration
+
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
@@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
+func (t *Transport) pingTimeout() time.Duration {
+ if t.PingTimeout == 0 {
+ return 15 * time.Second
+ }
+ return t.PingTimeout
+
+}
+
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
func ConfigureTransport(t1 *http.Transport) error {
@@ -675,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
return cc, nil
}
+func (cc *ClientConn) healthCheck() {
+ pingTimeout := cc.t.pingTimeout()
+ // We don't need to periodically ping in the health check, because the readLoop of ClientConn will
+ // trigger the healthCheck again if there is no frame received.
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+ defer cancel()
+ err := cc.Ping(ctx)
+ if err != nil {
+ cc.closeForLostPing()
+ cc.t.connPool().MarkDead(cc)
+ return
+ }
+}
+
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -846,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error {
return nil
}
-// Close closes the client connection immediately.
-//
-// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
-func (cc *ClientConn) Close() error {
+// closeForError closes the client connection immediately. In-flight
+// requests are interrupted and err is sent to the streams.
+func (cc *ClientConn) closeForError(err error) error {
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
- err := errors.New("http2: client connection force closed via ClientConn.Close")
for id, cs := range cc.streams {
select {
case cs.resc <- resAndError{err: err}:
@@ -866,6 +899,20 @@ func (cc *ClientConn) Close() error {
return cc.tconn.Close()
}
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+ err := errors.New("http2: client connection force closed via ClientConn.Close")
+ return cc.closeForError(err)
+}
+
+// closeForLostPing closes the client connection immediately, interrupting in-flight requests.
+func (cc *ClientConn) closeForLostPing() error {
+ err := errors.New("http2: client connection lost")
+ return cc.closeForError(err)
+}
+
const maxAllocFrameSize = 512 << 10
// frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -1737,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error {
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a HEADERS reply
gotSettings := false
+ readIdleTimeout := cc.t.ReadIdleTimeout
+ var t *time.Timer
+ if readIdleTimeout != 0 {
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
+ defer t.Stop()
+ }
for {
f, err := cc.fr.ReadFrame()
+ if t != nil {
+ t.Reset(readIdleTimeout)
+ }
if err != nil {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go
index ed8da8dea..dcbb14ef3 100644
--- a/vendor/golang.org/x/sys/cpu/byteorder.go
+++ b/vendor/golang.org/x/sys/cpu/byteorder.go
@@ -39,20 +39,25 @@ func (bigEndian) Uint64(b []byte) uint64 {
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}
-// hostByteOrder returns binary.LittleEndian on little-endian machines and
-// binary.BigEndian on big-endian machines.
+// hostByteOrder returns littleEndian on little-endian machines and
+// bigEndian on big-endian machines.
func hostByteOrder() byteOrder {
switch runtime.GOARCH {
case "386", "amd64", "amd64p32",
+ "alpha",
"arm", "arm64",
"mipsle", "mips64le", "mips64p32le",
+ "nios2",
"ppc64le",
- "riscv", "riscv64":
+ "riscv", "riscv64",
+ "sh":
return littleEndian{}
case "armbe", "arm64be",
+ "m68k",
"mips", "mips64", "mips64p32",
"ppc", "ppc64",
"s390", "s390x",
+ "shbe",
"sparc", "sparc64":
return bigEndian{}
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
index 9c87677ae..7bcb36c7b 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -10,8 +10,14 @@ const cacheLineSize = 64
func init() {
switch runtime.GOOS {
- case "android", "darwin":
+ case "android", "darwin", "netbsd":
// Android and iOS don't seem to allow reading these registers.
+ //
+ // NetBSD:
+ // ID_AA64ISAR0_EL1 is a privileged register and cannot be read from EL0.
+ // It can be read via sysctl(3). Example for future implementers:
+ // https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c
+ //
// Fake the minimal features expected by
// TestARM64minimalFeatures.
ARM64.HasASIMD = true
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 780e387e3..53a249312 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -107,6 +107,7 @@ includes_FreeBSD='
#include <sys/types.h>
#include <sys/disk.h>
#include <sys/event.h>
+#include <sys/sched.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/sockio.h>
@@ -297,6 +298,7 @@ includes_NetBSD='
#include <sys/extattr.h>
#include <sys/mman.h>
#include <sys/mount.h>
+#include <sys/sched.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/sockio.h>
@@ -325,6 +327,7 @@ includes_OpenBSD='
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/select.h>
+#include <sys/sched.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
@@ -507,9 +510,11 @@ ccflags="$@"
$2 ~ /^(CLOCK|TIMER)_/ ||
$2 ~ /^CAN_/ ||
$2 ~ /^CAP_/ ||
+ $2 ~ /^CP_/ ||
+ $2 ~ /^CPUSTATES$/ ||
$2 ~ /^ALG_/ ||
$2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ ||
- $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ ||
+ $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ ||
$2 ~ /^FS_VERITY_/ ||
$2 ~ /^FSCRYPT_/ ||
$2 ~ /^GRND_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 68605db62..60bbe10ad 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -527,6 +527,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) {
return &ci, nil
}
+func SysctlTimeval(name string) (*Timeval, error) {
+ mib, err := sysctlmib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ var tv Timeval
+ n := uintptr(unsafe.Sizeof(tv))
+ if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil {
+ return nil, err
+ }
+ if n != unsafe.Sizeof(tv) {
+ return nil, EIO
+ }
+ return &tv, nil
+}
+
//sys utimes(path string, timeval *[2]Timeval) (err error)
func Utimes(path string, tv []Timeval) error {
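SysctlTimeval mirrors SysctlClockinfo for sysctl nodes whose value is a struct timeval. A hedged, BSD-only sketch; the kern.boottime node name is an assumption and not part of this change:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	// kern.boottime holds a timeval with the boot time on the BSDs and Darwin.
	tv, err := unix.SysctlTimeval("kern.boottime")
	if err != nil {
		fmt.Println("sysctl failed:", err)
		return
	}
	fmt.Println("booted at:", time.Unix(0, unix.TimevalToNsec(*tv)))
}
```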
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 942a4bbf7..fad483bb9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -97,6 +97,12 @@ func IoctlSetRTCTime(fd int, value *RTCTime) error {
return err
}
+func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
+ err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value)))
+ runtime.KeepAlive(value)
+ return err
+}
+
func IoctlGetUint32(fd int, req uint) (uint32, error) {
var value uint32
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
@@ -109,6 +115,12 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) {
return &value, err
}
+func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
+ var value RTCWkAlrm
+ err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value)))
+ return &value, err
+}
+
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
func Link(oldpath string, newpath string) (err error) {
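IoctlGetRTCWkAlrm and IoctlSetRTCWkAlrm wrap the RTC_WKALM_RD and RTC_WKALM_SET ioctls for the RTC wake alarm. A hedged sketch of round-tripping the alarm; the /dev/rtc0 path and the naive one-minute bump are illustrative, real code must normalize the time and usually needs root:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/rtc0", unix.O_RDONLY, 0) // placeholder device
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	alarm, err := unix.IoctlGetRTCWkAlrm(fd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("current alarm: enabled=%d pending=%d %+v\n",
		alarm.Enabled, alarm.Pending, alarm.Time)

	alarm.Enabled = 1
	alarm.Time.Min++ // crude "one minute later"; does not handle rollover
	if err := unix.IoctlSetRTCWkAlrm(fd, alarm); err != nil {
		log.Fatal(err)
	}
}
```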
@@ -1938,6 +1950,20 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
return int(n), nil
}
+func isGroupMember(gid int) bool {
+ groups, err := Getgroups()
+ if err != nil {
+ return false
+ }
+
+ for _, g := range groups {
+ if g == gid {
+ return true
+ }
+ }
+ return false
+}
+
//sys faccessat(dirfd int, path string, mode uint32) (err error)
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
@@ -1995,7 +2021,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
gid = Getgid()
}
- if uint32(gid) == st.Gid {
+ if uint32(gid) == st.Gid || isGroupMember(gid) {
fmode = (st.Mode >> 3) & 7
} else {
fmode = st.Mode & 7
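With AT_EACCESS, Faccessat emulates the permission check in user space, and the isGroupMember addition makes that emulation consult the caller's supplementary groups rather than only the primary GID. A hedged usage sketch; the path is a placeholder:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Check readability using the effective IDs, exercising the emulated path.
	err := unix.Faccessat(unix.AT_FDCWD, "/var/log/syslog", unix.R_OK, unix.AT_EACCESS)
	fmt.Println("readable with effective IDs:", err == nil)
}
```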
@@ -2096,6 +2122,18 @@ func Klogset(typ int, arg int) (err error) {
return nil
}
+// RemoteIovec is Iovec with the pointer replaced with an integer.
+// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer
+// refers to a location in a different process' address space, which
+// would confuse the Go garbage collector.
+type RemoteIovec struct {
+ Base uintptr
+ Len int
+}
+
+//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV
+//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV
+
/*
* Unimplemented
*/
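RemoteIovec and the ProcessVMReadv/ProcessVMWritev wrappers expose process_vm_readv(2) and process_vm_writev(2), with the remote addresses kept as plain integers so the Go garbage collector ignores them. A hedged sketch of reading another process's memory; the pid and address are placeholders and ptrace permission is normally required:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// readRemote copies len(buf) bytes from address addr in the target process
// into buf, using one local Iovec and one RemoteIovec.
func readRemote(pid int, addr uintptr, buf []byte) (int, error) {
	var local unix.Iovec
	local.Base = &buf[0]
	local.SetLen(len(buf))

	remote := []unix.RemoteIovec{{Base: addr, Len: len(buf)}}
	return unix.ProcessVMReadv(pid, []unix.Iovec{local}, remote, 0)
}

func main() {
	buf := make([]byte, 8)
	// Placeholder pid/address; with addr 0 this is expected to fail (EFAULT).
	n, err := readRemote(os.Getpid(), 0, buf)
	fmt.Println(n, err)
}
```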
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
index 848245873..3689c8084 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
index 4acd101c3..b8f7c3c93 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
index e4719873b..be14bb1a4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
index 5e49769d9..7ce9c0081 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 6e3cfec46..f8bd50c11 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -160,78 +160,28 @@ const (
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
- BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
- BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_ALU = 0x4
BPF_ALU64 = 0x7
BPF_AND = 0x50
- BPF_ANY = 0x0
BPF_ARSH = 0xc0
BPF_B = 0x10
BPF_BUILD_ID_SIZE = 0x14
BPF_CALL = 0x80
- BPF_DEVCG_ACC_MKNOD = 0x1
- BPF_DEVCG_ACC_READ = 0x2
- BPF_DEVCG_ACC_WRITE = 0x4
- BPF_DEVCG_DEV_BLOCK = 0x1
- BPF_DEVCG_DEV_CHAR = 0x2
BPF_DIV = 0x30
BPF_DW = 0x18
BPF_END = 0xd0
- BPF_EXIST = 0x2
BPF_EXIT = 0x90
- BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
- BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
- BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
BPF_FROM_BE = 0x8
BPF_FROM_LE = 0x0
BPF_FS_MAGIC = 0xcafe4a11
- BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
- BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
- BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
- BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
- BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
BPF_F_ALLOW_MULTI = 0x2
BPF_F_ALLOW_OVERRIDE = 0x1
BPF_F_ANY_ALIGNMENT = 0x2
- BPF_F_CLONE = 0x200
- BPF_F_CTXLEN_MASK = 0xfffff00000000
- BPF_F_CURRENT_CPU = 0xffffffff
- BPF_F_CURRENT_NETNS = -0x1
- BPF_F_DONT_FRAGMENT = 0x4
- BPF_F_FAST_STACK_CMP = 0x200
- BPF_F_HDR_FIELD_MASK = 0xf
- BPF_F_INDEX_MASK = 0xffffffff
- BPF_F_INGRESS = 0x1
- BPF_F_INVALIDATE_HASH = 0x2
- BPF_F_LOCK = 0x4
- BPF_F_MARK_ENFORCE = 0x40
- BPF_F_MARK_MANGLED_0 = 0x20
- BPF_F_MMAPABLE = 0x400
- BPF_F_NO_COMMON_LRU = 0x2
- BPF_F_NO_PREALLOC = 0x1
- BPF_F_NUMA_NODE = 0x4
- BPF_F_PSEUDO_HDR = 0x10
BPF_F_QUERY_EFFECTIVE = 0x1
- BPF_F_RDONLY = 0x8
- BPF_F_RDONLY_PROG = 0x80
- BPF_F_RECOMPUTE_CSUM = 0x1
BPF_F_REPLACE = 0x4
- BPF_F_REUSE_STACKID = 0x400
- BPF_F_SEQ_NUMBER = 0x8
- BPF_F_SKIP_FIELD_MASK = 0xff
- BPF_F_STACK_BUILD_ID = 0x20
BPF_F_STRICT_ALIGNMENT = 0x1
- BPF_F_SYSCTL_BASE_NAME = 0x1
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_STATE_FREQ = 0x8
- BPF_F_TUNINFO_IPV6 = 0x1
- BPF_F_USER_BUILD_ID = 0x800
- BPF_F_USER_STACK = 0x100
- BPF_F_WRONLY = 0x10
- BPF_F_WRONLY_PROG = 0x100
- BPF_F_ZERO_CSUM_TX = 0x2
- BPF_F_ZERO_SEED = 0x40
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
@@ -267,7 +217,6 @@ const (
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_NET_OFF = -0x100000
- BPF_NOEXIST = 0x1
BPF_OBJ_NAME_LEN = 0x10
BPF_OR = 0x40
BPF_PSEUDO_CALL = 0x1
@@ -275,12 +224,6 @@ const (
BPF_PSEUDO_MAP_VALUE = 0x2
BPF_RET = 0x6
BPF_RSH = 0x70
- BPF_SK_STORAGE_GET_F_CREATE = 0x1
- BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf
- BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
- BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
- BPF_SOCK_OPS_RTT_CB_FLAG = 0x8
- BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
@@ -378,12 +321,14 @@ const (
CLOCK_TXINT = 0x3
CLONE_ARGS_SIZE_VER0 = 0x40
CLONE_ARGS_SIZE_VER1 = 0x50
+ CLONE_ARGS_SIZE_VER2 = 0x58
CLONE_CHILD_CLEARTID = 0x200000
CLONE_CHILD_SETTID = 0x1000000
CLONE_CLEAR_SIGHAND = 0x100000000
CLONE_DETACHED = 0x400000
CLONE_FILES = 0x400
CLONE_FS = 0x200
+ CLONE_INTO_CGROUP = 0x200000000
CLONE_IO = 0x80000000
CLONE_NEWCGROUP = 0x2000000
CLONE_NEWIPC = 0x8000000
@@ -598,7 +543,9 @@ const (
FAN_DELETE = 0x200
FAN_DELETE_SELF = 0x400
FAN_DENY = 0x2
+ FAN_DIR_MODIFY = 0x80000
FAN_ENABLE_AUDIT = 0x40
+ FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_FID = 0x1
FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000
@@ -2108,8 +2055,6 @@ const (
TCOFLUSH = 0x1
TCOOFF = 0x0
TCOON = 0x1
- TCP_BPF_IW = 0x3e9
- TCP_BPF_SNDCWND_CLAMP = 0x3ea
TCP_CC_INFO = 0x1a
TCP_CM_INQ = 0x24
TCP_CONGESTION = 0xd
@@ -2384,8 +2329,9 @@ const (
XDP_COPY = 0x2
XDP_FLAGS_DRV_MODE = 0x4
XDP_FLAGS_HW_MODE = 0x8
- XDP_FLAGS_MASK = 0xf
+ XDP_FLAGS_MASK = 0x1f
XDP_FLAGS_MODES = 0xe
+ XDP_FLAGS_REPLACE = 0x10
XDP_FLAGS_SKB_MODE = 0x2
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
XDP_MMAP_OFFSETS = 0x1
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 5e974110d..11b25f68c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -75,8 +75,10 @@ const (
FP_XSTATE_MAGIC2 = 0x46505845
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80046601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40046602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0xc
F_GETLK64 = 0xc
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 47a57fe46..f92cff6ea 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -75,8 +75,10 @@ const (
FP_XSTATE_MAGIC2 = 0x46505845
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0x5
F_GETLK64 = 0x5
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index df2eea4bb..12bcbf88d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80046601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40046602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0xc
F_GETLK64 = 0xc
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 4e1214217..8b0e024b9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -77,8 +77,10 @@ const (
FPSIMD_MAGIC = 0x46508001
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0x5
F_GETLK64 = 0x5
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index a23b08029..eeadea943 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40046601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80046602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0x21
F_GETLK64 = 0x21
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index a5a921e43..0be6c4ccc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0xe
F_GETLK64 = 0xe
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index d088e197b..0880b745c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0xe
F_GETLK64 = 0xe
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 0ddf9d5fe..c8a66627a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40046601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80046602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0x21
F_GETLK64 = 0x21
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index a93ffc180..97aae63f1 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x800000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0x5
F_GETLK64 = 0xc
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index c1ea48b95..b0c3b0664 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x800000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0x5
F_GETLK64 = 0xc
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 7def950ba..0c0518193 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0x5
F_GETLK64 = 0x5
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index d39293c87..0b96bd462 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -74,8 +74,10 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
+ FS_IOC_SETFLAGS = 0x40086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0x5
F_GETLK64 = 0x5
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 3ff3ec681..bd5c30577 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -78,8 +78,10 @@ const (
FLUSHO = 0x1000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
+ FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
+ FS_IOC_SETFLAGS = 0x80086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0x7
F_GETLK64 = 0x7
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
index 96b9b8ab3..20f3a5799 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
@@ -158,6 +158,12 @@ const (
CLONE_SIGHAND = 0x800
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
index ed522a84e..90b8fcd29 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
@@ -158,6 +158,12 @@ const (
CLONE_SIGHAND = 0x800
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
index c8d36fe99..c5c03993b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
@@ -150,6 +150,12 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
index f1c146a74..14dd3c1d1 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
@@ -158,6 +158,12 @@ const (
CLONE_SIGHAND = 0x800
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
index 5402bd55c..c865a10df 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
@@ -146,6 +146,13 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
index ffaf2d2f9..9db6b2fb6 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
@@ -153,6 +153,13 @@ const (
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x4
CLOCK_UPTIME = 0x5
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
index 7aa796a64..7072526a6 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
@@ -146,6 +146,13 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
index 1792d3f13..ac5efbe5a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
@@ -156,6 +156,13 @@ const (
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x4
CLOCK_UPTIME = 0x5
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index df217825f..f6603de4f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -1847,6 +1847,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(localIov) > 0 {
+ _p0 = unsafe.Pointer(&localIov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ var _p1 unsafe.Pointer
+ if len(remoteIov) > 0 {
+ _p1 = unsafe.Pointer(&remoteIov[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(localIov) > 0 {
+ _p0 = unsafe.Pointer(&localIov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ var _p1 unsafe.Pointer
+ if len(remoteIov) > 0 {
+ _p1 = unsafe.Pointer(&remoteIov[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index 6f79227d7..b91c2ae0f 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -125,9 +125,9 @@ type Statfs_t struct {
Owner uint32
Fsid Fsid
Charspare [80]int8
- Fstypename [16]int8
- Mntfromname [1024]int8
- Mntonname [1024]int8
+ Fstypename [16]byte
+ Mntfromname [1024]byte
+ Mntonname [1024]byte
}
type statfs_freebsd11_t struct {
@@ -150,9 +150,9 @@ type statfs_freebsd11_t struct {
Owner uint32
Fsid Fsid
Charspare [80]int8
- Fstypename [16]int8
- Mntfromname [88]int8
- Mntonname [88]int8
+ Fstypename [16]byte
+ Mntfromname [88]byte
+ Mntonname [88]byte
}
type Flock_t struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 416f7767e..27d67ac8f 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1871,175 +1871,249 @@ const (
)
const (
- BPF_REG_0 = 0x0
- BPF_REG_1 = 0x1
- BPF_REG_2 = 0x2
- BPF_REG_3 = 0x3
- BPF_REG_4 = 0x4
- BPF_REG_5 = 0x5
- BPF_REG_6 = 0x6
- BPF_REG_7 = 0x7
- BPF_REG_8 = 0x8
- BPF_REG_9 = 0x9
- BPF_REG_10 = 0xa
- BPF_MAP_CREATE = 0x0
- BPF_MAP_LOOKUP_ELEM = 0x1
- BPF_MAP_UPDATE_ELEM = 0x2
- BPF_MAP_DELETE_ELEM = 0x3
- BPF_MAP_GET_NEXT_KEY = 0x4
- BPF_PROG_LOAD = 0x5
- BPF_OBJ_PIN = 0x6
- BPF_OBJ_GET = 0x7
- BPF_PROG_ATTACH = 0x8
- BPF_PROG_DETACH = 0x9
- BPF_PROG_TEST_RUN = 0xa
- BPF_PROG_GET_NEXT_ID = 0xb
- BPF_MAP_GET_NEXT_ID = 0xc
- BPF_PROG_GET_FD_BY_ID = 0xd
- BPF_MAP_GET_FD_BY_ID = 0xe
- BPF_OBJ_GET_INFO_BY_FD = 0xf
- BPF_PROG_QUERY = 0x10
- BPF_RAW_TRACEPOINT_OPEN = 0x11
- BPF_BTF_LOAD = 0x12
- BPF_BTF_GET_FD_BY_ID = 0x13
- BPF_TASK_FD_QUERY = 0x14
- BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
- BPF_MAP_FREEZE = 0x16
- BPF_BTF_GET_NEXT_ID = 0x17
- BPF_MAP_TYPE_UNSPEC = 0x0
- BPF_MAP_TYPE_HASH = 0x1
- BPF_MAP_TYPE_ARRAY = 0x2
- BPF_MAP_TYPE_PROG_ARRAY = 0x3
- BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
- BPF_MAP_TYPE_PERCPU_HASH = 0x5
- BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
- BPF_MAP_TYPE_STACK_TRACE = 0x7
- BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
- BPF_MAP_TYPE_LRU_HASH = 0x9
- BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
- BPF_MAP_TYPE_LPM_TRIE = 0xb
- BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
- BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
- BPF_MAP_TYPE_DEVMAP = 0xe
- BPF_MAP_TYPE_SOCKMAP = 0xf
- BPF_MAP_TYPE_CPUMAP = 0x10
- BPF_MAP_TYPE_XSKMAP = 0x11
- BPF_MAP_TYPE_SOCKHASH = 0x12
- BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
- BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
- BPF_MAP_TYPE_QUEUE = 0x16
- BPF_MAP_TYPE_STACK = 0x17
- BPF_MAP_TYPE_SK_STORAGE = 0x18
- BPF_MAP_TYPE_DEVMAP_HASH = 0x19
- BPF_PROG_TYPE_UNSPEC = 0x0
- BPF_PROG_TYPE_SOCKET_FILTER = 0x1
- BPF_PROG_TYPE_KPROBE = 0x2
- BPF_PROG_TYPE_SCHED_CLS = 0x3
- BPF_PROG_TYPE_SCHED_ACT = 0x4
- BPF_PROG_TYPE_TRACEPOINT = 0x5
- BPF_PROG_TYPE_XDP = 0x6
- BPF_PROG_TYPE_PERF_EVENT = 0x7
- BPF_PROG_TYPE_CGROUP_SKB = 0x8
- BPF_PROG_TYPE_CGROUP_SOCK = 0x9
- BPF_PROG_TYPE_LWT_IN = 0xa
- BPF_PROG_TYPE_LWT_OUT = 0xb
- BPF_PROG_TYPE_LWT_XMIT = 0xc
- BPF_PROG_TYPE_SOCK_OPS = 0xd
- BPF_PROG_TYPE_SK_SKB = 0xe
- BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
- BPF_PROG_TYPE_SK_MSG = 0x10
- BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
- BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
- BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
- BPF_PROG_TYPE_LIRC_MODE2 = 0x14
- BPF_PROG_TYPE_SK_REUSEPORT = 0x15
- BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
- BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17
- BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18
- BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19
- BPF_PROG_TYPE_TRACING = 0x1a
- BPF_CGROUP_INET_INGRESS = 0x0
- BPF_CGROUP_INET_EGRESS = 0x1
- BPF_CGROUP_INET_SOCK_CREATE = 0x2
- BPF_CGROUP_SOCK_OPS = 0x3
- BPF_SK_SKB_STREAM_PARSER = 0x4
- BPF_SK_SKB_STREAM_VERDICT = 0x5
- BPF_CGROUP_DEVICE = 0x6
- BPF_SK_MSG_VERDICT = 0x7
- BPF_CGROUP_INET4_BIND = 0x8
- BPF_CGROUP_INET6_BIND = 0x9
- BPF_CGROUP_INET4_CONNECT = 0xa
- BPF_CGROUP_INET6_CONNECT = 0xb
- BPF_CGROUP_INET4_POST_BIND = 0xc
- BPF_CGROUP_INET6_POST_BIND = 0xd
- BPF_CGROUP_UDP4_SENDMSG = 0xe
- BPF_CGROUP_UDP6_SENDMSG = 0xf
- BPF_LIRC_MODE2 = 0x10
- BPF_FLOW_DISSECTOR = 0x11
- BPF_CGROUP_SYSCTL = 0x12
- BPF_CGROUP_UDP4_RECVMSG = 0x13
- BPF_CGROUP_UDP6_RECVMSG = 0x14
- BPF_CGROUP_GETSOCKOPT = 0x15
- BPF_CGROUP_SETSOCKOPT = 0x16
- BPF_TRACE_RAW_TP = 0x17
- BPF_TRACE_FENTRY = 0x18
- BPF_TRACE_FEXIT = 0x19
- BPF_STACK_BUILD_ID_EMPTY = 0x0
- BPF_STACK_BUILD_ID_VALID = 0x1
- BPF_STACK_BUILD_ID_IP = 0x2
- BPF_ADJ_ROOM_NET = 0x0
- BPF_ADJ_ROOM_MAC = 0x1
- BPF_HDR_START_MAC = 0x0
- BPF_HDR_START_NET = 0x1
- BPF_LWT_ENCAP_SEG6 = 0x0
- BPF_LWT_ENCAP_SEG6_INLINE = 0x1
- BPF_LWT_ENCAP_IP = 0x2
- BPF_OK = 0x0
- BPF_DROP = 0x2
- BPF_REDIRECT = 0x7
- BPF_LWT_REROUTE = 0x80
- BPF_SOCK_OPS_VOID = 0x0
- BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
- BPF_SOCK_OPS_RWND_INIT = 0x2
- BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
- BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
- BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
- BPF_SOCK_OPS_NEEDS_ECN = 0x6
- BPF_SOCK_OPS_BASE_RTT = 0x7
- BPF_SOCK_OPS_RTO_CB = 0x8
- BPF_SOCK_OPS_RETRANS_CB = 0x9
- BPF_SOCK_OPS_STATE_CB = 0xa
- BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
- BPF_SOCK_OPS_RTT_CB = 0xc
- BPF_TCP_ESTABLISHED = 0x1
- BPF_TCP_SYN_SENT = 0x2
- BPF_TCP_SYN_RECV = 0x3
- BPF_TCP_FIN_WAIT1 = 0x4
- BPF_TCP_FIN_WAIT2 = 0x5
- BPF_TCP_TIME_WAIT = 0x6
- BPF_TCP_CLOSE = 0x7
- BPF_TCP_CLOSE_WAIT = 0x8
- BPF_TCP_LAST_ACK = 0x9
- BPF_TCP_LISTEN = 0xa
- BPF_TCP_CLOSING = 0xb
- BPF_TCP_NEW_SYN_RECV = 0xc
- BPF_TCP_MAX_STATES = 0xd
- BPF_FIB_LKUP_RET_SUCCESS = 0x0
- BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
- BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
- BPF_FIB_LKUP_RET_PROHIBIT = 0x3
- BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
- BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
- BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
- BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
- BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
- BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
- BPF_FD_TYPE_TRACEPOINT = 0x1
- BPF_FD_TYPE_KPROBE = 0x2
- BPF_FD_TYPE_KRETPROBE = 0x3
- BPF_FD_TYPE_UPROBE = 0x4
- BPF_FD_TYPE_URETPROBE = 0x5
+ BPF_REG_0 = 0x0
+ BPF_REG_1 = 0x1
+ BPF_REG_2 = 0x2
+ BPF_REG_3 = 0x3
+ BPF_REG_4 = 0x4
+ BPF_REG_5 = 0x5
+ BPF_REG_6 = 0x6
+ BPF_REG_7 = 0x7
+ BPF_REG_8 = 0x8
+ BPF_REG_9 = 0x9
+ BPF_REG_10 = 0xa
+ BPF_MAP_CREATE = 0x0
+ BPF_MAP_LOOKUP_ELEM = 0x1
+ BPF_MAP_UPDATE_ELEM = 0x2
+ BPF_MAP_DELETE_ELEM = 0x3
+ BPF_MAP_GET_NEXT_KEY = 0x4
+ BPF_PROG_LOAD = 0x5
+ BPF_OBJ_PIN = 0x6
+ BPF_OBJ_GET = 0x7
+ BPF_PROG_ATTACH = 0x8
+ BPF_PROG_DETACH = 0x9
+ BPF_PROG_TEST_RUN = 0xa
+ BPF_PROG_GET_NEXT_ID = 0xb
+ BPF_MAP_GET_NEXT_ID = 0xc
+ BPF_PROG_GET_FD_BY_ID = 0xd
+ BPF_MAP_GET_FD_BY_ID = 0xe
+ BPF_OBJ_GET_INFO_BY_FD = 0xf
+ BPF_PROG_QUERY = 0x10
+ BPF_RAW_TRACEPOINT_OPEN = 0x11
+ BPF_BTF_LOAD = 0x12
+ BPF_BTF_GET_FD_BY_ID = 0x13
+ BPF_TASK_FD_QUERY = 0x14
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
+ BPF_MAP_FREEZE = 0x16
+ BPF_BTF_GET_NEXT_ID = 0x17
+ BPF_MAP_LOOKUP_BATCH = 0x18
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH = 0x19
+ BPF_MAP_UPDATE_BATCH = 0x1a
+ BPF_MAP_DELETE_BATCH = 0x1b
+ BPF_LINK_CREATE = 0x1c
+ BPF_LINK_UPDATE = 0x1d
+ BPF_MAP_TYPE_UNSPEC = 0x0
+ BPF_MAP_TYPE_HASH = 0x1
+ BPF_MAP_TYPE_ARRAY = 0x2
+ BPF_MAP_TYPE_PROG_ARRAY = 0x3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
+ BPF_MAP_TYPE_PERCPU_HASH = 0x5
+ BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
+ BPF_MAP_TYPE_STACK_TRACE = 0x7
+ BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
+ BPF_MAP_TYPE_LRU_HASH = 0x9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
+ BPF_MAP_TYPE_LPM_TRIE = 0xb
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
+ BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
+ BPF_MAP_TYPE_DEVMAP = 0xe
+ BPF_MAP_TYPE_SOCKMAP = 0xf
+ BPF_MAP_TYPE_CPUMAP = 0x10
+ BPF_MAP_TYPE_XSKMAP = 0x11
+ BPF_MAP_TYPE_SOCKHASH = 0x12
+ BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
+ BPF_MAP_TYPE_QUEUE = 0x16
+ BPF_MAP_TYPE_STACK = 0x17
+ BPF_MAP_TYPE_SK_STORAGE = 0x18
+ BPF_MAP_TYPE_DEVMAP_HASH = 0x19
+ BPF_MAP_TYPE_STRUCT_OPS = 0x1a
+ BPF_PROG_TYPE_UNSPEC = 0x0
+ BPF_PROG_TYPE_SOCKET_FILTER = 0x1
+ BPF_PROG_TYPE_KPROBE = 0x2
+ BPF_PROG_TYPE_SCHED_CLS = 0x3
+ BPF_PROG_TYPE_SCHED_ACT = 0x4
+ BPF_PROG_TYPE_TRACEPOINT = 0x5
+ BPF_PROG_TYPE_XDP = 0x6
+ BPF_PROG_TYPE_PERF_EVENT = 0x7
+ BPF_PROG_TYPE_CGROUP_SKB = 0x8
+ BPF_PROG_TYPE_CGROUP_SOCK = 0x9
+ BPF_PROG_TYPE_LWT_IN = 0xa
+ BPF_PROG_TYPE_LWT_OUT = 0xb
+ BPF_PROG_TYPE_LWT_XMIT = 0xc
+ BPF_PROG_TYPE_SOCK_OPS = 0xd
+ BPF_PROG_TYPE_SK_SKB = 0xe
+ BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
+ BPF_PROG_TYPE_SK_MSG = 0x10
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
+ BPF_PROG_TYPE_LIRC_MODE2 = 0x14
+ BPF_PROG_TYPE_SK_REUSEPORT = 0x15
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
+ BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18
+ BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19
+ BPF_PROG_TYPE_TRACING = 0x1a
+ BPF_PROG_TYPE_STRUCT_OPS = 0x1b
+ BPF_PROG_TYPE_EXT = 0x1c
+ BPF_PROG_TYPE_LSM = 0x1d
+ BPF_CGROUP_INET_INGRESS = 0x0
+ BPF_CGROUP_INET_EGRESS = 0x1
+ BPF_CGROUP_INET_SOCK_CREATE = 0x2
+ BPF_CGROUP_SOCK_OPS = 0x3
+ BPF_SK_SKB_STREAM_PARSER = 0x4
+ BPF_SK_SKB_STREAM_VERDICT = 0x5
+ BPF_CGROUP_DEVICE = 0x6
+ BPF_SK_MSG_VERDICT = 0x7
+ BPF_CGROUP_INET4_BIND = 0x8
+ BPF_CGROUP_INET6_BIND = 0x9
+ BPF_CGROUP_INET4_CONNECT = 0xa
+ BPF_CGROUP_INET6_CONNECT = 0xb
+ BPF_CGROUP_INET4_POST_BIND = 0xc
+ BPF_CGROUP_INET6_POST_BIND = 0xd
+ BPF_CGROUP_UDP4_SENDMSG = 0xe
+ BPF_CGROUP_UDP6_SENDMSG = 0xf
+ BPF_LIRC_MODE2 = 0x10
+ BPF_FLOW_DISSECTOR = 0x11
+ BPF_CGROUP_SYSCTL = 0x12
+ BPF_CGROUP_UDP4_RECVMSG = 0x13
+ BPF_CGROUP_UDP6_RECVMSG = 0x14
+ BPF_CGROUP_GETSOCKOPT = 0x15
+ BPF_CGROUP_SETSOCKOPT = 0x16
+ BPF_TRACE_RAW_TP = 0x17
+ BPF_TRACE_FENTRY = 0x18
+ BPF_TRACE_FEXIT = 0x19
+ BPF_MODIFY_RETURN = 0x1a
+ BPF_LSM_MAC = 0x1b
+ BPF_ANY = 0x0
+ BPF_NOEXIST = 0x1
+ BPF_EXIST = 0x2
+ BPF_F_LOCK = 0x4
+ BPF_F_NO_PREALLOC = 0x1
+ BPF_F_NO_COMMON_LRU = 0x2
+ BPF_F_NUMA_NODE = 0x4
+ BPF_F_RDONLY = 0x8
+ BPF_F_WRONLY = 0x10
+ BPF_F_STACK_BUILD_ID = 0x20
+ BPF_F_ZERO_SEED = 0x40
+ BPF_F_RDONLY_PROG = 0x80
+ BPF_F_WRONLY_PROG = 0x100
+ BPF_F_CLONE = 0x200
+ BPF_F_MMAPABLE = 0x400
+ BPF_STACK_BUILD_ID_EMPTY = 0x0
+ BPF_STACK_BUILD_ID_VALID = 0x1
+ BPF_STACK_BUILD_ID_IP = 0x2
+ BPF_F_RECOMPUTE_CSUM = 0x1
+ BPF_F_INVALIDATE_HASH = 0x2
+ BPF_F_HDR_FIELD_MASK = 0xf
+ BPF_F_PSEUDO_HDR = 0x10
+ BPF_F_MARK_MANGLED_0 = 0x20
+ BPF_F_MARK_ENFORCE = 0x40
+ BPF_F_INGRESS = 0x1
+ BPF_F_TUNINFO_IPV6 = 0x1
+ BPF_F_SKIP_FIELD_MASK = 0xff
+ BPF_F_USER_STACK = 0x100
+ BPF_F_FAST_STACK_CMP = 0x200
+ BPF_F_REUSE_STACKID = 0x400
+ BPF_F_USER_BUILD_ID = 0x800
+ BPF_F_ZERO_CSUM_TX = 0x2
+ BPF_F_DONT_FRAGMENT = 0x4
+ BPF_F_SEQ_NUMBER = 0x8
+ BPF_F_INDEX_MASK = 0xffffffff
+ BPF_F_CURRENT_CPU = 0xffffffff
+ BPF_F_CTXLEN_MASK = 0xfffff00000000
+ BPF_F_CURRENT_NETNS = -0x1
+ BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
+ BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
+ BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
+ BPF_F_SYSCTL_BASE_NAME = 0x1
+ BPF_SK_STORAGE_GET_F_CREATE = 0x1
+ BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1
+ BPF_ADJ_ROOM_NET = 0x0
+ BPF_ADJ_ROOM_MAC = 0x1
+ BPF_HDR_START_MAC = 0x0
+ BPF_HDR_START_NET = 0x1
+ BPF_LWT_ENCAP_SEG6 = 0x0
+ BPF_LWT_ENCAP_SEG6_INLINE = 0x1
+ BPF_LWT_ENCAP_IP = 0x2
+ BPF_OK = 0x0
+ BPF_DROP = 0x2
+ BPF_REDIRECT = 0x7
+ BPF_LWT_REROUTE = 0x80
+ BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
+ BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
+ BPF_SOCK_OPS_RTT_CB_FLAG = 0x8
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf
+ BPF_SOCK_OPS_VOID = 0x0
+ BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
+ BPF_SOCK_OPS_RWND_INIT = 0x2
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
+ BPF_SOCK_OPS_NEEDS_ECN = 0x6
+ BPF_SOCK_OPS_BASE_RTT = 0x7
+ BPF_SOCK_OPS_RTO_CB = 0x8
+ BPF_SOCK_OPS_RETRANS_CB = 0x9
+ BPF_SOCK_OPS_STATE_CB = 0xa
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
+ BPF_SOCK_OPS_RTT_CB = 0xc
+ BPF_TCP_ESTABLISHED = 0x1
+ BPF_TCP_SYN_SENT = 0x2
+ BPF_TCP_SYN_RECV = 0x3
+ BPF_TCP_FIN_WAIT1 = 0x4
+ BPF_TCP_FIN_WAIT2 = 0x5
+ BPF_TCP_TIME_WAIT = 0x6
+ BPF_TCP_CLOSE = 0x7
+ BPF_TCP_CLOSE_WAIT = 0x8
+ BPF_TCP_LAST_ACK = 0x9
+ BPF_TCP_LISTEN = 0xa
+ BPF_TCP_CLOSING = 0xb
+ BPF_TCP_NEW_SYN_RECV = 0xc
+ BPF_TCP_MAX_STATES = 0xd
+ TCP_BPF_IW = 0x3e9
+ TCP_BPF_SNDCWND_CLAMP = 0x3ea
+ BPF_DEVCG_ACC_MKNOD = 0x1
+ BPF_DEVCG_ACC_READ = 0x2
+ BPF_DEVCG_ACC_WRITE = 0x4
+ BPF_DEVCG_DEV_BLOCK = 0x1
+ BPF_DEVCG_DEV_CHAR = 0x2
+ BPF_FIB_LOOKUP_DIRECT = 0x1
+ BPF_FIB_LOOKUP_OUTPUT = 0x2
+ BPF_FIB_LKUP_RET_SUCCESS = 0x0
+ BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
+ BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
+ BPF_FIB_LKUP_RET_PROHIBIT = 0x3
+ BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
+ BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
+ BPF_FD_TYPE_TRACEPOINT = 0x1
+ BPF_FD_TYPE_KPROBE = 0x2
+ BPF_FD_TYPE_KRETPROBE = 0x3
+ BPF_FD_TYPE_UPROBE = 0x4
+ BPF_FD_TYPE_URETPROBE = 0x5
+ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
+ BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
+ BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
)
const (
@@ -2205,7 +2279,7 @@ const (
DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20
DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22
- DEVLINK_CMD_MAX = 0x44
+ DEVLINK_CMD_MAX = 0x48
DEVLINK_PORT_TYPE_NOTSET = 0x0
DEVLINK_PORT_TYPE_AUTO = 0x1
DEVLINK_PORT_TYPE_ETH = 0x2
@@ -2285,7 +2359,7 @@ const (
DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c
DEVLINK_ATTR_PAD = 0x3d
DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e
- DEVLINK_ATTR_MAX = 0x8c
+ DEVLINK_ATTR_MAX = 0x90
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0
diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go
index f80a4204f..e409d76f0 100644
--- a/vendor/golang.org/x/sys/windows/memory_windows.go
+++ b/vendor/golang.org/x/sys/windows/memory_windows.go
@@ -23,4 +23,9 @@ const (
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
+
+ QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002
+ QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001
+ QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008
+ QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004
)
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 12c0544cb..62cf70e9f 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -308,6 +308,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys GetProcessId(process Handle) (id uint32, err error)
//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error)
//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost
+//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32)
+//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error)
// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 2aa4fa642..8a562feed 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -217,6 +217,8 @@ var (
procGetProcessId = modkernel32.NewProc("GetProcessId")
procOpenThread = modkernel32.NewProc("OpenThread")
procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost")
+ procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx")
+ procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx")
procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW")
procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW")
procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW")
@@ -2414,6 +2416,23 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
return
}
+func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
+ syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
+ return
+}
+
+func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
+ r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = errnoErr(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
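
Editorial note: the Get/SetProcessWorkingSetSizeEx wrappers added above pair with the new QUOTA_LIMITS_HARDWS_* constants from memory_windows.go. A small Windows-only sketch of reading the current process's working-set bounds and then requesting a hard minimum; the 16 MiB / 64 MiB sizes are placeholders and the call needs the usual process quota privileges.

package main

// Windows-only sketch using golang.org/x/sys/windows.

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Pseudo handle for the calling process.
	h, err := windows.GetCurrentProcess()
	if err != nil {
		log.Fatal(err)
	}

	var min, max uintptr
	var flags uint32
	windows.GetProcessWorkingSetSizeEx(h, &min, &max, &flags)
	fmt.Printf("working set: min=%d max=%d flags=%#x\n", min, max, flags)

	// Pin a hard minimum working set, leave the maximum soft.
	err = windows.SetProcessWorkingSetSizeEx(h, 16<<20, 64<<20,
		windows.QUOTA_LIMITS_HARDWS_MIN_ENABLE|windows.QUOTA_LIMITS_HARDWS_MAX_DISABLE)
	if err != nil {
		log.Fatal(err)
	}
}
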
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index 0b9907f89..063d724cb 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,163 +1,206 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.22.0
+// protoc v3.11.2
// source: google/rpc/status.proto
package status
import (
- fmt "fmt"
- math "math"
+ reflect "reflect"
+ sync "sync"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
-// used by [gRPC](https://github.com/grpc). The error model is designed to be:
-//
-// - Simple to use and understand for most users
-// - Flexible enough to meet unexpected needs
-//
-// # Overview
-//
-// The `Status` message contains three pieces of data: error code, error
-// message, and error details. The error code should be an enum value of
-// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
-// if needed. The error message should be a developer-facing English message
-// that helps developers *understand* and *resolve* the error. If a localized
-// user-facing error message is needed, put the localized message in the error
-// details or localize it in the client. The optional error details may contain
-// arbitrary information about the error. There is a predefined set of error
-// detail types in the package `google.rpc` that can be used for common error
-// conditions.
-//
-// # Language mapping
-//
-// The `Status` message is the logical representation of the error model, but it
-// is not necessarily the actual wire format. When the `Status` message is
-// exposed in different client libraries and different wire protocols, it can be
-// mapped differently. For example, it will likely be mapped to some exceptions
-// in Java, but more likely mapped to some error codes in C.
+// used by [gRPC](https://github.com/grpc). Each `Status` message contains
+// three pieces of data: error code, error message, and error details.
//
-// # Other uses
-//
-// The error model and the `Status` message can be used in a variety of
-// environments, either with or without APIs, to provide a
-// consistent developer experience across different environments.
-//
-// Example uses of this error model include:
-//
-// - Partial errors. If a service needs to return partial errors to the client,
-// it may embed the `Status` in the normal response to indicate the partial
-// errors.
-//
-// - Workflow errors. A typical workflow has multiple steps. Each step may
-// have a `Status` message for error reporting.
-//
-// - Batch operations. If a client uses batch request and batch response, the
-// `Status` message should be used directly inside batch response, one for
-// each error sub-response.
-//
-// - Asynchronous operations. If an API call embeds asynchronous operation
-// results in its response, the status of those operations should be
-// represented directly using the `Status` message.
-//
-// - Logging. If some API errors are stored in logs, the message `Status` could
-// be used directly after any stripping needed for security/privacy reasons.
+// You can find out more about this error model and how to work with it in the
+// [API Design Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
- // The status code, which should be an enum value of
- // [google.rpc.Code][google.rpc.Code].
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
- // [google.rpc.Status.details][google.rpc.Status.details] field, or localized
- // by the client.
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
}
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_24d244abaf643bfe, []int{0}
+func (x *Status) Reset() {
+ *x = Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Status.Unmarshal(m, b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Status) XXX_Size() int {
- return xxx_messageInfo_Status.Size(m)
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Status proto.InternalMessageInfo
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
+}
-func (m *Status) GetCode() int32 {
- if m != nil {
- return m.Code
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
}
return 0
}
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
}
return ""
}
-func (m *Status) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
+func (x *Status) GetDetails() []*any.Any {
+ if x != nil {
+ return x.Details
}
return nil
}
-func init() {
- proto.RegisterType((*Status)(nil), "google.rpc.Status")
+var File_google_rpc_status_proto protoreflect.FileDescriptor
+
+var file_google_rpc_status_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
+ 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
+ 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
-func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) }
-
-var fileDescriptor_24d244abaf643bfe = []byte{
- // 209 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
- 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
- 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
- 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
- 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
- 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
- 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
- 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
- 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
- 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
- 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
- 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
- 0x00,
+var (
+ file_google_rpc_status_proto_rawDescOnce sync.Once
+ file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
+)
+
+func file_google_rpc_status_proto_rawDescGZIP() []byte {
+ file_google_rpc_status_proto_rawDescOnce.Do(func() {
+ file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
+ })
+ return file_google_rpc_status_proto_rawDescData
+}
+
+var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_rpc_status_proto_goTypes = []interface{}{
+ (*Status)(nil), // 0: google.rpc.Status
+ (*any.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_rpc_status_proto_depIdxs = []int32{
+ 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_status_proto_init() }
+func file_google_rpc_status_proto_init() {
+ if File_google_rpc_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_rpc_status_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_rpc_status_proto_goTypes,
+ DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+ MessageInfos: file_google_rpc_status_proto_msgTypes,
+ }.Build()
+ File_google_rpc_status_proto = out.File
+ file_google_rpc_status_proto_rawDesc = nil
+ file_google_rpc_status_proto_goTypes = nil
+ file_google_rpc_status_proto_depIdxs = nil
}
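
Editorial note: the regenerated google.rpc.Status above replaces the old XXX_* reflection shims with the protoreflect-based runtime, but the exported surface (Code, Message, Details and their getters) is unchanged. A short round-trip sketch, assuming only packages present in this vendor tree.

package main

import (
	"fmt"
	"log"

	"google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/protobuf/proto"
)

func main() {
	s := &status.Status{Code: 5, Message: "object not found"}

	// proto.Marshal works because the regenerated type implements
	// ProtoReflect(); the legacy getters behave as before.
	b, err := proto.Marshal(s)
	if err != nil {
		log.Fatal(err)
	}

	var out status.Status
	if err := proto.Unmarshal(b, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetCode(), out.GetMessage())             // 5 object not found
	fmt.Println(out.ProtoReflect().Descriptor().FullName()) // google.rpc.Status
}
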
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index 77dbe1b5c..c2f8f28f2 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -54,6 +54,13 @@ type UnmarshalOptions struct {
// Unmarshal reads the given []byte and populates the given proto.Message using options in
// UnmarshalOptions object.
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+ return o.unmarshal(b, m)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
proto.Reset(m)
if o.Resolver == nil {
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index dece22973..41e5c773c 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -102,6 +102,13 @@ func (o MarshalOptions) Format(m proto.Message) string {
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+ return o.marshal(m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 4088e59c6..6b3001c66 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 23
+ Minor = 24
Patch = 0
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 128214760..4974b16d5 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -63,12 +63,15 @@ func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoifa
return o.unmarshal(in.Buf, in.Message)
}
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) {
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
if !o.Merge {
- Reset(m.Interface()) // TODO
+ Reset(m.Interface())
}
allowPartial := o.AllowPartial
o.Merge = true
@@ -105,7 +108,7 @@ func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) err
func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error {
md := m.Descriptor()
if messageset.IsMessageSet(md) {
- return unmarshalMessageSet(b, m, o)
+ return o.unmarshalMessageSet(b, m)
}
fields := md.Fields()
for len(b) > 0 {
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 456bfda47..7b47a1180 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -134,6 +134,9 @@ func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.Mar
return o.marshal(in.Buf, in.Message)
}
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) {
allowPartial := o.AllowPartial
o.AllowPartial = true
@@ -206,7 +209,7 @@ func growcap(oldcap, wantcap int) (newcap int) {
func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) {
if messageset.IsMessageSet(m.Descriptor()) {
- return marshalMessageSet(b, m, o)
+ return o.marshalMessageSet(b, m)
}
// There are many choices for what order we visit fields in. The default one here
// is chosen for reasonable efficiency and simplicity given the protoreflect API.
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
index b6b3de591..1d692c3a8 100644
--- a/vendor/google.golang.org/protobuf/proto/messageset.go
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -13,24 +13,24 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
)
-func sizeMessageSet(m protoreflect.Message) (size int) {
+func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) {
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
size += messageset.SizeField(fd.Number())
size += protowire.SizeTag(messageset.FieldMessage)
- size += protowire.SizeBytes(sizeMessage(v.Message()))
+ size += protowire.SizeBytes(o.size(v.Message()))
return true
})
size += messageset.SizeUnknown(m.GetUnknown())
return size
}
-func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]byte, error) {
+func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) {
if !flags.ProtoLegacy {
return b, errors.New("no support for message_set_wire_format")
}
var err error
o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- b, err = marshalMessageSetField(b, fd, v, o)
+ b, err = o.marshalMessageSetField(b, fd, v)
return err == nil
})
if err != nil {
@@ -39,7 +39,7 @@ func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]by
return messageset.AppendUnknown(b, m.GetUnknown())
}
-func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value, o MarshalOptions) ([]byte, error) {
+func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
b = messageset.AppendFieldStart(b, fd.Number())
b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
@@ -51,12 +51,12 @@ func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value pro
return b, nil
}
-func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) error {
+func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error {
if !flags.ProtoLegacy {
return errors.New("no support for message_set_wire_format")
}
return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error {
- err := unmarshalMessageSetField(m, num, v, o)
+ err := o.unmarshalMessageSetField(m, num, v)
if err == errUnknown {
unknown := m.GetUnknown()
unknown = protowire.AppendTag(unknown, num, protowire.BytesType)
@@ -68,7 +68,7 @@ func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) e
})
}
-func unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte, o UnmarshalOptions) error {
+func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error {
md := m.Descriptor()
if !md.ExtensionRanges().Has(num) {
return errUnknown
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index 11ba84146..554b9c6c0 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -23,10 +23,13 @@ func (o MarshalOptions) Size(m Message) int {
return 0
}
- return sizeMessage(m.ProtoReflect())
+ return o.size(m.ProtoReflect())
}
-func sizeMessage(m protoreflect.Message) (size int) {
+// size is a centralized function that all size operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for size that do not go through this.
+func (o MarshalOptions) size(m protoreflect.Message) (size int) {
methods := protoMethods(m)
if methods != nil && methods.Size != nil {
out := methods.Size(protoiface.SizeInput{
@@ -42,52 +45,52 @@ func sizeMessage(m protoreflect.Message) (size int) {
})
return len(out.Buf)
}
- return sizeMessageSlow(m)
+ return o.sizeMessageSlow(m)
}
-func sizeMessageSlow(m protoreflect.Message) (size int) {
+func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) {
if messageset.IsMessageSet(m.Descriptor()) {
- return sizeMessageSet(m)
+ return o.sizeMessageSet(m)
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- size += sizeField(fd, v)
+ size += o.sizeField(fd, v)
return true
})
size += len(m.GetUnknown())
return size
}
-func sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
+func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
num := fd.Number()
switch {
case fd.IsList():
- return sizeList(num, fd, value.List())
+ return o.sizeList(num, fd, value.List())
case fd.IsMap():
- return sizeMap(num, fd, value.Map())
+ return o.sizeMap(num, fd, value.Map())
default:
- return protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), value)
+ return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value)
}
}
-func sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
+func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
if fd.IsPacked() && list.Len() > 0 {
content := 0
for i, llen := 0, list.Len(); i < llen; i++ {
- content += sizeSingular(num, fd.Kind(), list.Get(i))
+ content += o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return protowire.SizeTag(num) + protowire.SizeBytes(content)
}
for i, llen := 0, list.Len(); i < llen; i++ {
- size += protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), list.Get(i))
+ size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i))
}
return size
}
-func sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
+func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
size += protowire.SizeTag(num)
- size += protowire.SizeBytes(sizeField(fd.MapKey(), key.Value()) + sizeField(fd.MapValue(), value))
+ size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value))
return true
})
return size
diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go
index 1118460f6..3cf61a824 100644
--- a/vendor/google.golang.org/protobuf/proto/size_gen.go
+++ b/vendor/google.golang.org/protobuf/proto/size_gen.go
@@ -11,7 +11,7 @@ import (
"google.golang.org/protobuf/reflect/protoreflect"
)
-func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
+func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
switch kind {
case protoreflect.BoolKind:
return protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
@@ -46,9 +46,9 @@ func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.V
case protoreflect.BytesKind:
return protowire.SizeBytes(len(v.Bytes()))
case protoreflect.MessageKind:
- return protowire.SizeBytes(sizeMessage(v.Message()))
+ return protowire.SizeBytes(o.size(v.Message()))
case protoreflect.GroupKind:
- return protowire.SizeGroup(num, sizeMessage(v.Message()))
+ return protowire.SizeGroup(num, o.size(v.Message()))
default:
return 0
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index f334f71bc..5a3414724 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -85,6 +85,8 @@ func ValueOf(v interface{}) Value {
return ValueOfEnum(v)
case Message, List, Map:
return valueOfIface(v)
+ case ProtoMessage:
+ panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v))
default:
panic(fmt.Sprintf("invalid type: %T", v))
}
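
Editorial note: the new case above makes protoreflect.ValueOf fail fast, with a pointed message, when handed a generated proto.Message instead of its reflective view. A minimal sketch of the distinction; the concrete message type (the regenerated google.rpc.Status from earlier in this diff) is only an example.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	msg := &status.Status{Code: 3, Message: "bad request"}

	// protoreflect.ValueOf(msg) now panics with the clearer
	// "invalid proto.Message(...) type, expected a protoreflect.Message type"
	// message added above; wrap the reflective view instead.
	v := protoreflect.ValueOfMessage(msg.ProtoReflect())

	// Convert back to the concrete type.
	fmt.Println(v.Message().Interface().(*status.Status).GetMessage()) // bad request
}
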
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 43f16c616..5e5f96716 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -96,6 +96,38 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
}
path := file.Path()
if prev := r.filesByPath[path]; prev != nil {
+ // TODO: Remove this after some soak-in period after moving these types.
+ var prevPath string
+ const prevModule = "google.golang.org/genproto"
+ const prevVersion = "cb27e3aa (May 26th, 2020)"
+ switch path {
+ case "google/protobuf/field_mask.proto":
+ prevPath = prevModule + "/protobuf/field_mask"
+ case "google/protobuf/api.proto":
+ prevPath = prevModule + "/protobuf/api"
+ case "google/protobuf/type.proto":
+ prevPath = prevModule + "/protobuf/ptype"
+ case "google/protobuf/source_context.proto":
+ prevPath = prevModule + "/protobuf/source_context"
+ }
+ if r == GlobalFiles && prevPath != "" {
+ pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto")
+ pkgName = strings.Replace(pkgName, "_", "", -1) + "pb"
+ currPath := "google.golang.org/protobuf/types/known/" + pkgName
+ panic(fmt.Sprintf(""+
+ "duplicate registration of %q\n"+
+ "\n"+
+ "The generated definition for this file has moved:\n"+
+ "\tfrom: %q\n"+
+ "\tto: %q\n"+
+ "A dependency on the %q module must\n"+
+ "be at version %v or higher.\n"+
+ "\n"+
+ "Upgrade the dependency by running:\n"+
+ "\tgo get -u %v\n",
+ path, prevPath, currPath, prevModule, prevVersion, prevPath))
+ }
+
err := errors.New("file %q is already registered", file.Path())
err = amendErrorWithCaller(err, prev, file)
if r == GlobalFiles && ignoreConflict(file, err) {
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
index 435297a8d..d18a17885 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
@@ -17,9 +17,7 @@ reviewers:
- saad-ali
- janetkuo
- tallclair
-- eparis
- dims
- hongchaodeng
- krousey
- cjcullen
-- david-mcmahon
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index e53c3e61f..d3927d817 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -18,6 +18,7 @@ package errors
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"reflect"
@@ -29,14 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
-const (
- // StatusTooManyRequests means the server experienced too many requests within a
- // given window and that the client must wait to perform the action again.
- // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in
- // the future version.
- StatusTooManyRequests = http.StatusTooManyRequests
-)
-
// StatusError is an error intended for consumption by a REST API server; it can also be
// reconstructed by clients from a REST response. Public to allow easy type switches.
type StatusError struct {
@@ -483,127 +476,141 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr
}
// IsNotFound returns true if the specified error was created by NewNotFound.
+// It supports wrapped errors.
func IsNotFound(err error) bool {
return ReasonForError(err) == metav1.StatusReasonNotFound
}
// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
+// It supports wrapped errors.
func IsAlreadyExists(err error) bool {
return ReasonForError(err) == metav1.StatusReasonAlreadyExists
}
// IsConflict determines if the err is an error which indicates the provided update conflicts.
+// It supports wrapped errors.
func IsConflict(err error) bool {
return ReasonForError(err) == metav1.StatusReasonConflict
}
// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
+// It supports wrapped errors.
func IsInvalid(err error) bool {
return ReasonForError(err) == metav1.StatusReasonInvalid
}
// IsGone is true if the error indicates the requested resource is no longer available.
+// It supports wrapped errors.
func IsGone(err error) bool {
return ReasonForError(err) == metav1.StatusReasonGone
}
// IsResourceExpired is true if the error indicates the resource has expired and the current action is
// no longer possible.
+// It supports wrapped errors.
func IsResourceExpired(err error) bool {
return ReasonForError(err) == metav1.StatusReasonExpired
}
// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header
+// It supports wrapped errors.
func IsNotAcceptable(err error) bool {
return ReasonForError(err) == metav1.StatusReasonNotAcceptable
}
// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header
+// It supports wrapped errors.
func IsUnsupportedMediaType(err error) bool {
return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType
}
// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
// be performed because it is not supported by the server.
+// It supports wrapped errors.
func IsMethodNotSupported(err error) bool {
return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed
}
// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
+// It supports wrapped errors.
func IsServiceUnavailable(err error) bool {
return ReasonForError(err) == metav1.StatusReasonServiceUnavailable
}
// IsBadRequest determines if err is an error which indicates that the request is invalid.
+// It supports wrapped errors.
func IsBadRequest(err error) bool {
return ReasonForError(err) == metav1.StatusReasonBadRequest
}
// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
// requires authentication by the user.
+// It supports wrapped errors.
func IsUnauthorized(err error) bool {
return ReasonForError(err) == metav1.StatusReasonUnauthorized
}
// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
// be completed as requested.
+// It supports wrapped errors.
func IsForbidden(err error) bool {
return ReasonForError(err) == metav1.StatusReasonForbidden
}
// IsTimeout determines if err is an error which indicates that request times out due to long
// processing.
+// It supports wrapped errors.
func IsTimeout(err error) bool {
return ReasonForError(err) == metav1.StatusReasonTimeout
}
// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
// by the client.
+// It supports wrapped errors.
func IsServerTimeout(err error) bool {
return ReasonForError(err) == metav1.StatusReasonServerTimeout
}
// IsInternalError determines if err is an error which indicates an internal server error.
+// It supports wrapped errors.
func IsInternalError(err error) bool {
return ReasonForError(err) == metav1.StatusReasonInternalError
}
// IsTooManyRequests determines if err is an error which indicates that there are too many requests
// that the server cannot handle.
+// It supports wrapped errors.
func IsTooManyRequests(err error) bool {
if ReasonForError(err) == metav1.StatusReasonTooManyRequests {
return true
}
- switch t := err.(type) {
- case APIStatus:
- return t.Status().Code == http.StatusTooManyRequests
+ if status := APIStatus(nil); errors.As(err, &status) {
+ return status.Status().Code == http.StatusTooManyRequests
}
return false
}
// IsRequestEntityTooLargeError determines if err is an error which indicates
// the request entity is too large.
+// It supports wrapped errors.
func IsRequestEntityTooLargeError(err error) bool {
if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
return true
}
- switch t := err.(type) {
- case APIStatus:
- return t.Status().Code == http.StatusRequestEntityTooLarge
+ if status := APIStatus(nil); errors.As(err, &status) {
+ return status.Status().Code == http.StatusRequestEntityTooLarge
}
return false
}
// IsUnexpectedServerError returns true if the server response was not in the expected API format,
// and may be the result of another HTTP actor.
+// It supports wrapped errors.
func IsUnexpectedServerError(err error) bool {
- switch t := err.(type) {
- case APIStatus:
- if d := t.Status().Details; d != nil {
- for _, cause := range d.Causes {
- if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
- return true
- }
+ if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil {
+ for _, cause := range status.Status().Details.Causes {
+ if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
+ return true
}
}
}
@@ -611,38 +618,37 @@ func IsUnexpectedServerError(err error) bool {
}
// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
+// It supports wrapped errors.
func IsUnexpectedObjectError(err error) bool {
- _, ok := err.(*UnexpectedObjectError)
- return err != nil && ok
+ uoe := &UnexpectedObjectError{}
+ return err != nil && errors.As(err, &uoe)
}
// SuggestsClientDelay returns true if this error suggests a client delay as well as the
// suggested seconds to wait, or false if the error does not imply a wait. It does not
// address whether the error *should* be retried, since some errors (like a 3xx) may
// request delay without retry.
+// It supports wrapped errors.
func SuggestsClientDelay(err error) (int, bool) {
- switch t := err.(type) {
- case APIStatus:
- if t.Status().Details != nil {
- switch t.Status().Reason {
- // this StatusReason explicitly requests the caller to delay the action
- case metav1.StatusReasonServerTimeout:
- return int(t.Status().Details.RetryAfterSeconds), true
- }
- // If the client requests that we retry after a certain number of seconds
- if t.Status().Details.RetryAfterSeconds > 0 {
- return int(t.Status().Details.RetryAfterSeconds), true
- }
+ if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil {
+ switch t.Status().Reason {
+ // this StatusReason explicitly requests the caller to delay the action
+ case metav1.StatusReasonServerTimeout:
+ return int(t.Status().Details.RetryAfterSeconds), true
+ }
+ // If the client requests that we retry after a certain number of seconds
+ if t.Status().Details.RetryAfterSeconds > 0 {
+ return int(t.Status().Details.RetryAfterSeconds), true
}
}
return 0, false
}
// ReasonForError returns the HTTP status for a particular error.
+// It supports wrapped errors.
func ReasonForError(err error) metav1.StatusReason {
- switch t := err.(type) {
- case APIStatus:
- return t.Status().Reason
+ if status := APIStatus(nil); errors.As(err, &status) {
+ return status.Status().Reason
}
return metav1.StatusReasonUnknown
}
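
Editorial note: the rewrite above routes every Is* predicate through errors.As (via ReasonForError or a direct APIStatus match), so the checks now see through errors wrapped with fmt.Errorf and %w. A short sketch of what newly works, using real apimachinery constructors; the resource name and wrapping message are placeholders.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	base := apierrors.NewNotFound(
		schema.GroupResource{Group: "", Resource: "pods"}, "mypod")

	// Add context the way callers commonly do with %w.
	wrapped := fmt.Errorf("getting pod for container: %w", base)

	// Before this change the type switch only matched the unwrapped error;
	// with the errors.As-based ReasonForError both calls return true.
	fmt.Println(apierrors.IsNotFound(base))    // true
	fmt.Println(apierrors.IsNotFound(wrapped)) // true
}
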
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
index dc7740190..7ac0fe11a 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
@@ -9,8 +9,5 @@ reviewers:
- mikedanese
- saad-ali
- janetkuo
-- tallclair
-- eparis
- xiang90
- mbohlool
-- david-mcmahon
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
index 77cfb0c1a..15b4c875a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -25,7 +25,6 @@ reviewers:
- krousey
- mml
- mbohlool
-- david-mcmahon
- therc
- mqliang
- kevin-wangzefeng
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
index b937398cd..8eaebb80e 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
@@ -345,3 +345,11 @@ func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions,
}
return nil
}
+
+// Convert_Slice_string_To_v1_ResourceVersionMatch allows converting a URL query parameter to ResourceVersionMatch
+func Convert_Slice_string_To_v1_ResourceVersionMatch(in *[]string, out *ResourceVersionMatch, s conversion.Scope) error {
+ if len(*in) > 0 {
+ *out = ResourceVersionMatch((*in)[0])
+ }
+ return nil
+}
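
Editorial note: the helper added above is the usual url.Values-style converter: it takes the first value of the query parameter, if present, and stores it as a ResourceVersionMatch, ignoring the conversion scope. A tiny sketch of that behavior, assuming the ResourceVersionMatch string constants defined alongside it in this apimachinery version; the nil scope is acceptable because the function never uses it.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := []string{"Exact"} // e.g. ?resourceVersionMatch=Exact
	var out metav1.ResourceVersionMatch
	if err := metav1.Convert_Slice_string_To_v1_ResourceVersionMatch(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out == metav1.ResourceVersionMatchExact) // true

	// An empty query leaves the zero value untouched.
	empty := []string{}
	var unset metav1.ResourceVersionMatch
	_ = metav1.Convert_Slice_string_To_v1_ResourceVersionMatch(&empty, &unset, nil)
	fmt.Println(unset == "") // true
}
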
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 3288c5649..e74a51099 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -189,10 +189,38 @@ func (m *APIVersions) XXX_DiscardUnknown() {
var xxx_messageInfo_APIVersions proto.InternalMessageInfo
+func (m *Condition) Reset() { *m = Condition{} }
+func (*Condition) ProtoMessage() {}
+func (*Condition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_cf52fa777ced5367, []int{5}
+}
+func (m *Condition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Condition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Condition.Merge(m, src)
+}
+func (m *Condition) XXX_Size() int {
+ return m.Size()
+}
+func (m *Condition) XXX_DiscardUnknown() {
+ xxx_messageInfo_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Condition proto.InternalMessageInfo
+
func (m *CreateOptions) Reset() { *m = CreateOptions{} }
func (*CreateOptions) ProtoMessage() {}
func (*CreateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{5}
+ return fileDescriptor_cf52fa777ced5367, []int{6}
}
func (m *CreateOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -220,7 +248,7 @@ var xxx_messageInfo_CreateOptions proto.InternalMessageInfo
func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
func (*DeleteOptions) ProtoMessage() {}
func (*DeleteOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{6}
+ return fileDescriptor_cf52fa777ced5367, []int{7}
}
func (m *DeleteOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -248,7 +276,7 @@ var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo
func (m *Duration) Reset() { *m = Duration{} }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{7}
+ return fileDescriptor_cf52fa777ced5367, []int{8}
}
func (m *Duration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -276,7 +304,7 @@ var xxx_messageInfo_Duration proto.InternalMessageInfo
func (m *ExportOptions) Reset() { *m = ExportOptions{} }
func (*ExportOptions) ProtoMessage() {}
func (*ExportOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{8}
+ return fileDescriptor_cf52fa777ced5367, []int{9}
}
func (m *ExportOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -304,7 +332,7 @@ var xxx_messageInfo_ExportOptions proto.InternalMessageInfo
func (m *FieldsV1) Reset() { *m = FieldsV1{} }
func (*FieldsV1) ProtoMessage() {}
func (*FieldsV1) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{9}
+ return fileDescriptor_cf52fa777ced5367, []int{10}
}
func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -332,7 +360,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
func (m *GetOptions) Reset() { *m = GetOptions{} }
func (*GetOptions) ProtoMessage() {}
func (*GetOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{10}
+ return fileDescriptor_cf52fa777ced5367, []int{11}
}
func (m *GetOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -360,7 +388,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo
func (m *GroupKind) Reset() { *m = GroupKind{} }
func (*GroupKind) ProtoMessage() {}
func (*GroupKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{11}
+ return fileDescriptor_cf52fa777ced5367, []int{12}
}
func (m *GroupKind) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -388,7 +416,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo
func (m *GroupResource) Reset() { *m = GroupResource{} }
func (*GroupResource) ProtoMessage() {}
func (*GroupResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{12}
+ return fileDescriptor_cf52fa777ced5367, []int{13}
}
func (m *GroupResource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -416,7 +444,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
func (*GroupVersion) ProtoMessage() {}
func (*GroupVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{13}
+ return fileDescriptor_cf52fa777ced5367, []int{14}
}
func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -444,7 +472,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
func (*GroupVersionForDiscovery) ProtoMessage() {}
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{14}
+ return fileDescriptor_cf52fa777ced5367, []int{15}
}
func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
func (*GroupVersionKind) ProtoMessage() {}
func (*GroupVersionKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{15}
+ return fileDescriptor_cf52fa777ced5367, []int{16}
}
func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
func (*GroupVersionResource) ProtoMessage() {}
func (*GroupVersionResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{16}
+ return fileDescriptor_cf52fa777ced5367, []int{17}
}
func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
func (*LabelSelector) ProtoMessage() {}
func (*LabelSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{17}
+ return fileDescriptor_cf52fa777ced5367, []int{18}
}
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -556,7 +584,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
func (*LabelSelectorRequirement) ProtoMessage() {}
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{18}
+ return fileDescriptor_cf52fa777ced5367, []int{19}
}
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{19}
+ return fileDescriptor_cf52fa777ced5367, []int{20}
}
func (m *List) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -612,7 +640,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
func (m *ListMeta) Reset() { *m = ListMeta{} }
func (*ListMeta) ProtoMessage() {}
func (*ListMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{20}
+ return fileDescriptor_cf52fa777ced5367, []int{21}
}
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -640,7 +668,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo
func (m *ListOptions) Reset() { *m = ListOptions{} }
func (*ListOptions) ProtoMessage() {}
func (*ListOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{21}
+ return fileDescriptor_cf52fa777ced5367, []int{22}
}
func (m *ListOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -668,7 +696,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo
func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
func (*ManagedFieldsEntry) ProtoMessage() {}
func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{22}
+ return fileDescriptor_cf52fa777ced5367, []int{23}
}
func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -696,7 +724,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
func (m *MicroTime) Reset() { *m = MicroTime{} }
func (*MicroTime) ProtoMessage() {}
func (*MicroTime) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{23}
+ return fileDescriptor_cf52fa777ced5367, []int{24}
}
func (m *MicroTime) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MicroTime.Unmarshal(m, b)
@@ -719,7 +747,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
func (*ObjectMeta) ProtoMessage() {}
func (*ObjectMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{24}
+ return fileDescriptor_cf52fa777ced5367, []int{25}
}
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -747,7 +775,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
func (*OwnerReference) ProtoMessage() {}
func (*OwnerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{25}
+ return fileDescriptor_cf52fa777ced5367, []int{26}
}
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -775,7 +803,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
func (*PartialObjectMetadata) ProtoMessage() {}
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{26}
+ return fileDescriptor_cf52fa777ced5367, []int{27}
}
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -803,7 +831,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
func (*PartialObjectMetadataList) ProtoMessage() {}
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{27}
+ return fileDescriptor_cf52fa777ced5367, []int{28}
}
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
func (m *Patch) Reset() { *m = Patch{} }
func (*Patch) ProtoMessage() {}
func (*Patch) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{28}
+ return fileDescriptor_cf52fa777ced5367, []int{29}
}
func (m *Patch) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -859,7 +887,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo
func (m *PatchOptions) Reset() { *m = PatchOptions{} }
func (*PatchOptions) ProtoMessage() {}
func (*PatchOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{29}
+ return fileDescriptor_cf52fa777ced5367, []int{30}
}
func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -887,7 +915,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{30}
+ return fileDescriptor_cf52fa777ced5367, []int{31}
}
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -915,7 +943,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
func (m *RootPaths) Reset() { *m = RootPaths{} }
func (*RootPaths) ProtoMessage() {}
func (*RootPaths) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{31}
+ return fileDescriptor_cf52fa777ced5367, []int{32}
}
func (m *RootPaths) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -943,7 +971,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
func (*ServerAddressByClientCIDR) ProtoMessage() {}
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{32}
+ return fileDescriptor_cf52fa777ced5367, []int{33}
}
func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -971,7 +999,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
func (m *Status) Reset() { *m = Status{} }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{33}
+ return fileDescriptor_cf52fa777ced5367, []int{34}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -999,7 +1027,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo
func (m *StatusCause) Reset() { *m = StatusCause{} }
func (*StatusCause) ProtoMessage() {}
func (*StatusCause) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{34}
+ return fileDescriptor_cf52fa777ced5367, []int{35}
}
func (m *StatusCause) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1027,7 +1055,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
func (*StatusDetails) ProtoMessage() {}
func (*StatusDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{35}
+ return fileDescriptor_cf52fa777ced5367, []int{36}
}
func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
func (m *TableOptions) Reset() { *m = TableOptions{} }
func (*TableOptions) ProtoMessage() {}
func (*TableOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{36}
+ return fileDescriptor_cf52fa777ced5367, []int{37}
}
func (m *TableOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1083,7 +1111,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo
func (m *Time) Reset() { *m = Time{} }
func (*Time) ProtoMessage() {}
func (*Time) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{37}
+ return fileDescriptor_cf52fa777ced5367, []int{38}
}
func (m *Time) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Time.Unmarshal(m, b)
@@ -1106,7 +1134,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{38}
+ return fileDescriptor_cf52fa777ced5367, []int{39}
}
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1134,7 +1162,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
func (*TypeMeta) ProtoMessage() {}
func (*TypeMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{39}
+ return fileDescriptor_cf52fa777ced5367, []int{40}
}
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1162,7 +1190,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
func (*UpdateOptions) ProtoMessage() {}
func (*UpdateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{40}
+ return fileDescriptor_cf52fa777ced5367, []int{41}
}
func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1190,7 +1218,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
func (m *Verbs) Reset() { *m = Verbs{} }
func (*Verbs) ProtoMessage() {}
func (*Verbs) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{41}
+ return fileDescriptor_cf52fa777ced5367, []int{42}
}
func (m *Verbs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1218,7 +1246,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
func (*WatchEvent) ProtoMessage() {}
func (*WatchEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_cf52fa777ced5367, []int{42}
+ return fileDescriptor_cf52fa777ced5367, []int{43}
}
func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1249,6 +1277,7 @@ func init() {
proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource")
proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList")
proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions")
+ proto.RegisterType((*Condition)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Condition")
proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
@@ -1297,177 +1326,184 @@ func init() {
}
var fileDescriptor_cf52fa777ced5367 = []byte{
- // 2713 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59,
- 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad,
- 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4,
- 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35,
- 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f,
- 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e,
- 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb,
- 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e,
- 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e,
- 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3,
- 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1,
- 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff,
- 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99,
- 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d,
- 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e,
- 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35,
- 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d,
- 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92,
- 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b,
- 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71,
- 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb,
- 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e,
- 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b,
- 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30,
- 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75,
- 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2,
- 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38,
- 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe,
- 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c,
- 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6,
- 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad,
- 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac,
- 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2,
- 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f,
- 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f,
- 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15,
- 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8,
- 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78,
- 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed,
- 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60,
- 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a,
- 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23,
- 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d,
- 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea,
- 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0,
- 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c,
- 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e,
- 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36,
- 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f,
- 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b,
- 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa,
- 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60,
- 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63,
- 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71,
- 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47,
- 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d,
- 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c,
- 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21,
- 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01,
- 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32,
- 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde,
- 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70,
- 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93,
- 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e,
- 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63,
- 0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c,
- 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa,
- 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c,
- 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1,
- 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9,
- 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60,
- 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee,
- 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd,
- 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4,
- 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b,
- 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9,
- 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0,
- 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b,
- 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62,
- 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7,
- 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19,
- 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e,
- 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a,
- 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51,
- 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1,
- 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68,
- 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25,
- 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e,
- 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40,
- 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3,
- 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd,
- 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c,
- 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73,
- 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7,
- 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9,
- 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74,
- 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24,
- 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6,
- 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e,
- 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8,
- 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8,
- 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56,
- 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed,
- 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13,
- 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49,
- 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c,
- 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02,
- 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f,
- 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe,
- 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45,
- 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d,
- 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e,
- 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3,
- 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea,
- 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1,
- 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e,
- 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8,
- 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93,
- 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47,
- 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c,
- 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f,
- 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42,
- 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c,
- 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1,
- 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38,
- 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42,
- 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde,
- 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d,
- 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6,
- 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95,
- 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0,
- 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75,
- 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00,
- 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43,
- 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4,
- 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72,
- 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06,
- 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13,
- 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44,
- 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77,
- 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d,
- 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66,
- 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd,
- 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff,
- 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62,
- 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd,
- 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4,
- 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b,
- 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c,
- 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac,
- 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37,
- 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6,
- 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0,
- 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea,
- 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4,
- 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74,
- 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b,
- 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86,
- 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79,
- 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e,
- 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6,
- 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b,
- 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5,
- 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11,
- 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef,
- 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5,
- 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d,
- 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d,
- 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00,
+ // 2832 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0xcd, 0x6f, 0x23, 0x57,
+ 0x3d, 0x63, 0xc7, 0x89, 0xfd, 0x73, 0x9c, 0x8f, 0xb7, 0x59, 0xf0, 0x06, 0x11, 0xa7, 0x53, 0xb4,
+ 0xda, 0x42, 0xeb, 0x34, 0x4b, 0xa9, 0xb6, 0x5b, 0x5a, 0x88, 0xe3, 0x64, 0x1b, 0x9a, 0x34, 0xd1,
+ 0xcb, 0xee, 0x02, 0xa5, 0x42, 0x9d, 0x78, 0x5e, 0x9c, 0x21, 0xe3, 0x19, 0xf7, 0xbd, 0x71, 0xb2,
+ 0x86, 0x03, 0x3d, 0x80, 0x00, 0x09, 0xaa, 0x1e, 0x11, 0x07, 0xd4, 0x0a, 0xfe, 0x02, 0x2e, 0xf0,
+ 0x07, 0x20, 0xd1, 0x63, 0x25, 0x2e, 0x95, 0x40, 0x56, 0x37, 0x1c, 0x38, 0x22, 0xae, 0xb9, 0x80,
+ 0xde, 0xc7, 0xcc, 0xbc, 0xf1, 0xc7, 0x66, 0xdc, 0x2d, 0x15, 0x37, 0xcf, 0xef, 0xfb, 0xbd, 0xf7,
+ 0x7b, 0xbf, 0xaf, 0x67, 0xd8, 0x3d, 0xb9, 0xc5, 0xaa, 0x8e, 0xbf, 0x7a, 0xd2, 0x39, 0x24, 0xd4,
+ 0x23, 0x01, 0x61, 0xab, 0xa7, 0xc4, 0xb3, 0x7d, 0xba, 0xaa, 0x10, 0x56, 0xdb, 0x69, 0x59, 0x8d,
+ 0x63, 0xc7, 0x23, 0xb4, 0xbb, 0xda, 0x3e, 0x69, 0x72, 0x00, 0x5b, 0x6d, 0x91, 0xc0, 0x5a, 0x3d,
+ 0x5d, 0x5b, 0x6d, 0x12, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xb5, 0x4d, 0xfd, 0xc0, 0x47, 0x5f, 0x92,
+ 0x5c, 0x55, 0x9d, 0xab, 0xda, 0x3e, 0x69, 0x72, 0x00, 0xab, 0x72, 0xae, 0xea, 0xe9, 0xda, 0xd2,
+ 0x33, 0x4d, 0x27, 0x38, 0xee, 0x1c, 0x56, 0x1b, 0x7e, 0x6b, 0xb5, 0xe9, 0x37, 0xfd, 0x55, 0xc1,
+ 0x7c, 0xd8, 0x39, 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x97, 0x46, 0x9a, 0x42, 0x3b, 0x5e,
+ 0xe0, 0xb4, 0x48, 0xbf, 0x15, 0x4b, 0xcf, 0x5f, 0xc6, 0xc0, 0x1a, 0xc7, 0xa4, 0x65, 0xf5, 0xf3,
+ 0x99, 0x7f, 0xc9, 0x42, 0x7e, 0x7d, 0x7f, 0xfb, 0x0e, 0xf5, 0x3b, 0x6d, 0xb4, 0x02, 0x93, 0x9e,
+ 0xd5, 0x22, 0x65, 0x63, 0xc5, 0xb8, 0x51, 0xa8, 0xcd, 0x7c, 0xd0, 0xab, 0x4c, 0x9c, 0xf7, 0x2a,
+ 0x93, 0xaf, 0x59, 0x2d, 0x82, 0x05, 0x06, 0xb9, 0x90, 0x3f, 0x25, 0x94, 0x39, 0xbe, 0xc7, 0xca,
+ 0x99, 0x95, 0xec, 0x8d, 0xe2, 0xcd, 0x97, 0xab, 0x69, 0xd6, 0x5f, 0x15, 0x0a, 0xee, 0x4b, 0xd6,
+ 0x2d, 0x9f, 0xd6, 0x1d, 0xd6, 0xf0, 0x4f, 0x09, 0xed, 0xd6, 0xe6, 0x95, 0x96, 0xbc, 0x42, 0x32,
+ 0x1c, 0x69, 0x40, 0x3f, 0x31, 0x60, 0xbe, 0x4d, 0xc9, 0x11, 0xa1, 0x94, 0xd8, 0x0a, 0x5f, 0xce,
+ 0xae, 0x18, 0x9f, 0x82, 0xda, 0xb2, 0x52, 0x3b, 0xbf, 0xdf, 0x27, 0x1f, 0x0f, 0x68, 0x44, 0xbf,
+ 0x33, 0x60, 0x89, 0x11, 0x7a, 0x4a, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, 0xad, 0xbb, 0xe1, 0x3a,
+ 0xc4, 0x0b, 0x36, 0xb6, 0xeb, 0x98, 0x95, 0x27, 0xc5, 0x3e, 0x7c, 0x23, 0x9d, 0x41, 0x07, 0xa3,
+ 0xe4, 0xd4, 0x4c, 0x65, 0xd1, 0xd2, 0x48, 0x12, 0x86, 0x1f, 0x61, 0x86, 0x79, 0x04, 0x33, 0xe1,
+ 0x41, 0xee, 0x38, 0x2c, 0x40, 0xf7, 0x61, 0xaa, 0xc9, 0x3f, 0x58, 0xd9, 0x10, 0x06, 0x56, 0xd3,
+ 0x19, 0x18, 0xca, 0xa8, 0xcd, 0x2a, 0x7b, 0xa6, 0xc4, 0x27, 0xc3, 0x4a, 0x9a, 0xf9, 0x8b, 0x49,
+ 0x28, 0xae, 0xef, 0x6f, 0x63, 0xc2, 0xfc, 0x0e, 0x6d, 0x90, 0x14, 0x4e, 0x73, 0x0b, 0x66, 0x98,
+ 0xe3, 0x35, 0x3b, 0xae, 0x45, 0x39, 0xb4, 0x3c, 0x25, 0x28, 0x17, 0x15, 0xe5, 0xcc, 0x81, 0x86,
+ 0xc3, 0x09, 0x4a, 0x74, 0x13, 0x80, 0x4b, 0x60, 0x6d, 0xab, 0x41, 0xec, 0x72, 0x66, 0xc5, 0xb8,
+ 0x91, 0xaf, 0x21, 0xc5, 0x07, 0xaf, 0x45, 0x18, 0xac, 0x51, 0xa1, 0x27, 0x21, 0x27, 0x2c, 0x2d,
+ 0xe7, 0x85, 0x9a, 0x92, 0x22, 0xcf, 0x89, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc1, 0xb4, 0xf2, 0xb2,
+ 0x72, 0x41, 0x90, 0xcd, 0x29, 0xb2, 0xe9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0x89, 0xe3, 0xd9,
+ 0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xea, 0x78, 0x36, 0x16, 0x18, 0xb4, 0x03, 0xb9, 0x53, 0x42, 0x0f,
+ 0xb9, 0x27, 0x70, 0xd7, 0xfc, 0x4a, 0xba, 0x8d, 0xbe, 0xcf, 0x59, 0x6a, 0x05, 0x6e, 0x9a, 0xf8,
+ 0x89, 0xa5, 0x10, 0x54, 0x05, 0x60, 0xc7, 0x3e, 0x0d, 0xc4, 0xf2, 0xca, 0xb9, 0x95, 0xec, 0x8d,
+ 0x42, 0x6d, 0x96, 0xaf, 0xf7, 0x20, 0x82, 0x62, 0x8d, 0x82, 0xd3, 0x37, 0xac, 0x80, 0x34, 0x7d,
+ 0xea, 0x10, 0x56, 0x9e, 0x8e, 0xe9, 0x37, 0x22, 0x28, 0xd6, 0x28, 0xd0, 0xb7, 0x00, 0xb1, 0xc0,
+ 0xa7, 0x56, 0x93, 0xa8, 0xa5, 0xbe, 0x62, 0xb1, 0xe3, 0x32, 0x88, 0xd5, 0x2d, 0xa9, 0xd5, 0xa1,
+ 0x83, 0x01, 0x0a, 0x3c, 0x84, 0xcb, 0xfc, 0x83, 0x01, 0x73, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x05,
+ 0x33, 0x4d, 0xed, 0xd6, 0x29, 0xbf, 0x88, 0x4e, 0x5b, 0xbf, 0x91, 0x38, 0x41, 0x89, 0x08, 0x14,
+ 0xa8, 0x92, 0x14, 0x46, 0x97, 0xb5, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7,
+ 0x92, 0xcd, 0x7f, 0x1a, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x6e, 0x68, 0x31, 0xcd, 0x10, 0xdb, 0x37,
+ 0x33, 0x22, 0x1e, 0x5d, 0x12, 0x08, 0x32, 0xff, 0x17, 0x81, 0xe0, 0x76, 0xfe, 0xd7, 0xef, 0x55,
+ 0x26, 0xde, 0xfe, 0xfb, 0xca, 0x84, 0xf9, 0x9f, 0x0c, 0x14, 0x36, 0x7c, 0xcf, 0x76, 0x02, 0xe5,
+ 0xc8, 0x41, 0xb7, 0x3d, 0x70, 0x51, 0xef, 0x76, 0xdb, 0x04, 0x0b, 0x0c, 0x7a, 0x01, 0xa6, 0x58,
+ 0x60, 0x05, 0x1d, 0x26, 0xae, 0x5a, 0xa1, 0xf6, 0x44, 0x18, 0x02, 0x0e, 0x04, 0xf4, 0xa2, 0x57,
+ 0x99, 0x8b, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xab, 0xfc, 0x43, 0x61, 0x94, 0x7d, 0x47, 0xa6,
+ 0x98, 0x30, 0x56, 0x67, 0x63, 0xaf, 0xda, 0x1b, 0xa0, 0xc0, 0x43, 0xb8, 0xd0, 0x29, 0x20, 0xd7,
+ 0x62, 0xc1, 0x5d, 0x6a, 0x79, 0x4c, 0xe8, 0xba, 0xeb, 0xb4, 0x88, 0xba, 0x5c, 0x5f, 0x4e, 0xb7,
+ 0xbb, 0x9c, 0x23, 0xd6, 0xbb, 0x33, 0x20, 0x0d, 0x0f, 0xd1, 0x80, 0xae, 0xc3, 0x14, 0x25, 0x16,
+ 0xf3, 0xbd, 0x72, 0x4e, 0x2c, 0x3f, 0x8a, 0x80, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0xe0, 0xd1, 0x22,
+ 0x8c, 0x59, 0xcd, 0x30, 0x94, 0x45, 0xc1, 0x63, 0x57, 0x82, 0x71, 0x88, 0x37, 0x5b, 0x50, 0xda,
+ 0xa0, 0xc4, 0x0a, 0xc8, 0x5e, 0x3b, 0x10, 0x2e, 0x64, 0xc2, 0x94, 0x4d, 0xbb, 0xb8, 0xe3, 0x29,
+ 0x57, 0x03, 0x2e, 0xbf, 0x2e, 0x20, 0x58, 0x61, 0xf8, 0x0d, 0x3a, 0x72, 0x88, 0x6b, 0xef, 0x5a,
+ 0x9e, 0xd5, 0x24, 0x54, 0x45, 0x9e, 0xc8, 0xaf, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0x3f, 0xcb,
+ 0x42, 0xa9, 0x4e, 0x5c, 0x12, 0xeb, 0xdb, 0x02, 0xd4, 0xa4, 0x56, 0x83, 0xec, 0x13, 0xea, 0xf8,
+ 0xf6, 0x01, 0x69, 0xf8, 0x9e, 0xcd, 0x84, 0x0b, 0x64, 0x6b, 0x9f, 0xe3, 0x7b, 0x73, 0x67, 0x00,
+ 0x8b, 0x87, 0x70, 0x20, 0x17, 0x4a, 0x6d, 0x2a, 0x7e, 0x8b, 0xfd, 0x92, 0x1e, 0x52, 0xbc, 0xf9,
+ 0xd5, 0x74, 0xc7, 0xb1, 0xaf, 0xb3, 0xd6, 0x16, 0xce, 0x7b, 0x95, 0x52, 0x02, 0x84, 0x93, 0xc2,
+ 0xd1, 0x37, 0x61, 0xde, 0xa7, 0xed, 0x63, 0xcb, 0xab, 0x93, 0x36, 0xf1, 0x6c, 0xe2, 0x05, 0x4c,
+ 0xec, 0x42, 0xbe, 0xb6, 0xc8, 0x73, 0xf6, 0x5e, 0x1f, 0x0e, 0x0f, 0x50, 0xa3, 0xd7, 0x61, 0xa1,
+ 0x4d, 0xfd, 0xb6, 0xd5, 0x14, 0x2e, 0xb5, 0xef, 0xbb, 0x4e, 0xa3, 0x2b, 0x5c, 0xa8, 0x50, 0x7b,
+ 0xfa, 0xbc, 0x57, 0x59, 0xd8, 0xef, 0x47, 0x5e, 0xf4, 0x2a, 0x57, 0xc4, 0xd6, 0x71, 0x48, 0x8c,
+ 0xc4, 0x83, 0x62, 0xb4, 0x33, 0xcc, 0x8d, 0x3a, 0x43, 0x73, 0x1b, 0xf2, 0xf5, 0x8e, 0xf2, 0xe7,
+ 0x97, 0x20, 0x6f, 0xab, 0xdf, 0x6a, 0xe7, 0xc3, 0x8b, 0x15, 0xd1, 0x5c, 0xf4, 0x2a, 0x25, 0x5e,
+ 0xa6, 0x55, 0x43, 0x00, 0x8e, 0x58, 0xcc, 0x37, 0xa0, 0xb4, 0xf9, 0xa0, 0xed, 0xd3, 0x20, 0x3c,
+ 0xd3, 0xeb, 0x30, 0x45, 0x04, 0x40, 0x48, 0xcb, 0xc7, 0x7e, 0x2a, 0xc9, 0xb0, 0xc2, 0xf2, 0x4c,
+ 0x48, 0x1e, 0x58, 0x8d, 0x40, 0x25, 0xce, 0x28, 0x13, 0x6e, 0x72, 0x20, 0x96, 0x38, 0xf3, 0x3a,
+ 0xe4, 0x85, 0x43, 0xb1, 0xfb, 0x6b, 0x68, 0x1e, 0xb2, 0xd8, 0x3a, 0x13, 0x52, 0x67, 0x70, 0x96,
+ 0x5a, 0x67, 0x5a, 0x2c, 0xd9, 0x03, 0xb8, 0x43, 0x22, 0x13, 0xd6, 0x61, 0x2e, 0x0c, 0xa8, 0xc9,
+ 0x38, 0xff, 0x79, 0xa5, 0x64, 0x0e, 0x27, 0xd1, 0xb8, 0x9f, 0xde, 0x7c, 0x03, 0x0a, 0x22, 0x17,
+ 0xf0, 0x44, 0x1a, 0x27, 0x6d, 0xe3, 0x11, 0x49, 0x3b, 0xcc, 0xc4, 0x99, 0x51, 0x99, 0x58, 0x33,
+ 0xd7, 0x85, 0x92, 0xe4, 0x0d, 0xcb, 0x94, 0x54, 0x1a, 0x9e, 0x86, 0x7c, 0x68, 0xa6, 0xd2, 0x12,
+ 0x95, 0xa7, 0xa1, 0x20, 0x1c, 0x51, 0x68, 0xda, 0x8e, 0x21, 0x91, 0xd7, 0xd2, 0x29, 0xd3, 0x6a,
+ 0x90, 0xcc, 0xa3, 0x6b, 0x10, 0x4d, 0xd3, 0x8f, 0xa1, 0x3c, 0xaa, 0xa6, 0x7d, 0x8c, 0xcc, 0x9b,
+ 0xde, 0x14, 0xf3, 0x1d, 0x03, 0xe6, 0x75, 0x49, 0xe9, 0x8f, 0x2f, 0xbd, 0x92, 0xcb, 0x6b, 0x2e,
+ 0x6d, 0x47, 0x7e, 0x6b, 0xc0, 0x62, 0x62, 0x69, 0x63, 0x9d, 0xf8, 0x18, 0x46, 0xe9, 0xce, 0x91,
+ 0x1d, 0xc3, 0x39, 0xfe, 0x9a, 0x81, 0xd2, 0x8e, 0x75, 0x48, 0xdc, 0x03, 0xe2, 0x92, 0x46, 0xe0,
+ 0x53, 0xf4, 0x23, 0x28, 0xb6, 0xac, 0xa0, 0x71, 0x2c, 0xa0, 0x61, 0x7d, 0x5e, 0x4f, 0x17, 0x4a,
+ 0x13, 0x92, 0xaa, 0xbb, 0xb1, 0x98, 0x4d, 0x2f, 0xa0, 0xdd, 0xda, 0x15, 0x65, 0x52, 0x51, 0xc3,
+ 0x60, 0x5d, 0x9b, 0x68, 0xaa, 0xc4, 0xf7, 0xe6, 0x83, 0x36, 0x2f, 0x1e, 0xc6, 0xef, 0xe5, 0x12,
+ 0x26, 0x60, 0xf2, 0x56, 0xc7, 0xa1, 0xa4, 0x45, 0xbc, 0x20, 0x6e, 0xaa, 0x76, 0xfb, 0xe4, 0xe3,
+ 0x01, 0x8d, 0x4b, 0x2f, 0xc3, 0x7c, 0xbf, 0xf1, 0x3c, 0xfe, 0x9c, 0x90, 0xae, 0x3c, 0x2f, 0xcc,
+ 0x7f, 0xa2, 0x45, 0xc8, 0x9d, 0x5a, 0x6e, 0x47, 0xdd, 0x46, 0x2c, 0x3f, 0x6e, 0x67, 0x6e, 0x19,
+ 0xe6, 0xef, 0x0d, 0x28, 0x8f, 0x32, 0x04, 0x7d, 0x51, 0x13, 0x54, 0x2b, 0x2a, 0xab, 0xb2, 0xaf,
+ 0x92, 0xae, 0x94, 0xba, 0x09, 0x79, 0xbf, 0xcd, 0xab, 0x0d, 0x9f, 0xaa, 0x53, 0x7f, 0x2a, 0x3c,
+ 0xc9, 0x3d, 0x05, 0xbf, 0xe8, 0x55, 0xae, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x3c, 0x20,
+ 0xec, 0xe1, 0xb9, 0x29, 0xca, 0x03, 0xf7, 0x05, 0x04, 0x2b, 0x8c, 0xf9, 0x27, 0x03, 0x26, 0x45,
+ 0x59, 0xfc, 0x06, 0xe4, 0xf9, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x37, 0x64, 0x9c, 0x7b,
+ 0x97, 0x04, 0x56, 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0x9c, 0x13, 0x90, 0x56, 0x78,
+ 0x90, 0xcf, 0x8c, 0x14, 0xad, 0xc6, 0x01, 0x55, 0x6c, 0x9d, 0x6d, 0x3e, 0x08, 0x88, 0xc7, 0x0f,
+ 0x23, 0xbe, 0x1a, 0xdb, 0x5c, 0x06, 0x96, 0xa2, 0xcc, 0x7f, 0x1b, 0x10, 0xa9, 0xe2, 0xce, 0xcf,
+ 0x88, 0x7b, 0xb4, 0xe3, 0x78, 0x27, 0x6a, 0x5b, 0x23, 0x73, 0x0e, 0x14, 0x1c, 0x47, 0x14, 0xc3,
+ 0xd2, 0x43, 0x66, 0xbc, 0xf4, 0xc0, 0x15, 0x36, 0x7c, 0x2f, 0x70, 0xbc, 0xce, 0xc0, 0x6d, 0xdb,
+ 0x50, 0x70, 0x1c, 0x51, 0xf0, 0x32, 0x87, 0x92, 0x96, 0xe5, 0x78, 0x8e, 0xd7, 0xe4, 0x8b, 0xd8,
+ 0xf0, 0x3b, 0x5e, 0x20, 0xf2, 0xbd, 0x2a, 0x73, 0xf0, 0x00, 0x16, 0x0f, 0xe1, 0x30, 0xff, 0x38,
+ 0x09, 0x45, 0xbe, 0xe6, 0x30, 0xcf, 0xbd, 0x08, 0x25, 0x57, 0xf7, 0x02, 0xb5, 0xf6, 0xab, 0xca,
+ 0x94, 0xe4, 0xbd, 0xc6, 0x49, 0x5a, 0xce, 0x2c, 0xaa, 0xb3, 0x88, 0x39, 0x93, 0x64, 0xde, 0xd2,
+ 0x91, 0x38, 0x49, 0xcb, 0xa3, 0xd7, 0x19, 0xbf, 0x1f, 0xaa, 0xee, 0x89, 0x8e, 0xe8, 0xdb, 0x1c,
+ 0x88, 0x25, 0x0e, 0xed, 0xc2, 0x15, 0xcb, 0x75, 0xfd, 0x33, 0x01, 0xac, 0xf9, 0xfe, 0x49, 0xcb,
+ 0xa2, 0x27, 0x4c, 0xb4, 0xb4, 0xf9, 0xda, 0x17, 0x14, 0xcb, 0x95, 0xf5, 0x41, 0x12, 0x3c, 0x8c,
+ 0x6f, 0xd8, 0xb1, 0x4d, 0x8e, 0x79, 0x6c, 0xc7, 0xb0, 0xd8, 0x07, 0x12, 0xb7, 0x5c, 0xf5, 0x97,
+ 0xcf, 0x29, 0x39, 0x8b, 0x78, 0x08, 0xcd, 0xc5, 0x08, 0x38, 0x1e, 0x2a, 0x11, 0xdd, 0x86, 0x59,
+ 0xee, 0xc9, 0x7e, 0x27, 0x08, 0xab, 0xda, 0x9c, 0x38, 0x6e, 0x74, 0xde, 0xab, 0xcc, 0xde, 0x4d,
+ 0x60, 0x70, 0x1f, 0x25, 0xdf, 0x5c, 0xd7, 0x69, 0x39, 0x41, 0x79, 0x5a, 0xb0, 0x44, 0x9b, 0xbb,
+ 0xc3, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0xbf, 0xcc, 0x03, 0xcd, 0xdf, 0x64, 0x01, 0xc9, 0x32,
+ 0xdc, 0x96, 0xf5, 0x94, 0x0c, 0x69, 0xbc, 0x57, 0x50, 0x65, 0xbc, 0xd1, 0xd7, 0x2b, 0xa8, 0x0a,
+ 0x3e, 0xc4, 0xa3, 0x5d, 0x28, 0xc8, 0xd0, 0x12, 0x5f, 0x97, 0x55, 0x45, 0x5c, 0xd8, 0x0b, 0x11,
+ 0x17, 0xbd, 0xca, 0x52, 0x42, 0x4d, 0x84, 0x11, 0x7d, 0x5c, 0x2c, 0x01, 0xdd, 0x04, 0xb0, 0xda,
+ 0x8e, 0x3e, 0x35, 0x2b, 0xc4, 0xb3, 0x93, 0xb8, 0xff, 0xc5, 0x1a, 0x15, 0x7a, 0x05, 0x26, 0x83,
+ 0x4f, 0xd6, 0x6b, 0xe5, 0x45, 0x2b, 0xc9, 0x3b, 0x2b, 0x21, 0x81, 0x6b, 0x17, 0xfe, 0xcc, 0xb8,
+ 0x59, 0xaa, 0x4d, 0x8a, 0xb4, 0x6f, 0x45, 0x18, 0xac, 0x51, 0xa1, 0xef, 0x40, 0xfe, 0x48, 0x95,
+ 0xa2, 0xe2, 0x60, 0x52, 0x87, 0xc8, 0xb0, 0x80, 0x95, 0x8d, 0x7b, 0xf8, 0x85, 0x23, 0x69, 0xe6,
+ 0x5b, 0x50, 0xd8, 0x75, 0x1a, 0xd4, 0x17, 0x6d, 0xde, 0x53, 0x30, 0xcd, 0x12, 0x7d, 0x50, 0x74,
+ 0x24, 0xa1, 0xbb, 0x84, 0x78, 0xee, 0x27, 0x9e, 0xe5, 0xf9, 0xb2, 0xdb, 0xc9, 0xc5, 0x7e, 0xf2,
+ 0x1a, 0x07, 0x62, 0x89, 0xbb, 0xbd, 0xc8, 0x33, 0xfd, 0xcf, 0xdf, 0xaf, 0x4c, 0xbc, 0xfb, 0x7e,
+ 0x65, 0xe2, 0xbd, 0xf7, 0x55, 0xd6, 0xbf, 0x00, 0x80, 0xbd, 0xc3, 0x1f, 0x90, 0x86, 0x8c, 0x9f,
+ 0xa9, 0xa6, 0x64, 0xe1, 0x70, 0x56, 0x4c, 0xc9, 0x32, 0x7d, 0xd5, 0x9b, 0x86, 0xc3, 0x09, 0x4a,
+ 0xb4, 0x0a, 0x85, 0x68, 0xfe, 0xa5, 0x0e, 0x7a, 0x21, 0x74, 0x9c, 0x68, 0x48, 0x86, 0x63, 0x9a,
+ 0x44, 0x30, 0x9f, 0xbc, 0x34, 0x98, 0xd7, 0x20, 0xdb, 0x71, 0x6c, 0xd5, 0x13, 0x3f, 0x1b, 0x26,
+ 0xd3, 0x7b, 0xdb, 0xf5, 0x8b, 0x5e, 0xe5, 0x89, 0x51, 0x63, 0xe7, 0xa0, 0xdb, 0x26, 0xac, 0x7a,
+ 0x6f, 0xbb, 0x8e, 0x39, 0xf3, 0xb0, 0xc8, 0x32, 0x35, 0x66, 0x64, 0xb9, 0x09, 0xd0, 0x8c, 0x27,
+ 0x0b, 0xf2, 0xe2, 0x46, 0x1e, 0xa5, 0x4d, 0x14, 0x34, 0x2a, 0xc4, 0x60, 0xa1, 0xc1, 0xdb, 0x6f,
+ 0xd5, 0xe1, 0xb3, 0xc0, 0x6a, 0xc9, 0xb9, 0xe0, 0x78, 0xce, 0x7d, 0x4d, 0xa9, 0x59, 0xd8, 0xe8,
+ 0x17, 0x86, 0x07, 0xe5, 0x23, 0x1f, 0x16, 0x6c, 0xd5, 0x48, 0xc6, 0x4a, 0x0b, 0x63, 0x2b, 0xbd,
+ 0xca, 0x15, 0xd6, 0xfb, 0x05, 0xe1, 0x41, 0xd9, 0xe8, 0xfb, 0xb0, 0x14, 0x02, 0x07, 0xbb, 0x79,
+ 0x11, 0x79, 0xb3, 0xb5, 0xe5, 0xf3, 0x5e, 0x65, 0xa9, 0x3e, 0x92, 0x0a, 0x3f, 0x42, 0x02, 0xb2,
+ 0x61, 0xca, 0x95, 0x95, 0x6a, 0x51, 0x54, 0x17, 0x5f, 0x4f, 0xb7, 0x8a, 0xd8, 0xfb, 0xab, 0x7a,
+ 0x85, 0x1a, 0x75, 0xab, 0xaa, 0x38, 0x55, 0xb2, 0xd1, 0x03, 0x28, 0x5a, 0x9e, 0xe7, 0x07, 0x96,
+ 0x9c, 0x2f, 0xcc, 0x08, 0x55, 0xeb, 0x63, 0xab, 0x5a, 0x8f, 0x65, 0xf4, 0x55, 0xc4, 0x1a, 0x06,
+ 0xeb, 0xaa, 0xd0, 0x19, 0xcc, 0xf9, 0x67, 0x1e, 0xa1, 0x98, 0x1c, 0x11, 0x4a, 0xbc, 0x06, 0x61,
+ 0xe5, 0x92, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0xfb, 0xb5,
+ 0xa0, 0x2a, 0x0f, 0x92, 0x9e, 0xe5, 0x3a, 0x3f, 0x24, 0x94, 0x95, 0x67, 0xe3, 0xd1, 0xed, 0x56,
+ 0x04, 0xc5, 0x1a, 0x05, 0xfa, 0x1a, 0x14, 0x1b, 0x6e, 0x87, 0x05, 0x44, 0xce, 0xd1, 0xe7, 0xc4,
+ 0x0d, 0x8a, 0xd6, 0xb7, 0x11, 0xa3, 0xb0, 0x4e, 0x87, 0x3a, 0x50, 0x6a, 0xe9, 0x29, 0xa3, 0xbc,
+ 0x20, 0x56, 0x77, 0x2b, 0xdd, 0xea, 0x06, 0x93, 0x5a, 0x5c, 0xc1, 0x24, 0x70, 0x38, 0xa9, 0x65,
+ 0xe9, 0x05, 0x28, 0x7e, 0xc2, 0xe2, 0x9e, 0x37, 0x07, 0xfd, 0xe7, 0x38, 0x56, 0x73, 0xf0, 0xe7,
+ 0x0c, 0xcc, 0x26, 0x77, 0xbf, 0x2f, 0x1d, 0xe6, 0x52, 0xa5, 0xc3, 0xb0, 0x0d, 0x35, 0x46, 0x8e,
+ 0xfe, 0xc3, 0xb0, 0x9e, 0x1d, 0x19, 0xd6, 0x55, 0xf4, 0x9c, 0x7c, 0x9c, 0xe8, 0x59, 0x05, 0xe0,
+ 0x75, 0x06, 0xf5, 0x5d, 0x97, 0x50, 0x11, 0x38, 0xf3, 0x6a, 0xc4, 0x1f, 0x41, 0xb1, 0x46, 0xc1,
+ 0xab, 0xe1, 0x43, 0xd7, 0x6f, 0x9c, 0x88, 0x2d, 0x08, 0x2f, 0xbd, 0x08, 0x99, 0x79, 0x59, 0x0d,
+ 0xd7, 0x06, 0xb0, 0x78, 0x08, 0x87, 0xd9, 0x85, 0xab, 0xfb, 0x16, 0x0d, 0x1c, 0xcb, 0x8d, 0x2f,
+ 0x98, 0x68, 0x37, 0xde, 0x1c, 0x68, 0x66, 0x9e, 0x1d, 0xf7, 0xa2, 0xc6, 0x9b, 0x1f, 0xc3, 0xe2,
+ 0x86, 0xc6, 0xfc, 0x9b, 0x01, 0xd7, 0x86, 0xea, 0xfe, 0x0c, 0x9a, 0xa9, 0x37, 0x93, 0xcd, 0xd4,
+ 0x8b, 0x29, 0x67, 0x9c, 0xc3, 0xac, 0x1d, 0xd1, 0x5a, 0x4d, 0x43, 0x6e, 0x9f, 0x17, 0xb1, 0xe6,
+ 0xaf, 0x0c, 0x98, 0x11, 0xbf, 0xc6, 0x99, 0x0f, 0x57, 0x20, 0x77, 0xe4, 0x87, 0x23, 0xaa, 0xbc,
+ 0x7c, 0x42, 0xda, 0xe2, 0x00, 0x2c, 0xe1, 0x8f, 0x31, 0x40, 0x7e, 0xc7, 0x80, 0xe4, 0x64, 0x16,
+ 0xbd, 0x2c, 0xfd, 0xd7, 0x88, 0x46, 0xa7, 0x63, 0xfa, 0xee, 0x4b, 0xa3, 0x5a, 0xc1, 0x2b, 0xa9,
+ 0xa6, 0x84, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6f, 0x05, 0xc7, 0x8c, 0x2f, 0xbc, 0xcd, 0x7f,
+ 0xa8, 0xbd, 0x11, 0x0b, 0x17, 0x18, 0x2c, 0xe1, 0xe6, 0x2f, 0x0d, 0xb8, 0x36, 0xf2, 0xd5, 0x84,
+ 0x87, 0x80, 0x46, 0xf4, 0xa5, 0x56, 0x14, 0x79, 0x61, 0x4c, 0x87, 0x35, 0x2a, 0xde, 0xc3, 0x25,
+ 0x9e, 0x5a, 0xfa, 0x7b, 0xb8, 0x84, 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x95, 0x01, 0xf5, 0x74, 0xf2,
+ 0x3f, 0xf6, 0xd8, 0xeb, 0x7d, 0x0f, 0x37, 0xb3, 0xc9, 0x87, 0x9b, 0xe8, 0x95, 0x46, 0x7b, 0xb9,
+ 0xc8, 0x3e, 0xfa, 0xe5, 0x02, 0x3d, 0x1f, 0x3d, 0x86, 0xc8, 0xd0, 0xb5, 0x9c, 0x7c, 0x0c, 0xb9,
+ 0xe8, 0x55, 0x66, 0x94, 0xf0, 0xe4, 0xe3, 0xc8, 0xeb, 0x30, 0x6d, 0x93, 0xc0, 0x72, 0x5c, 0xd9,
+ 0x8f, 0xa5, 0x7e, 0x22, 0x90, 0xc2, 0xea, 0x92, 0xb5, 0x56, 0xe4, 0x36, 0xa9, 0x0f, 0x1c, 0x0a,
+ 0xe4, 0xd1, 0xb6, 0xe1, 0xdb, 0xb2, 0x9d, 0xc8, 0xc5, 0xd1, 0x76, 0xc3, 0xb7, 0x09, 0x16, 0x18,
+ 0xf3, 0x5d, 0x03, 0x8a, 0x52, 0xd2, 0x86, 0xd5, 0x61, 0x04, 0xad, 0x45, 0xab, 0x90, 0xc7, 0x7d,
+ 0x4d, 0x7f, 0xf5, 0xba, 0xe8, 0x55, 0x0a, 0x82, 0x4c, 0x74, 0x22, 0x43, 0x5e, 0x77, 0x32, 0x97,
+ 0xec, 0xd1, 0x93, 0x90, 0x13, 0xb7, 0x47, 0x6d, 0x66, 0x74, 0xd7, 0xc5, 0x05, 0xc3, 0x12, 0x67,
+ 0x7e, 0x9c, 0x81, 0x52, 0x62, 0x71, 0x29, 0x7a, 0x81, 0x68, 0x74, 0x99, 0x49, 0x31, 0x0e, 0x1f,
+ 0xfd, 0x30, 0xad, 0x72, 0xcf, 0xd4, 0xe3, 0xe4, 0x9e, 0xef, 0xc2, 0x54, 0x83, 0xef, 0x51, 0xf8,
+ 0x3f, 0x87, 0xb5, 0x71, 0x8e, 0x53, 0xec, 0x6e, 0xec, 0x8d, 0xe2, 0x93, 0x61, 0x25, 0x10, 0xdd,
+ 0x81, 0x05, 0x4a, 0x02, 0xda, 0x5d, 0x3f, 0x0a, 0x08, 0xd5, 0x9b, 0xf8, 0x5c, 0x5c, 0x71, 0xe3,
+ 0x7e, 0x02, 0x3c, 0xc8, 0x63, 0x1e, 0xc2, 0xcc, 0x5d, 0xeb, 0xd0, 0x8d, 0x1e, 0xbd, 0x30, 0x94,
+ 0x1c, 0xaf, 0xe1, 0x76, 0x6c, 0x22, 0xa3, 0x71, 0x18, 0xbd, 0xc2, 0x4b, 0xbb, 0xad, 0x23, 0x2f,
+ 0x7a, 0x95, 0x2b, 0x09, 0x80, 0x7c, 0xe5, 0xc1, 0x49, 0x11, 0xa6, 0x0b, 0x93, 0x9f, 0x61, 0xf7,
+ 0xf8, 0x3d, 0x28, 0xc4, 0xf5, 0xfd, 0xa7, 0xac, 0xd2, 0x7c, 0x13, 0xf2, 0xdc, 0xe3, 0xc3, 0xbe,
+ 0xf4, 0x92, 0x12, 0x27, 0x59, 0x38, 0x65, 0xd2, 0x14, 0x4e, 0x66, 0x0b, 0x4a, 0xf7, 0xda, 0xf6,
+ 0x63, 0x3e, 0x7b, 0x66, 0x52, 0x67, 0xad, 0x9b, 0x20, 0xff, 0x42, 0xc1, 0x13, 0x84, 0xcc, 0xdc,
+ 0x5a, 0x82, 0xd0, 0x13, 0xaf, 0x36, 0x95, 0xff, 0xa9, 0x01, 0x20, 0xc6, 0x5f, 0x9b, 0xa7, 0xc4,
+ 0x0b, 0x52, 0x3c, 0x8e, 0xdf, 0x83, 0x29, 0x5f, 0x7a, 0x93, 0x7c, 0xfa, 0x1c, 0x73, 0xc6, 0x1a,
+ 0x5d, 0x02, 0xe9, 0x4f, 0x58, 0x09, 0xab, 0xdd, 0xf8, 0xe0, 0xe1, 0xf2, 0xc4, 0x87, 0x0f, 0x97,
+ 0x27, 0x3e, 0x7a, 0xb8, 0x3c, 0xf1, 0xf6, 0xf9, 0xb2, 0xf1, 0xc1, 0xf9, 0xb2, 0xf1, 0xe1, 0xf9,
+ 0xb2, 0xf1, 0xd1, 0xf9, 0xb2, 0xf1, 0xf1, 0xf9, 0xb2, 0xf1, 0xee, 0x3f, 0x96, 0x27, 0x5e, 0xcf,
+ 0x9c, 0xae, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x82, 0x62, 0x88, 0xff, 0xb8, 0x26, 0x00, 0x00,
}
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
@@ -1752,6 +1788,62 @@ func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *Condition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2399,6 +2491,11 @@ func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i -= len(m.ResourceVersionMatch)
+ copy(dAtA[i:], m.ResourceVersionMatch)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch)))
+ i--
+ dAtA[i] = 0x52
i--
if m.AllowWatchBookmarks {
dAtA[i] = 1
@@ -3488,6 +3585,26 @@ func (m *APIVersions) Size() (n int) {
return n
}
+func (m *Condition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *CreateOptions) Size() (n int) {
if m == nil {
return 0
@@ -3758,6 +3875,8 @@ func (m *ListOptions) Size() (n int) {
l = len(m.Continue)
n += 1 + l + sovGenerated(uint64(l))
n += 2
+ l = len(m.ResourceVersionMatch)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -4198,6 +4317,21 @@ func (this *APIResourceList) String() string {
}, "")
return s
}
+func (this *Condition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Condition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *CreateOptions) String() string {
if this == nil {
return "nil"
@@ -4244,16 +4378,6 @@ func (this *ExportOptions) String() string {
}, "")
return s
}
-func (this *FieldsV1) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&FieldsV1{`,
- `Raw:` + valueToStringGenerated(this.Raw) + `,`,
- `}`,
- }, "")
- return s
-}
func (this *GetOptions) String() string {
if this == nil {
return "nil"
@@ -4355,6 +4479,7 @@ func (this *ListOptions) String() string {
`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
`Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
`AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`,
+ `ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`,
`}`,
}, "")
return s
@@ -4369,7 +4494,7 @@ func (this *ManagedFieldsEntry) String() string {
`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
`Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`,
`FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`,
- `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`,
+ `FieldsV1:` + strings.Replace(fmt.Sprintf("%v", this.FieldsV1), "FieldsV1", "FieldsV1", 1) + `,`,
`}`,
}, "")
return s
@@ -5508,6 +5633,239 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *Condition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Condition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
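
The generated Unmarshal above decodes the protobuf wire format by hand: each key is a base-128 varint whose low three bits carry the wire type and whose higher bits carry the field number. A minimal standalone sketch of the same shift-and-accumulate loop, written for illustration only (decodeVarint is a hypothetical helper, not part of the generated file):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // decodeVarint reads one base-128 varint from b, mirroring the loop used by
    // the generated Unmarshal methods: seven payload bits per byte, high bit set
    // while more bytes follow.
    func decodeVarint(b []byte) (value uint64, n int, err error) {
    	var shift uint
    	for i, c := range b {
    		if shift >= 64 {
    			return 0, 0, errors.New("varint overflows uint64")
    		}
    		value |= uint64(c&0x7F) << shift
    		if c < 0x80 {
    			return value, i + 1, nil
    		}
    		shift += 7
    	}
    	return 0, 0, errors.New("unexpected end of input")
    }

    func main() {
    	// 0x0A is tag 1 with wire type 2 (length-delimited), as used for Condition.Type.
    	wire, n, _ := decodeVarint([]byte{0x0A})
    	fmt.Printf("field=%d wireType=%d bytes=%d\n", wire>>3, wire&0x7, n)
    }
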
func (m *CreateOptions) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -7811,6 +8169,38 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error {
}
}
m.AllowWatchBookmarks = bool(v != 0)
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersionMatch", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index ba1194dcc..b72d43ff0 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -134,6 +134,73 @@ message APIVersions {
repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
}
+// Condition contains details for one aspect of the current state of this API Resource.
+// ---
+// This struct is intended for direct use as an array at the field path .status.conditions. For example,
+// type FooStatus struct{
+// // Represents the observations of a foo's current state.
+// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+// // +patchMergeKey=type
+// // +patchStrategy=merge
+// // +listType=map
+// // +listMapKey=type
+// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+//
+// // other fields
+// }
+message Condition {
+ // type of condition in CamelCase or in foo.example.com/CamelCase.
+ // ---
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+ // useful (see .node.status.conditions), the ability to deconflict is important.
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+ // +kubebuilder:validation:MaxLength=316
+ optional string type = 1;
+
+ // status of the condition, one of True, False, Unknown.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=True;False;Unknown
+ optional string status = 2;
+
+ // observedGeneration represents the .metadata.generation that the condition was set based upon.
+ // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the instance.
+ // +optional
+ // +kubebuilder:validation:Minimum=0
+ optional int64 observedGeneration = 3;
+
+ // lastTransitionTime is the last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Format=date-time
+ optional Time lastTransitionTime = 4;
+
+ // reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ // Producers of specific condition types may define expected values and meanings for this field,
+ // and whether the values are considered a guaranteed API.
+ // The value should be a CamelCase string.
+ // This field may not be empty.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=1024
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
+ optional string reason = 5;
+
+ // message is a human readable message indicating details about the transition.
+ // This may be an empty string.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=32768
+ optional string message = 6;
+}
+
// CreateOptions may be provided when creating an API object.
message CreateOptions {
// When present, indicates that modifications should not be
@@ -224,6 +291,7 @@ message ExportOptions {
// If a key maps to an empty Fields value, the field that key represents is part of the set.
//
// The exact format is defined in sigs.k8s.io/structured-merge-diff
+// +protobuf.options.(gogoproto.goproto_stringer)=false
message FieldsV1 {
// Raw is the underlying serialization of this object.
optional bytes Raw = 1;
@@ -231,10 +299,12 @@ message FieldsV1 {
// GetOptions is the standard query options to the standard REST get call.
message GetOptions {
- // When specified:
- // - if unset, then the result is returned from remote storage based on quorum-read flag;
- // - if it's 0, then we simply return what we currently have in cache, no guarantee;
- // - if set to non zero, then the result is at least as fresh as given rv.
+ // resourceVersion sets a constraint on what resource versions a request may be served from.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ //
+ // Defaults to unset
+ // +optional
optional string resourceVersion = 1;
}
@@ -420,15 +490,24 @@ message ListOptions {
// +optional
optional bool allowWatchBookmarks = 9;
- // When specified with a watch call, shows changes that occur after that particular version of a resource.
- // Defaults to changes from the beginning of history.
- // When specified for list:
- // - if unset, then the result is returned from remote storage based on quorum-read flag;
- // - if it's 0, then we simply return what we currently have in cache, no guarantee;
- // - if set to non zero, then the result is at least as fresh as given rv.
+ // resourceVersion sets a constraint on what resource versions a request may be served from.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ //
+ // Defaults to unset
// +optional
optional string resourceVersion = 4;
+ // resourceVersionMatch determines how resourceVersion is applied to list calls.
+ // It is highly recommended that resourceVersionMatch be set for list calls where
+ // resourceVersion is set.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ //
+ // Defaults to unset
+ // +optional
+ optional string resourceVersionMatch = 10;
+
// Timeout for the list/watch call.
// This limits the duration of the call, regardless of any activity or inactivity.
// +optional
@@ -546,7 +625,7 @@ message ObjectMeta {
// +optional
optional string generateName = 2;
- // Namespace defines the space within each name must be unique. An empty namespace is
+ // Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
index 912cf2036..2002f91b0 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
@@ -156,7 +156,9 @@ func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimes
func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) {
meta.DeletionTimestamp = deletionTimestamp
}
-func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds }
+func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 {
+ return meta.DeletionGracePeriodSeconds
+}
func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index e7aaead8c..bb57f2cc4 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -135,7 +135,7 @@ type ObjectMeta struct {
// +optional
GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
- // Namespace defines the space within each name must be unique. An empty namespace is
+ // Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
@@ -355,14 +355,23 @@ type ListOptions struct {
// +optional
AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"`
- // When specified with a watch call, shows changes that occur after that particular version of a resource.
- // Defaults to changes from the beginning of history.
- // When specified for list:
- // - if unset, then the result is returned from remote storage based on quorum-read flag;
- // - if it's 0, then we simply return what we currently have in cache, no guarantee;
- // - if set to non zero, then the result is at least as fresh as given rv.
+ // resourceVersion sets a constraint on what resource versions a request may be served from.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ //
+ // Defaults to unset
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+
+ // resourceVersionMatch determines how resourceVersion is applied to list calls.
+ // It is highly recommended that resourceVersionMatch be set for list calls where
+ // resourceVersion is set.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ //
+ // Defaults to unset
+ // +optional
+ ResourceVersionMatch ResourceVersionMatch `json:"resourceVersionMatch,omitempty" protobuf:"bytes,10,opt,name=resourceVersionMatch,casttype=ResourceVersionMatch"`
// Timeout for the list/watch call.
// This limits the duration of the call, regardless of any activity or inactivity.
// +optional
@@ -402,6 +411,25 @@ type ListOptions struct {
Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
}
+// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
+// may only be set if resourceVersion is also set.
+//
+// "NotOlderThan" matches data at least as new as the provided resourceVersion.
+// "Exact" matches data at the exact resourceVersion provided.
+//
+// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+// details.
+type ResourceVersionMatch string
+
+const (
+ // ResourceVersionMatchNotOlderThan matches data at least as new as the provided
+ // resourceVersion.
+ ResourceVersionMatchNotOlderThan ResourceVersionMatch = "NotOlderThan"
+ // ResourceVersionMatchExact matches data at the exact resourceVersion
+ // provided.
+ ResourceVersionMatchExact ResourceVersionMatch = "Exact"
+)
+
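
A short usage sketch for the new field pair on list calls; the resourceVersion values and the client plumbing that would actually send these options are illustrative only:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	// Ask for a list served at exactly resourceVersion "12345".
    	exact := metav1.ListOptions{
    		ResourceVersion:      "12345",
    		ResourceVersionMatch: metav1.ResourceVersionMatchExact,
    	}

    	// Ask for a list at least as fresh as resourceVersion "12345";
    	// the server may answer from a newer cached state.
    	notOlder := metav1.ListOptions{
    		ResourceVersion:      "12345",
    		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
    	}

    	fmt.Println(exact.ResourceVersionMatch, notOlder.ResourceVersionMatch)
    }
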
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -423,10 +451,12 @@ type ExportOptions struct {
// GetOptions is the standard query options to the standard REST get call.
type GetOptions struct {
TypeMeta `json:",inline"`
- // When specified:
- // - if unset, then the result is returned from remote storage based on quorum-read flag;
- // - if it's 0, then we simply return what we currently have in cache, no guarantee;
- // - if set to non zero, then the result is at least as fresh as given rv.
+ // resourceVersion sets a constraint on what resource versions a request may be served from.
+ // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+ // details.
+ //
+ // Defaults to unset
+ // +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"`
// +k8s:deprecated=includeUninitialized,protobuf=2
}
@@ -1148,11 +1178,16 @@ const (
// If a key maps to an empty Fields value, the field that key represents is part of the set.
//
// The exact format is defined in sigs.k8s.io/structured-merge-diff
+// +protobuf.options.(gogoproto.goproto_stringer)=false
type FieldsV1 struct {
// Raw is the underlying serialization of this object.
Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"`
}
+func (f FieldsV1) String() string {
+ return string(f.Raw)
+}
+
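
With the generated stringer disabled, FieldsV1 now prints as its raw JSON payload rather than the &FieldsV1{Raw:...} form removed earlier in this diff. A quick sketch, assuming only the exported Raw field and the String method shown above:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	f := metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{}}}`)}
    	// The custom String method returns the raw serialization directly,
    	// so managed-fields output stays readable JSON.
    	fmt.Println(f.String()) // {"f:metadata":{"f:labels":{}}}
    }
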
// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf
// generation to support a meta type that can accept any valid JSON. This can be introduced
// in a v1 because clients a) receive an error if they try to access proto today, and b)
@@ -1314,3 +1349,65 @@ type PartialObjectMetadataList struct {
// items contains each of the included items.
Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// Condition contains details for one aspect of the current state of this API Resource.
+// ---
+// This struct is intended for direct use as an array at the field path .status.conditions. For example,
+// type FooStatus struct{
+// // Represents the observations of a foo's current state.
+// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+// // +patchMergeKey=type
+// // +patchStrategy=merge
+// // +listType=map
+// // +listMapKey=type
+// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+//
+// // other fields
+// }
+type Condition struct {
+ // type of condition in CamelCase or in foo.example.com/CamelCase.
+ // ---
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+ // useful (see .node.status.conditions), the ability to deconflict is important.
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+ // +kubebuilder:validation:MaxLength=316
+ Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+ // status of the condition, one of True, False, Unknown.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum=True;False;Unknown
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
+ // observedGeneration represents the .metadata.generation that the condition was set based upon.
+ // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the instance.
+ // +optional
+ // +kubebuilder:validation:Minimum=0
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+ // lastTransitionTime is the last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Format=date-time
+ LastTransitionTime Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ // Producers of specific condition types may define expected values and meanings for this field,
+ // and whether the values are considered a guaranteed API.
+ // The value should be a CamelCase string.
+ // This field may not be empty.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=1024
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
+ Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"`
+ // message is a human readable message indicating details about the transition.
+ // This may be an empty string.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=32768
+ Message string `json:"message" protobuf:"bytes,6,opt,name=message"`
+}
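
As the doc comment above describes, Condition is meant to be embedded as a .status.conditions list on other API types. A hedged sketch with a hypothetical FooStatus wrapper; the condition values are illustrative only:

    package main

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // FooStatus is a hypothetical status block following the pattern in the
    // Condition doc comment above.
    type FooStatus struct {
    	Conditions []metav1.Condition `json:"conditions,omitempty"`
    }

    func main() {
    	status := FooStatus{}
    	status.Conditions = append(status.Conditions, metav1.Condition{
    		Type:               "Available",
    		Status:             metav1.ConditionTrue,
    		ObservedGeneration: 12,
    		LastTransitionTime: metav1.Now(),
    		Reason:             "MinimumReplicasAvailable",
    		Message:            "deployment has minimum availability",
    	})
    	fmt.Println(status.Conditions[0].Type, status.Conditions[0].Status)
    }
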
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index b62e591ee..ace0abfb9 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -86,6 +86,20 @@ func (APIVersions) SwaggerDoc() map[string]string {
return map_APIVersions
}
+var map_Condition = map[string]string{
+ "": "Condition contains details for one aspect of the current state of this API Resource.",
+ "type": "type of condition in CamelCase or in foo.example.com/CamelCase.",
+ "status": "status of the condition, one of True, False, Unknown.",
+ "observedGeneration": "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
+ "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
+ "reason": "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.",
+ "message": "message is a human readable message indicating details about the transition. This may be an empty string.",
+}
+
+func (Condition) SwaggerDoc() map[string]string {
+ return map_Condition
+}
+
var map_CreateOptions = map[string]string{
"": "CreateOptions may be provided when creating an API object.",
"dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
@@ -129,7 +143,7 @@ func (FieldsV1) SwaggerDoc() map[string]string {
var map_GetOptions = map[string]string{
"": "GetOptions is the standard query options to the standard REST get call.",
- "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "resourceVersion": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
}
func (GetOptions) SwaggerDoc() map[string]string {
@@ -190,15 +204,16 @@ func (ListMeta) SwaggerDoc() map[string]string {
}
var map_ListOptions = map[string]string{
- "": "ListOptions is the query options to a standard REST list call.",
- "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
- "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
- "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
- "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
- "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
- "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
- "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
- "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "": "ListOptions is the query options to a standard REST list call.",
+ "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "resourceVersion": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "resourceVersionMatch": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
}
func (ListOptions) SwaggerDoc() map[string]string {
@@ -223,7 +238,7 @@ var map_ObjectMeta = map[string]string{
"": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
"name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
"generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
- "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+ "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
"selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
"uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
"resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
index 4244b8a6d..54a231e49 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// NestedFieldCopy returns a deep copy of the value of a nested field.
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
index 2ade69dd9..06afd9b5b 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
@@ -145,6 +145,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
+ if err := s.AddConversionFunc((*[]string)(nil), (*ResourceVersionMatch)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_Slice_string_To_v1_ResourceVersionMatch(a.(*[]string), b.(*ResourceVersionMatch), scope)
+ }); err != nil {
+ return err
+ }
if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope)
}); err != nil {
@@ -415,6 +420,13 @@ func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions,
} else {
out.ResourceVersion = ""
}
+ if values, ok := map[string][]string(*in)["resourceVersionMatch"]; ok && len(values) > 0 {
+ if err := Convert_Slice_string_To_v1_ResourceVersionMatch(&values, &out.ResourceVersionMatch, s); err != nil {
+ return err
+ }
+ } else {
+ out.ResourceVersionMatch = ""
+ }
if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil {
return err
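
The generated conversion above copies the resourceVersionMatch query parameter into ListOptions. A hand-rolled sketch of the same mapping, for illustration only; real callers go through the registered conversion functions rather than this inline code:

    package main

    import (
    	"fmt"
    	"net/url"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	values, _ := url.ParseQuery("resourceVersion=12345&resourceVersionMatch=Exact")

    	// Mirror what autoConvert_url_Values_To_v1_ListOptions does for these two
    	// fields: take the first value when the parameter is present, otherwise
    	// leave the field empty.
    	opts := metav1.ListOptions{}
    	if v := values["resourceVersion"]; len(v) > 0 {
    		opts.ResourceVersion = v[0]
    	}
    	if v := values["resourceVersionMatch"]; len(v) > 0 {
    		opts.ResourceVersionMatch = metav1.ResourceVersionMatch(v[0])
    	}

    	fmt.Println(opts.ResourceVersion, opts.ResourceVersionMatch)
    }
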
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
index b82fdf202..1aa73bd24 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -192,6 +192,23 @@ func (in *APIVersions) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CreateOptions) DeepCopyInto(out *CreateOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
index 2d7c8bd1e..838d5b0aa 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
@@ -442,20 +442,20 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags
if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok {
return fn(src, dest, scope)
}
- // TODO: consider everything past this point deprecated - we want to support only point to point top level
- // conversions
dv, err := EnforcePtr(dest)
if err != nil {
return err
}
- if !dv.CanAddr() && !dv.CanSet() {
- return fmt.Errorf("can't write to dest")
- }
sv, err := EnforcePtr(src)
if err != nil {
return err
}
+ return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type())
+
+ // TODO: Everything past this point is deprecated.
+ // Remove in 1.20 once we're sure it didn't break anything.
+
// Leave something on the stack, so that calls to struct tag getters never fail.
scope.srcStack.push(scopeStackElem{})
scope.destStack.push(scopeStackElem{})
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
index e3e4453b6..a9e204976 100644
--- a/vendor/k8s.io/apimachinery/pkg/fields/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
@@ -57,13 +57,15 @@ type Selector interface {
type nothingSelector struct{}
-func (n nothingSelector) Matches(_ Fields) bool { return false }
-func (n nothingSelector) Empty() bool { return false }
-func (n nothingSelector) String() string { return "" }
-func (n nothingSelector) Requirements() Requirements { return nil }
-func (n nothingSelector) DeepCopySelector() Selector { return n }
-func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false }
-func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil }
+func (n nothingSelector) Matches(_ Fields) bool { return false }
+func (n nothingSelector) Empty() bool { return false }
+func (n nothingSelector) String() string { return "" }
+func (n nothingSelector) Requirements() Requirements { return nil }
+func (n nothingSelector) DeepCopySelector() Selector { return n }
+func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) {
+ return "", false
+}
+func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil }
// Nothing returns a selector that matches no fields
func Nothing() Selector {
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
index abf3ace6f..d9eeb4f91 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -57,14 +57,22 @@ func (ls Set) Get(label string) string {
return ls[label]
}
-// AsSelector converts labels into a selectors.
+// AsSelector converts labels into a selector. It does not
+// perform any validation, which means the server will reject
+// the request if the Set contains invalid values.
func (ls Set) AsSelector() Selector {
return SelectorFromSet(ls)
}
+// AsValidatedSelector converts labels into a selector.
+// The Set is validated client-side, which allows errors to be caught early.
+func (ls Set) AsValidatedSelector() (Selector, error) {
+ return ValidatedSelectorFromSet(ls)
+}
+
// AsSelectorPreValidated converts labels into a selector, but
-// assumes that labels are already validated and thus don't
-// preform any validation.
+// assumes that labels are already validated and thus doesn't
+// perform any validation.
// According to our measurements this is significantly faster
// in codepaths that matter at high scale.
func (ls Set) AsSelectorPreValidated() Selector {
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index 2f8e1e2b0..bf62f98a4 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// Requirements is AND of all requirements.
@@ -68,13 +68,15 @@ func Everything() Selector {
type nothingSelector struct{}
-func (n nothingSelector) Matches(_ Labels) bool { return false }
-func (n nothingSelector) Empty() bool { return false }
-func (n nothingSelector) String() string { return "" }
-func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
-func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
-func (n nothingSelector) DeepCopySelector() Selector { return n }
-func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false }
+func (n nothingSelector) Matches(_ Labels) bool { return false }
+func (n nothingSelector) Empty() bool { return false }
+func (n nothingSelector) String() string { return "" }
+func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
+func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
+func (n nothingSelector) DeepCopySelector() Selector { return n }
+func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) {
+ return "", false
+}
// Nothing returns a selector that matches no labels
func Nothing() Selector {
@@ -221,7 +223,7 @@ func (r *Requirement) Matches(ls Labels) bool {
return false
}
- // There should be only one strValue in r.strValues, and can be converted to a integer.
+ // There should be only one strValue in r.strValues, and can be converted to an integer.
if len(r.strValues) != 1 {
klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
return false
@@ -869,23 +871,30 @@ func validateLabelValue(k, v string) error {
// SelectorFromSet returns a Selector which will match exactly the given Set. A
// nil and empty Sets are considered equivalent to Everything().
+// It does not perform any validation, which means the server will reject
+// the request if the Set contains invalid values.
func SelectorFromSet(ls Set) Selector {
+ return SelectorFromValidatedSet(ls)
+}
+
+// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set. A
+// nil and empty Sets are considered equivalent to Everything().
+// The Set is validated client-side, which allows errors to be caught early.
+func ValidatedSelectorFromSet(ls Set) (Selector, error) {
if ls == nil || len(ls) == 0 {
- return internalSelector{}
+ return internalSelector{}, nil
}
requirements := make([]Requirement, 0, len(ls))
for label, value := range ls {
r, err := NewRequirement(label, selection.Equals, []string{value})
- if err == nil {
- requirements = append(requirements, *r)
- } else {
- //TODO: double check errors when input comes from serialization?
- return internalSelector{}
+ if err != nil {
+ return nil, err
}
+ requirements = append(requirements, *r)
}
// sort to have deterministic string representation
sort.Sort(ByKey(requirements))
- return internalSelector(requirements)
+ return internalSelector(requirements), nil
}
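
The practical difference introduced here: SelectorFromSet still always returns a selector and leaves rejection to the server, while the new ValidatedSelectorFromSet surfaces invalid label values client-side. A small sketch; the label values are made up:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/labels"
    )

    func main() {
    	good := labels.Set{"app": "podman"}
    	bad := labels.Set{"app": "not a valid value!"} // spaces and '!' fail label validation

    	// Validated path: the error is caught before any request is sent.
    	if _, err := labels.ValidatedSelectorFromSet(bad); err != nil {
    		fmt.Println("client-side validation failed:", err)
    	}

    	// Unvalidated path: always returns a selector; an invalid Set would only
    	// be rejected by the API server.
    	sel := labels.SelectorFromSet(good)
    	fmt.Println("selector:", sel.String())
    }
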
// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
index 0bccf9dd9..a92863139 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
@@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/conversion/queryparams"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// codec binds an encoder and decoder.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
index 510444a4d..000228061 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
@@ -21,6 +21,7 @@ import (
"reflect"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/json"
)
// CheckCodec makes sure that the codec can encode objects like internalType,
@@ -32,7 +33,14 @@ func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersi
return fmt.Errorf("Internal type not encodable: %v", err)
}
for _, et := range externalTypes {
- exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String()))
+ typeMeta := TypeMeta{
+ Kind: et.Kind,
+ APIVersion: et.GroupVersion().String(),
+ }
+ exBytes, err := json.Marshal(&typeMeta)
+ if err != nil {
+ return err
+ }
obj, err := Decode(c, exBytes)
if err != nil {
return fmt.Errorf("external type %s not interpretable: %v", et, err)
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
index 918d0831d..871e4c8c4 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
@@ -31,9 +31,9 @@ import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "sigs.k8s.io/structured-merge-diff/v3/value"
+ "sigs.k8s.io/structured-merge-diff/v4/value"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// UnstructuredConverter is an interface for converting between interface{}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
index 636103312..994a3e3fa 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
@@ -176,15 +176,6 @@ func (gv GroupVersion) Empty() bool {
// String puts "group" and "version" into a single "group/version" string. For the legacy v1
// it returns "v1".
func (gv GroupVersion) String() string {
- // special case the internal apiVersion for the legacy kube types
- if gv.Empty() {
- return ""
- }
-
- // special case of "v1" for backward compatibility
- if len(gv.Group) == 0 && gv.Version == "v1" {
- return gv.Version
- }
if len(gv.Group) > 0 {
return gv.Group + "/" + gv.Version
}
@@ -252,10 +243,10 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
type GroupVersions []GroupVersion
// Identifier implements runtime.GroupVersioner interface.
-func (gv GroupVersions) Identifier() string {
- groupVersions := make([]string, 0, len(gv))
- for i := range gv {
- groupVersions = append(groupVersions, gv[i].String())
+func (gvs GroupVersions) Identifier() string {
+ groupVersions := make([]string, 0, len(gvs))
+ for i := range gvs {
+ groupVersions = append(groupVersions, gvs[i].String())
}
return fmt.Sprintf("[%s]", strings.Join(groupVersions, ","))
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index 4b739ec38..3b254961d 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -211,6 +211,19 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
}
}
s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
+
+ // if the type implements DeepCopyInto(<obj>), register a self-conversion
+ if m := reflect.ValueOf(obj).MethodByName("DeepCopyInto"); m.IsValid() && m.Type().NumIn() == 1 && m.Type().NumOut() == 0 && m.Type().In(0) == reflect.TypeOf(obj) {
+ if err := s.AddGeneratedConversionFunc(obj, obj, func(a, b interface{}, scope conversion.Scope) error {
+ // copy a to b
+ reflect.ValueOf(a).MethodByName("DeepCopyInto").Call([]reflect.Value{reflect.ValueOf(b)})
+ // clear TypeMeta to match legacy reflective conversion
+ b.(Object).GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+ return nil
+ }); err != nil {
+ panic(err)
+ }
+ }
}
// KnownTypes returns the types known for the given version.
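
The reflective check added above asks whether the registered object has a DeepCopyInto method that takes exactly one argument of the object's own type; if so, the scheme gains a self-conversion that bypasses the deprecated reflective path. A standalone sketch of that shape test on a toy type (Widget and hasSelfDeepCopyInto are hypothetical names):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    // Widget stands in for a registered API object with a generated deepcopy method.
    type Widget struct{ Name string }

    func (in *Widget) DeepCopyInto(out *Widget) { *out = *in }

    // hasSelfDeepCopyInto mirrors the shape test used in AddKnownTypeWithName:
    // a DeepCopyInto method whose single parameter has the object's own type.
    func hasSelfDeepCopyInto(obj interface{}) bool {
    	m := reflect.ValueOf(obj).MethodByName("DeepCopyInto")
    	return m.IsValid() &&
    		m.Type().NumIn() == 1 &&
    		m.Type().NumOut() == 0 &&
    		m.Type().In(0) == reflect.TypeOf(obj)
    }

    func main() {
    	w := &Widget{Name: "a"}
    	fmt.Println(hasSelfDeepCopyInto(w)) // true

    	// Using the method value to copy, as the registered conversion func does.
    	dst := &Widget{}
    	reflect.ValueOf(w).MethodByName("DeepCopyInto").Call([]reflect.Value{reflect.ValueOf(dst)})
    	fmt.Println(dst.Name) // a
    }
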
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
index 9d17f09e5..e081d7ff1 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apimachinery/pkg/util/framer"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
index ced184c91..718c5dfb7 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
index 9d5fdeece..00ce5f785 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -114,6 +114,18 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
return ""
}
+func commaSeparatedHeaderValues(header []string) []string {
+ var parsedClientProtocols []string
+ for i := range header {
+ for _, clientProtocol := range strings.Split(header[i], ",") {
+ if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
+ parsedClientProtocols = append(parsedClientProtocols, proto)
+ }
+ }
+ }
+ return parsedClientProtocols
+}
+
// Handshake performs a subprotocol negotiation. If the client did request a
// subprotocol, Handshake will select the first common value found in
// serverProtocols. If a match is found, Handshake adds a response header
@@ -121,7 +133,7 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
// returned, along with a response header containing the list of protocols the
// server can accept.
func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
- clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+ clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
if len(clientProtocols) == 0 {
return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
}
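
The new helper lets a client send several subprotocols in one comma-separated header value instead of repeating the header. Since the helper is unexported, the sketch below reproduces the splitting inline on a canned header:

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    func main() {
    	h := http.Header{}
    	// One header line carrying two protocols, as now accepted by Handshake.
    	h.Add("X-Stream-Protocol-Version", "v4.channel.k8s.io, v3.channel.k8s.io")

    	var protocols []string
    	for _, value := range h["X-Stream-Protocol-Version"] {
    		for _, p := range strings.Split(value, ",") {
    			if p = strings.TrimSpace(p); p != "" {
    				protocols = append(protocols, p)
    			}
    		}
    	}
    	fmt.Println(protocols) // [v4.channel.k8s.io v3.channel.k8s.io]
    }
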
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
index 9d222faa8..7a6881250 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
@@ -24,7 +24,7 @@ import (
"github.com/docker/spdystream"
"k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// connection maintains state about a spdystream.Connection and its associated
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
index 2699597e7..6309fbc26 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -76,19 +76,20 @@ var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
var _ utilnet.Dialer = &SpdyRoundTripper{}
-// NewRoundTripper creates a new SpdyRoundTripper that will use
-// the specified tlsConfig.
-func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
- return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
+// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
+// tlsConfig.
+func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
+ return NewRoundTripperWithProxy(tlsConfig, followRedirects, requireSameHostRedirects, utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment))
}
-// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
-// the specified tlsConfig. This function is mostly meant for unit tests.
-func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
+// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
+// specified tlsConfig and proxy func.
+func NewRoundTripperWithProxy(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
return &SpdyRoundTripper{
tlsConfig: tlsConfig,
followRedirects: followRedirects,
requireSameHostRedirects: requireSameHostRedirects,
+ proxier: proxier,
}
}
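
With the proxier injected at construction time, callers that need non-default proxy behavior pass their own func. A hedged sketch using the plain environment-based proxier from net/http; the TLS configs are empty placeholders:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"

    	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
    )

    func main() {
    	// Default behavior: NewRoundTripper wires in the NO_PROXY-aware proxier.
    	rt := spdy.NewRoundTripper(&tls.Config{}, true, false)

    	// Explicit proxier: any func(*http.Request) (*url.URL, error) will do,
    	// here the plain environment-based one from net/http.
    	rtWithProxy := spdy.NewRoundTripperWithProxy(&tls.Config{}, true, false, http.ProxyFromEnvironment)

    	fmt.Printf("%T %T\n", rt, rtWithProxy)
    }
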
@@ -116,11 +117,7 @@ func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
// dial dials the host specified by req, using TLS if appropriate, optionally
// using a proxy server if one is configured via environment variables.
func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
- proxier := s.proxier
- if proxier == nil {
- proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
- }
- proxyURL, err := proxier(req)
+ proxyURL, err := s.proxier(req)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index cb974dcf7..6576def82 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -26,7 +26,7 @@ import (
"strings"
"github.com/google/gofuzz"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// IntOrString is a type that can hold an int32 or a string. When used in
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
index 7b64e6815..945886c43 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -21,18 +21,23 @@ import (
"bytes"
"context"
"crypto/tls"
+ "errors"
"fmt"
"io"
+ "mime"
"net"
"net/http"
"net/url"
"os"
"path"
+ "regexp"
"strconv"
"strings"
+ "unicode"
+ "unicode/utf8"
"golang.org/x/net/http2"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
@@ -57,8 +62,11 @@ func JoinPreservingTrailingSlash(elem ...string) string {
// IsTimeout returns true if the given error is a network timeout error
func IsTimeout(err error) bool {
- neterr, ok := err.(net.Error)
- return ok && neterr != nil && neterr.Timeout()
+ var neterr net.Error
+ if errors.As(err, &neterr) {
+ return neterr != nil && neterr.Timeout()
+ }
+ return false
}
// IsProbableEOF returns true if the given error resembles a connection termination
@@ -71,13 +79,16 @@ func IsProbableEOF(err error) bool {
if err == nil {
return false
}
- if uerr, ok := err.(*url.Error); ok {
+ var uerr *url.Error
+ if errors.As(err, &uerr) {
err = uerr.Err
}
msg := err.Error()
switch {
case err == io.EOF:
return true
+ case err == io.ErrUnexpectedEOF:
+ return true
case msg == "http: can't write HTTP request on broken connection":
return true
case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"):
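
Both rewrites above move from one-shot type assertions to errors.As, so a net.Error that has been wrapped (for example with fmt.Errorf and %w) is still detected. A small stand-alone sketch of the difference, using a made-up net.Error implementation:

    // Sketch: why errors.As matters here. A net.Error wrapped with %w is not
    // caught by a plain type assertion but is unwrapped by errors.As, which is
    // the behaviour the updated IsTimeout relies on.
    package main

    import (
        "errors"
        "fmt"
        "net"
    )

    type fakeTimeout struct{}

    func (fakeTimeout) Error() string   { return "deadline exceeded" }
    func (fakeTimeout) Timeout() bool   { return true }
    func (fakeTimeout) Temporary() bool { return true }

    func main() {
        wrapped := fmt.Errorf("dialing backend: %w", fakeTimeout{})

        _, ok := wrapped.(net.Error) // old style: misses wrapped errors
        fmt.Println("type assertion:", ok)

        var neterr net.Error
        fmt.Println("errors.As:", errors.As(wrapped, &neterr) && neterr.Timeout())
    }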
@@ -482,3 +493,232 @@ func CloneHeader(in http.Header) http.Header {
}
return out
}
+
+// WarningHeader contains a single RFC2616 14.46 warning header
+type WarningHeader struct {
+	// Code indicates the type of warning. 299 is a miscellaneous persistent warning
+ Code int
+ // Agent contains the name or pseudonym of the server adding the Warning header.
+ // A single "-" is recommended when agent is unknown.
+ Agent string
+ // Warning text
+ Text string
+}
+
+// ParseWarningHeaders extracts RFC2616 14.46 warning headers from the specified set of header values.
+// Multiple comma-separated warnings per header are supported.
+// If an error is encountered in a header, the remainder of that header is skipped and subsequent headers are parsed.
+// Returns successfully parsed warnings and any errors encountered.
+func ParseWarningHeaders(headers []string) ([]WarningHeader, []error) {
+ var (
+ results []WarningHeader
+ errs []error
+ )
+ for _, header := range headers {
+ for len(header) > 0 {
+ result, remainder, err := ParseWarningHeader(header)
+ if err != nil {
+ errs = append(errs, err)
+ break
+ }
+ results = append(results, result)
+ header = remainder
+ }
+ }
+ return results, errs
+}
+
+var (
+ codeMatcher = regexp.MustCompile(`^[0-9]{3}$`)
+ wordDecoder = &mime.WordDecoder{}
+)
+
+// ParseWarningHeader extracts one RFC2616 14.46 warning from the specified header,
+// returning an error if the header does not contain a correctly formatted warning.
+// Any remaining content in the header is returned.
+func ParseWarningHeader(header string) (result WarningHeader, remainder string, err error) {
+ // https://tools.ietf.org/html/rfc2616#section-14.46
+ // updated by
+ // https://tools.ietf.org/html/rfc7234#section-5.5
+ // https://tools.ietf.org/html/rfc7234#appendix-A
+ // Some requirements regarding production and processing of the Warning
+ // header fields have been relaxed, as it is not widely implemented.
+ // Furthermore, the Warning header field no longer uses RFC 2047
+ // encoding, nor does it allow multiple languages, as these aspects were
+ // not implemented.
+ //
+ // Format is one of:
+ // warn-code warn-agent "warn-text"
+ // warn-code warn-agent "warn-text" "warn-date"
+ //
+ // warn-code is a three digit number
+ // warn-agent is unquoted and contains no spaces
+ // warn-text is quoted with backslash escaping (RFC2047-encoded according to RFC2616, not encoded according to RFC7234)
+ // warn-date is optional, quoted, and in HTTP-date format (no embedded or escaped quotes)
+ //
+ // additional warnings can optionally be included in the same header by comma-separating them:
+ // warn-code warn-agent "warn-text" "warn-date"[, warn-code warn-agent "warn-text" "warn-date", ...]
+
+ // tolerate leading whitespace
+ header = strings.TrimSpace(header)
+
+ parts := strings.SplitN(header, " ", 3)
+ if len(parts) != 3 {
+ return WarningHeader{}, "", errors.New("invalid warning header: fewer than 3 segments")
+ }
+ code, agent, textDateRemainder := parts[0], parts[1], parts[2]
+
+ // verify code format
+ if !codeMatcher.Match([]byte(code)) {
+ return WarningHeader{}, "", errors.New("invalid warning header: code segment is not 3 digits between 100-299")
+ }
+ codeInt, _ := strconv.ParseInt(code, 10, 64)
+
+ // verify agent presence
+ if len(agent) == 0 {
+ return WarningHeader{}, "", errors.New("invalid warning header: empty agent segment")
+ }
+ if !utf8.ValidString(agent) || hasAnyRunes(agent, unicode.IsControl) {
+ return WarningHeader{}, "", errors.New("invalid warning header: invalid agent")
+ }
+
+ // verify textDateRemainder presence
+ if len(textDateRemainder) == 0 {
+ return WarningHeader{}, "", errors.New("invalid warning header: empty text segment")
+ }
+
+ // extract text
+ text, dateAndRemainder, err := parseQuotedString(textDateRemainder)
+ if err != nil {
+ return WarningHeader{}, "", fmt.Errorf("invalid warning header: %v", err)
+ }
+ // tolerate RFC2047-encoded text from warnings produced according to RFC2616
+ if decodedText, err := wordDecoder.DecodeHeader(text); err == nil {
+ text = decodedText
+ }
+ if !utf8.ValidString(text) || hasAnyRunes(text, unicode.IsControl) {
+ return WarningHeader{}, "", errors.New("invalid warning header: invalid text")
+ }
+ result = WarningHeader{Code: int(codeInt), Agent: agent, Text: text}
+
+ if len(dateAndRemainder) > 0 {
+ if dateAndRemainder[0] == '"' {
+ // consume date
+ foundEndQuote := false
+ for i := 1; i < len(dateAndRemainder); i++ {
+ if dateAndRemainder[i] == '"' {
+ foundEndQuote = true
+ remainder = strings.TrimSpace(dateAndRemainder[i+1:])
+ break
+ }
+ }
+ if !foundEndQuote {
+ return WarningHeader{}, "", errors.New("invalid warning header: unterminated date segment")
+ }
+ } else {
+ remainder = dateAndRemainder
+ }
+ }
+ if len(remainder) > 0 {
+ if remainder[0] == ',' {
+ // consume comma if present
+ remainder = strings.TrimSpace(remainder[1:])
+ } else {
+ return WarningHeader{}, "", errors.New("invalid warning header: unexpected token after warn-date")
+ }
+ }
+
+ return result, remainder, nil
+}
+
+func parseQuotedString(quotedString string) (string, string, error) {
+ if len(quotedString) == 0 {
+ return "", "", errors.New("invalid quoted string: 0-length")
+ }
+
+ if quotedString[0] != '"' {
+ return "", "", errors.New("invalid quoted string: missing initial quote")
+ }
+
+ quotedString = quotedString[1:]
+ var remainder string
+ escaping := false
+ closedQuote := false
+ result := &bytes.Buffer{}
+loop:
+ for i := 0; i < len(quotedString); i++ {
+ b := quotedString[i]
+ switch b {
+ case '"':
+ if escaping {
+ result.WriteByte(b)
+ escaping = false
+ } else {
+ closedQuote = true
+ remainder = strings.TrimSpace(quotedString[i+1:])
+ break loop
+ }
+ case '\\':
+ if escaping {
+ result.WriteByte(b)
+ escaping = false
+ } else {
+ escaping = true
+ }
+ default:
+ result.WriteByte(b)
+ escaping = false
+ }
+ }
+
+ if !closedQuote {
+ return "", "", errors.New("invalid quoted string: missing closing quote")
+ }
+ return result.String(), remainder, nil
+}
+
+func NewWarningHeader(code int, agent, text string) (string, error) {
+ if code < 0 || code > 999 {
+ return "", errors.New("code must be between 0 and 999")
+ }
+ if len(agent) == 0 {
+ agent = "-"
+ } else if !utf8.ValidString(agent) || strings.ContainsAny(agent, `\"`) || hasAnyRunes(agent, unicode.IsSpace, unicode.IsControl) {
+ return "", errors.New("agent must be valid UTF-8 and must not contain spaces, quotes, backslashes, or control characters")
+ }
+ if !utf8.ValidString(text) || hasAnyRunes(text, unicode.IsControl) {
+ return "", errors.New("text must be valid UTF-8 and must not contain control characters")
+ }
+ return fmt.Sprintf("%03d %s %s", code, agent, makeQuotedString(text)), nil
+}
+
+func hasAnyRunes(s string, runeCheckers ...func(rune) bool) bool {
+ for _, r := range s {
+ for _, checker := range runeCheckers {
+ if checker(r) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func makeQuotedString(s string) string {
+ result := &bytes.Buffer{}
+ // opening quote
+ result.WriteRune('"')
+ for _, c := range s {
+ switch c {
+ case '"', '\\':
+ // escape " and \
+ result.WriteRune('\\')
+ result.WriteRune(c)
+ default:
+ // write everything else as-is
+ result.WriteRune(c)
+ }
+ }
+ // closing quote
+ result.WriteRune('"')
+ return result.String()
+}
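
The added block implements parsing and construction of RFC 2616 section 14.46 / RFC 7234 section 5.5 Warning headers. A short usage sketch of the exported helpers defined above; the import path assumes the vendored package location:

    // Sketch: round-tripping a Warning header with the helpers added above.
    package main

    import (
        "fmt"

        utilnet "k8s.io/apimachinery/pkg/util/net"
    )

    func main() {
        // Build a 299 (miscellaneous persistent) warning; a "-" agent is
        // substituted automatically when the agent string is empty.
        header, err := utilnet.NewWarningHeader(299, "", `fieldSelector "spec.foo" is deprecated`)
        if err != nil {
            panic(err)
        }
        fmt.Println(header) // 299 - "fieldSelector \"spec.foo\" is deprecated"

        // Parse it back; multiple comma-separated warnings per value are supported.
        warnings, errs := utilnet.ParseWarningHeaders([]string{header})
        fmt.Println(warnings, errs)
    }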
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
index 836494d57..204e223ca 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
@@ -26,7 +26,7 @@ import (
"strings"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
type AddressFamily uint
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
index 2e7cb9499..5950087e0 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
@@ -17,9 +17,8 @@ limitations under the License.
package net
import (
+ "errors"
"net"
- "net/url"
- "os"
"reflect"
"syscall"
)
@@ -40,34 +39,18 @@ func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool {
// Returns if the given err is "connection reset by peer" error.
func IsConnectionReset(err error) bool {
- if urlErr, ok := err.(*url.Error); ok {
- err = urlErr.Err
- }
- if opErr, ok := err.(*net.OpError); ok {
- err = opErr.Err
- }
- if osErr, ok := err.(*os.SyscallError); ok {
- err = osErr.Err
- }
- if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNRESET {
- return true
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ return errno == syscall.ECONNRESET
}
return false
}
// Returns if the given err is "connection refused" error
func IsConnectionRefused(err error) bool {
- if urlErr, ok := err.(*url.Error); ok {
- err = urlErr.Err
- }
- if opErr, ok := err.(*net.OpError); ok {
- err = opErr.Err
- }
- if osErr, ok := err.(*os.SyscallError); ok {
- err = osErr.Err
- }
- if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
- return true
+ var errno syscall.Errno
+ if errors.As(err, &errno) {
+ return errno == syscall.ECONNREFUSED
}
return false
}
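
As with IsTimeout earlier, errors.As walks the Unwrap chain that the deleted code peeled apart by hand (*url.Error, *net.OpError, *os.SyscallError), which is why those explicit layers can go. A stand-alone sketch using only the standard library:

    // Sketch: errors.As traverses the same wrapping layers the old code
    // unwrapped manually before comparing against syscall.ECONNREFUSED.
    package main

    import (
        "errors"
        "fmt"
        "net"
        "net/url"
        "os"
        "syscall"
    )

    func main() {
        err := &url.Error{
            Op:  "Get",
            URL: "http://example.invalid",
            Err: &net.OpError{
                Op:  "dial",
                Err: os.NewSyscallError("connect", syscall.ECONNREFUSED),
            },
        }

        var errno syscall.Errno
        fmt.Println(errors.As(err, &errno) && errno == syscall.ECONNREFUSED) // true
    }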
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 1428443f5..e8a9f609f 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -23,7 +23,7 @@ import (
"sync"
"time"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
)
var (
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index 915231f2e..4752b29a9 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -106,6 +106,11 @@ func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorLis
if len(strings.Split(name, ".")) < 2 {
return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots"))
}
+ for _, label := range strings.Split(name, ".") {
+ if errs := IsDNS1123Label(label); len(errs) > 0 {
+ return append(allErrors, field.Invalid(fldPath, label, strings.Join(errs, ",")))
+ }
+ }
return allErrors
}
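
With the added loop, each dot-separated label must itself be a valid DNS-1123 label, not just the overall two-segment shape. A hedged sketch of the effect; the field package import is assumed from the function signature:

    // Sketch: a name with an invalid label (underscore) is now rejected even
    // though it has at least two dot-separated segments.
    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation"
        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        fmt.Println(validation.IsFullyQualifiedDomainName(field.NewPath("host"), "good.example.com"))
        fmt.Println(validation.IsFullyQualifiedDomainName(field.NewPath("host"), "bad_label.example.com"))
    }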
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
index a9a3853ac..492171faf 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -26,7 +26,7 @@ import (
"strings"
"unicode"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
"sigs.k8s.io/yaml"
)
@@ -92,6 +92,10 @@ type YAMLDecoder struct {
// the caller in framing the chunk.
func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
scanner := bufio.NewScanner(r)
+	// the size of the initial buffer allocation: 4KB
+ buf := make([]byte, 4*1024)
+	// the maximum size used to buffer a single token (one YAML document): 5MB
+ scanner.Buffer(buf, 5*1024*1024)
scanner.Split(splitYAMLDocument)
return &YAMLDecoder{
r: r,
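
The enlarged scanner buffer matters because bufio.Scanner otherwise caps a single token, here one YAML document, at its 64KB default; after this change a document may be up to 5MB. A usage sketch of NewDocumentDecoder, whose signature appears above (Read semantics are assumed from the decoder's chunking behaviour):

    // Sketch: NewDocumentDecoder splits a multi-document YAML stream on "---";
    // each Read returns the next document until io.EOF.
    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"

        "k8s.io/apimachinery/pkg/util/yaml"
    )

    func main() {
        stream := ioutil.NopCloser(strings.NewReader("a: 1\n---\nb: 2\n"))
        d := yaml.NewDocumentDecoder(stream)
        defer d.Close()

        buf := make([]byte, 1024)
        for {
            n, err := d.Read(buf)
            if n > 0 {
                fmt.Printf("document: %q\n", buf[:n])
            }
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
        }
    }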
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
index 4269a836a..8271e9b70 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
@@ -21,7 +21,7 @@ import (
"io"
"sync"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/net"
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
index 988aba3ed..1f4911a31 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -20,7 +20,7 @@ import (
"fmt"
"sync"
- "k8s.io/klog"
+ "k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -32,8 +32,8 @@ type Interface interface {
Stop()
// Returns a chan which will receive all the events. If an error occurs
- // or Stop() is called, this channel will be closed, in which case the
- // watch should be completely cleaned up.
+ // or Stop() is called, the implementation will close this channel and
+ // release any resources used by the watch.
ResultChan() <-chan Event
}
@@ -46,7 +46,9 @@ const (
Deleted EventType = "DELETED"
Bookmark EventType = "BOOKMARK"
Error EventType = "ERROR"
+)
+var (
DefaultChanSize int32 = 100
)
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
index 7ed1d1cff..6be80349a 100644
--- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
@@ -16,7 +16,7 @@ import (
// that type.
type Equalities map[reflect.Type]reflect.Value
-// For convenience, panics on errrors
+// For convenience, panics on errors
func EqualitiesOrDie(funcs ...interface{}) Equalities {
e := Equalities{}
if err := e.AddFuncs(funcs...); err != nil {
@@ -229,7 +229,7 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool,
//
// An empty slice *is* equal to a nil slice for our purposes; same for maps.
//
-// Unexported field members cannot be compared and will cause an imformative panic; you must add an Equality
+// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
// function for these types.
func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
if a1 == nil || a2 == nil {
diff --git a/vendor/k8s.io/klog/v2/.gitignore b/vendor/k8s.io/klog/v2/.gitignore
new file mode 100644
index 000000000..0aa200239
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/.gitignore
@@ -0,0 +1,17 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# OSX trash
+.DS_Store
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
+.idea/
+*.iml
+
+# Vscode files
+.vscode
diff --git a/vendor/k8s.io/klog/v2/CONTRIBUTING.md b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
new file mode 100644
index 000000000..2641b1f41
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+## Contact Information
+
+- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
diff --git a/vendor/k8s.io/klog/v2/LICENSE b/vendor/k8s.io/klog/v2/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS
new file mode 100644
index 000000000..380e514f2
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/OWNERS
@@ -0,0 +1,19 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+reviewers:
+ - jayunit100
+ - hoegaarden
+ - andyxning
+ - neolit123
+ - pohly
+ - yagonobre
+ - vincepri
+ - detiber
+approvers:
+ - dims
+ - thockin
+ - justinsb
+ - tallclair
+ - piosz
+ - brancz
+ - DirectXMan12
+ - lavalamp
diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md
new file mode 100644
index 000000000..2f9f6f0d8
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/README.md
@@ -0,0 +1,99 @@
+klog
+====
+
+klog is a permanent fork of https://github.com/golang/glog.
+
+## Why was klog created?
+
+The decision to create klog was one that wasn't made lightly, but it was necessary due to some
+drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
+
+> The code in this repo [...] is not itself under development
+
+This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
+
+ * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
+ * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
+ * A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
+
+Historical context is available here:
+
+ * https://github.com/kubernetes/kubernetes/issues/61006
+ * https://github.com/kubernetes/kubernetes/issues/70264
+ * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
+ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
+
+----
+
+How to use klog
+===============
+- Replace imports for `github.com/golang/glog` with `k8s.io/klog`
+- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags
+- You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`)
+- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use the `klog.SetOutput()` method and supply an `io.Writer`. (See `examples/set_output/usage_set_output.go`)
+- For more on logging conventions, see [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)
+
+**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater.
+
+### Coexisting with glog
+This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack](https://kubernetes.slack.com/messages/klog)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation for the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
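
The README above spreads the initialization steps across a bullet list; a minimal sketch tying them together against the klog/v2 API vendored in this commit:

    // Sketch: basic klog/v2 initialization. InitFlags registers klog's flags
    // on the given FlagSet (flag.CommandLine when nil), and Flush should run
    // before exit so buffered output is written.
    package main

    import (
        "flag"

        "k8s.io/klog/v2"
    )

    func main() {
        klog.InitFlags(nil)       // register -v, -logtostderr, -log_file, ...
        _ = flag.Set("v", "2")    // equivalent to passing -v=2 on the command line
        flag.Parse()
        defer klog.Flush()

        klog.Info("Prepare to repel boarders")
        klog.V(2).Infoln("verbosity 2 is enabled")
    }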
diff --git a/vendor/k8s.io/klog/v2/RELEASE.md b/vendor/k8s.io/klog/v2/RELEASE.md
new file mode 100644
index 000000000..b53eb960c
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+`klog` is released on an as-needed basis. The process is as follows:
+
+1. An issue is opened proposing a new release with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/v2/SECURITY_CONTACTS b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
new file mode 100644
index 000000000..6128a5869
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
@@ -0,0 +1,20 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+dims
+thockin
+justinsb
+tallclair
+piosz
+brancz
+DirectXMan12
+lavalamp
diff --git a/vendor/k8s.io/klog/v2/code-of-conduct.md b/vendor/k8s.io/klog/v2/code-of-conduct.md
new file mode 100644
index 000000000..0d15c00cf
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod
new file mode 100644
index 000000000..e396e31c0
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/go.mod
@@ -0,0 +1,5 @@
+module k8s.io/klog/v2
+
+go 1.13
+
+require github.com/go-logr/logr v0.2.0
diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum
new file mode 100644
index 000000000..8dfa78542
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/go.sum
@@ -0,0 +1,2 @@
+github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
new file mode 100644
index 000000000..ae2b86138
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -0,0 +1,1525 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// klog.Info("Prepare to repel boarders")
+//
+// klog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if klog.V(2) {
+// klog.Info("Starting transaction...")
+// }
+//
+// klog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to standard error.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=true
+// Logs are written to standard error instead of to files.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package klog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "math"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ infoLog severity = iota
+ warningLog
+ errorLog
+ fatalLog
+ numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+ return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+ atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+ return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (s *severity) Get() interface{} {
+ return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+ var threshold severity
+ // Is it a known name?
+ if v, ok := severityByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ threshold = severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+func severityByName(s string) (severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range severityName {
+ if name == s {
+ return severity(i), true
+ }
+ }
+ return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+ infoLog: &Stats.Info,
+ warningLog: &Stats.Warning,
+ errorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.ParseInt(patLev[1], 10, 32)
+ if err != nil {
+ return errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = 0
+ t.file = ""
+ return nil
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+// init sets up the defaults and runs flushDaemon.
+func init() {
+ logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR.
+ logging.setVState(0, nil, false)
+ logging.logDir = ""
+ logging.logFile = ""
+ logging.logFileMaxSizeMB = 1800
+ logging.toStderr = true
+ logging.alsoToStderr = false
+ logging.skipHeaders = false
+ logging.addDirHeader = false
+ logging.skipLogHeaders = false
+ go logging.flushDaemon()
+}
+
+// InitFlags is for explicitly initializing the flags.
+func InitFlags(flagset *flag.FlagSet) {
+ if flagset == nil {
+ flagset = flag.CommandLine
+ }
+
+ flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory")
+ flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file")
+ flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB,
+ "Defines the maximum size a log file can grow to. Unit is megabytes. "+
+ "If the value is 0, the maximum file size is unlimited.")
+ flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
+ flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files")
+ flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
+ flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages")
+ flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
+ flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files")
+ flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+	verbosity Level // V logging level, the value of the -v flag
+
+ // If non-empty, overrides the choice of directory in which to write logs.
+ // See createLogDirs for the full list of possible destinations.
+ logDir string
+
+	// If non-empty, specifies the path of the file to write logs. Mutually exclusive
+	// with the log_dir option.
+ logFile string
+
+	// When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it does, the
+	// logFile is cleaned up. If this value is 0, no size limit is applied to the logFile.
+ logFileMaxSizeMB uint64
+
+ // If true, do not add the prefix headers, useful when used with SetOutput
+ skipHeaders bool
+
+ // If true, do not add the headers to log files
+ skipLogHeaders bool
+
+ // If true, add the file directory to the header
+ addDirHeader bool
+
+ // If set, all output will be redirected unconditionally to the provided logr.Logger
+ logr logr.Logger
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ l.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&l.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ l.vmodule.filter = filter
+ l.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&l.filterLength, int32(len(filter)))
+ l.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+ l.freeListMu.Lock()
+ b := l.freeList
+ if b != nil {
+ l.freeList = b.next
+ }
+ l.freeListMu.Unlock()
+ if b == nil {
+ b = new(buffer)
+ } else {
+ b.next = nil
+ b.Reset()
+ }
+ return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death.
+ return
+ }
+ l.freeListMu.Lock()
+ b.next = l.freeList
+ l.freeList = b
+ l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+ L A single character, representing the log level (eg 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ path := file
+ file = path[slash+1:]
+ if l.addDirHeader {
+ if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
+ file = path[dirsep+1:]
+ }
+ }
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+ now := timeNow()
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > fatalLog {
+ s = infoLog // for safety.
+ }
+ buf := l.getBuffer()
+ if l.skipHeaders {
+ return buf
+ }
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.tmp[0] = severityChar[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.tmp[21] = ' '
+ buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+ buf.tmp[29] = ' '
+ buf.Write(buf.tmp[:30])
+ buf.WriteString(file)
+ buf.tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.tmp[n+1] = ']'
+ buf.tmp[n+2] = ' '
+ buf.Write(buf.tmp[:n+3])
+ return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+ buf.tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.tmp)
+ for {
+ j--
+ buf.tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, logr logr.Logger, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logr != nil {
+ l.putBuffer(buf)
+ buf = l.getBuffer()
+ }
+ fmt.Fprintln(buf, args...)
+ l.output(s, logr, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, logr logr.Logger, args ...interface{}) {
+ l.printDepth(s, logr, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, logr logr.Logger, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logr != nil {
+ l.putBuffer(buf)
+ buf = l.getBuffer()
+ }
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logr, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, logr logr.Logger, format string, args ...interface{}) {
+ buf, file, line := l.header(s, 0)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logr != nil {
+ l.putBuffer(buf)
+ buf = l.getBuffer()
+ }
+ fmt.Fprintf(buf, format, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logr, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number.
+// If alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ // if logr is set, we clear the generated header as we rely on the backing
+ // logr implementation to print headers
+ if logr != nil {
+ l.putBuffer(buf)
+ buf = l.getBuffer()
+ }
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logr, buf, file, line, alsoToStderr)
+}
+
+// errorS calls loggr.Error if loggr is specified; otherwise it falls back to the logging module.
+func (l *loggingT) errorS(err error, loggr logr.Logger, msg string, keysAndValues ...interface{}) {
+ if loggr != nil {
+ loggr.Error(err, msg, keysAndValues)
+ return
+ }
+ l.printS(err, msg, keysAndValues...)
+}
+
+// infoS calls loggr.Info if loggr is specified; otherwise it falls back to the logging module.
+func (l *loggingT) infoS(loggr logr.Logger, msg string, keysAndValues ...interface{}) {
+ if loggr != nil {
+ loggr.Info(msg, keysAndValues)
+ return
+ }
+ l.printS(nil, msg, keysAndValues...)
+}
+
+// printS is called from infoS and errorS if loggr is not specified.
+// If err is non-nil, the message is logged at errorLog severity; otherwise at infoLog.
+func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) {
+ b := &bytes.Buffer{}
+ b.WriteString(fmt.Sprintf("%q", msg))
+ if err != nil {
+ b.WriteByte(' ')
+ b.WriteString(fmt.Sprintf("err=%q", err.Error()))
+ }
+ kvListFormat(b, keysAndValues...)
+ var s severity
+ if err == nil {
+ s = infoLog
+ } else {
+ s = errorLog
+ }
+ l.printDepth(s, logging.logr, 2, b)
+}
+
+const missingValue = "(MISSING)"
+
+func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+ for i := 0; i < len(keysAndValues); i += 2 {
+ var v interface{}
+ k := keysAndValues[i]
+ if i+1 < len(keysAndValues) {
+ v = keysAndValues[i+1]
+ } else {
+ v = missingValue
+ }
+ b.WriteByte(' ')
+
+ switch v.(type) {
+ case string, error:
+ b.WriteString(fmt.Sprintf("%s=%q", k, v))
+ default:
+ if _, ok := v.(fmt.Stringer); ok {
+ b.WriteString(fmt.Sprintf("%s=%q", k, v))
+ } else {
+ b.WriteString(fmt.Sprintf("%s=%+v", k, v))
+ }
+ }
+ }
+}
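+
+// Illustrative note (editorial, not part of upstream klog): a call such as
+//   kvListFormat(b, "pod", "kubedns", "status", "ready")
+// appends ` pod="kubedns" status="ready"` to b; string and error values are
+// quoted, fmt.Stringer values are quoted via their String method, and anything
+// else is printed with %+v.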
+
+// redirectBuffer is used to set an alternate destination for the logs
+type redirectBuffer struct {
+ w io.Writer
+}
+
+func (rb *redirectBuffer) Sync() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Flush() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
+ return rb.w.Write(bytes)
+}
+
+// SetLogger will set the backing logr implementation for klog.
+// If set, all log lines will be suppressed from the regular Output, and
+// redirected to the logr implementation.
+// All log lines include the 'severity', 'file' and 'line' values attached as
+// structured logging values.
+// Use as:
+// ...
+// klog.SetLogger(zapr.NewLogger(zapLog))
+func SetLogger(logr logr.Logger) {
+ logging.logr = logr
+}
+
+// SetOutput sets the output destination for all severities
+func SetOutput(w io.Writer) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ for s := fatalLog; s >= infoLog; s-- {
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[s] = rb
+ }
+}
+
+// SetOutputBySeverity sets the output destination for specific severity
+func SetOutputBySeverity(name string, w io.Writer) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
+ }
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[sev] = rb
+}
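+
+// Illustrative usage (editorial sketch, not part of upstream klog; assumes a
+// bytes.Buffer sink):
+//   var buf bytes.Buffer
+//   klog.SetOutput(&buf)                         // every severity now writes to buf
+//   klog.SetOutputBySeverity("ERROR", os.Stderr) // then re-route just the ERROR stream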
+
+// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
+func LogToStderr(stderr bool) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+
+ logging.toStderr = stderr
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) {
+ l.mu.Lock()
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if log != nil {
+ // TODO: set 'severity' and caller information as structured log info
+ // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
+ if s == errorLog {
+ l.logr.Error(nil, string(data))
+ } else {
+ log.Info(string(data))
+ }
+ } else if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+
+ if logging.logFile != "" {
+ // Since we are using a single log file, all of the items in l.file array
+ // will point to the same file, so just use one of them to write data.
+ if l.file[infoLog] == nil {
+ if err := l.createFiles(infoLog); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ l.file[infoLog].Write(data)
+ } else {
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+
+ switch s {
+ case fatalLog:
+ l.file[fatalLog].Write(data)
+ fallthrough
+ case errorLog:
+ l.file[errorLog].Write(data)
+ fallthrough
+ case warningLog:
+ l.file[warningLog].Write(data)
+ fallthrough
+ case infoLog:
+ l.file[infoLog].Write(data)
+ }
+ }
+ }
+ if s == fatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ trace := stacks(true)
+ // Write the stack trace for all goroutines to the stderr.
+ if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr {
+ os.Stderr.Write(trace)
+ }
+ // Write the stack trace for all goroutines to the files.
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := fatalLog; log >= infoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ timeoutFlush(10 * time.Second)
+ os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ l.putBuffer(buf)
+ l.mu.Unlock()
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when klog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
+ }
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity
+ nbytes uint64 // The number of bytes written to this file
+ maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
+func CalculateMaxSize() uint64 {
+ if logging.logFile != "" {
+ if logging.logFileMaxSizeMB == 0 {
+ // If logFileMaxSizeMB is zero, we don't have limitations on the log size.
+ return math.MaxUint64
+ }
+ // Flag logFileMaxSizeMB is in MB for user convenience.
+ return logging.logFileMaxSizeMB * 1024 * 1024
+ }
+ // If the "log_file" flag is not specified, the target file (sb.file) will be cleaned up when it reaches a fixed size.
+ return MaxSize
+}
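+
+// Illustrative note (editorial, not part of upstream klog): with the "log_file"
+// flag set and logFileMaxSizeMB=100, the limit is 100 * 1024 * 1024 = 104857600
+// bytes; a value of 0 means no limit, and without "log_file" each per-severity
+// file rolls over at MaxSize.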
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
+ if err := sb.rotateFile(time.Now(), false); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severityName[sb.sev], now, startup)
+ sb.nbytes = 0
+ if err != nil {
+ return err
+ }
+
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ if sb.logger.skipLogHeaders {
+ return nil
+ }
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one
+ // has already been created, we can stop.
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ maxbytes: CalculateMaxSize(),
+ }
+ if err := sb.rotateFile(now, true); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 5 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+ for range time.NewTicker(flushInterval).C {
+ l.lockAndFlushAll()
+ }
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := fatalLog; s >= infoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
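+
+// Illustrative usage (editorial, not part of upstream klog):
+//   klog.CopyStandardLogTo("INFO")
+//   log.Print("falling back to defaults") // standard library "log" package
+// After the first call, lines written through the standard logger are re-emitted
+// at INFO severity with the caller's file and line preserved.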
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity(lb), logging.logr, file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose struct {
+ enabled bool
+ logr logr.Logger
+}
+
+func newVerbose(level Level, b bool) Verbose {
+ if logging.logr == nil {
+ return Verbose{b, nil}
+ }
+ return Verbose{b, logging.logr.V(int(level))}
+}
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a struct of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if klog.V(2).Enabled() { klog.Info("log this") }
+// or
+// klog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and -vmodule flags; both are off by default. The V call will log if its level
+// is less than or equal to the value of the -v flag, or alternatively if its level is
+// less than or equal to the value of the -vmodule pattern matching the source file
+// containing the call.
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return newVerbose(level, true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2, logging.pcs[:]) == 0 {
+ return newVerbose(level, false)
+ }
+ v, ok := logging.vmap[logging.pcs[0]]
+ if !ok {
+ v = logging.setV(logging.pcs[0])
+ }
+ return newVerbose(level, v >= level)
+ }
+ return newVerbose(level, false)
+}
+
+// Enabled will return true if this log level is enabled, guarded by the value
+// of v.
+// See the documentation of V for usage.
+func (v Verbose) Enabled() bool {
+ return v.enabled
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v.enabled {
+ logging.print(infoLog, v.logr, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v.enabled {
+ logging.println(infoLog, v.logr, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v.enabled {
+ logging.printf(infoLog, v.logr, format, args...)
+ }
+}
+
+// InfoS is equivalent to the global InfoS function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.infoS(v.logr, msg, keysAndValues...)
+ }
+}
+
+// Error is equivalent to the global Error function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Error(err error, msg string, args ...interface{}) {
+ if v.enabled {
+ logging.errorS(err, v.logr, msg, args...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(infoLog, logging.logr, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(infoLog, logging.logr, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Infoln(args ...interface{}) {
+ logging.println(infoLog, logging.logr, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(infoLog, logging.logr, format, args...)
+}
+
+// InfoS logs a structured message to the INFO log.
+// The msg argument is used to add a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
+// output:
+// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
+func InfoS(msg string, keysAndValues ...interface{}) {
+ logging.infoS(logging.logr, msg, keysAndValues...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(warningLog, logging.logr, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(warningLog, logging.logr, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Warningln(args ...interface{}) {
+ logging.println(warningLog, logging.logr, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(warningLog, logging.logr, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(errorLog, logging.logr, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(errorLog, logging.logr, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Errorln(args ...interface{}) {
+ logging.println(errorLog, logging.logr, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(errorLog, logging.logr, format, args...)
+}
+
+// ErrorS logs a structured message to the ERROR, WARNING, and INFO logs.
+// The err argument is used as the "err" field of the log line.
+// The msg argument is used to add a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.ErrorS(err, "Failed to update pod status")
+// output:
+// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout"
+func ErrorS(err error, msg string, keysAndValues ...interface{}) {
+ logging.errorS(err, logging.logr, msg, keysAndValues...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(fatalLog, logging.logr, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(fatalLog, logging.logr, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Fatalln(args ...interface{}) {
+ logging.println(fatalLog, logging.logr, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(fatalLog, logging.logr, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(fatalLog, logging.logr, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(fatalLog, logging.logr, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(fatalLog, logging.logr, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(fatalLog, logging.logr, format, args...)
+}
+
+// ObjectRef references a kubernetes object
+type ObjectRef struct {
+ Name string `json:"name"`
+ Namespace string `json:"namespace,omitempty"`
+}
+
+func (ref ObjectRef) String() string {
+ if ref.Namespace != "" {
+ return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
+ }
+ return ref.Name
+}
+
+// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface.
+// It may expand in the future, but will always remain a subset of that interface.
+type KMetadata interface {
+ GetName() string
+ GetNamespace() string
+}
+
+// KObj returns an ObjectRef for the given KMetadata.
+func KObj(obj KMetadata) ObjectRef {
+ return ObjectRef{
+ Name: obj.GetName(),
+ Namespace: obj.GetNamespace(),
+ }
+}
+
+// KRef returns an ObjectRef from a name and namespace.
+func KRef(namespace, name string) ObjectRef {
+ return ObjectRef{
+ Name: name,
+ Namespace: namespace,
+ }
+}
diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go
new file mode 100644
index 000000000..de830d922
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file.go
@@ -0,0 +1,164 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package klog
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+func createLogDirs() {
+ if logging.logDir != "" {
+ logDirs = append(logDirs, logging.logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+ userNameOnce sync.Once
+)
+
+func init() {
+ if h, err := os.Hostname(); err == nil {
+ host = shortHostname(h)
+ }
+}
+
+func getUserName() string {
+ userNameOnce.Do(func() {
+ // On Windows, the Go 'user' package requires netapi32.dll.
+ // This affects Windows Nano Server:
+ // https://github.com/golang/go/issues/21867
+ // Fallback to using environment variables.
+ if runtime.GOOS == "windows" {
+ u := os.Getenv("USERNAME")
+ if len(u) == 0 {
+ return
+ }
+ // Sanitize the USERNAME since it may contain filepath separators.
+ u = strings.Replace(u, `\`, "_", -1)
+
+ // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME'
+ d := os.Getenv("USERDOMAIN")
+ if len(d) != 0 {
+ userName = d + "_" + u
+ } else {
+ userName = u
+ }
+ } else {
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+ }
+ })
+
+ return userName
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ getUserName(),
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
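+
+// Illustrative note (editorial, not part of upstream klog): for a binary named
+// "podman" run by root on host "worker1" with pid 4242, an INFO log created at
+// 2020-08-27 14:30:00 would be named roughly
+//   podman.worker1.root.log.INFO.20200827-143000.4242
+// with "podman.INFO" as the corresponding symlink name.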
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
+ if logging.logFile != "" {
+ f, err := openOrCreate(logging.logFile, startup)
+ if err == nil {
+ return f, logging.logFile, nil
+ }
+ return nil, "", fmt.Errorf("log: unable to create log: %v", err)
+ }
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := openOrCreate(fname, startup)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
+
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func openOrCreate(name string, startup bool) (*os.File, error) {
+ if startup {
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ return f, err
+ }
+ f, err := os.Create(name)
+ return f, err
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c6e098f58..5490f3711 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -51,7 +51,7 @@ github.com/containerd/containerd/platforms
github.com/containerd/continuity/fs
github.com/containerd/continuity/syscallx
github.com/containerd/continuity/sysx
-# github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
+# github.com/containernetworking/cni v0.8.0
github.com/containernetworking/cni/libcni
github.com/containernetworking/cni/pkg/invoke
github.com/containernetworking/cni/pkg/types
@@ -59,14 +59,14 @@ github.com/containernetworking/cni/pkg/types/020
github.com/containernetworking/cni/pkg/types/current
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v0.8.6
+# github.com/containernetworking/plugins v0.8.7
github.com/containernetworking/plugins/pkg/ip
github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.15.1-0.20200731151214-29f4d01c621c
+# github.com/containers/buildah v1.15.1-0.20200813183340-0a8dc1f8064c
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
@@ -84,13 +84,15 @@ github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
github.com/containers/buildah/util
-# github.com/containers/common v0.18.0
+# github.com/containers/common v0.20.3-0.20200827091701-a550d6a98aa3
github.com/containers/common/pkg/apparmor
+github.com/containers/common/pkg/apparmor/internal/supported
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
github.com/containers/common/pkg/cgroupv2
github.com/containers/common/pkg/config
github.com/containers/common/pkg/retry
+github.com/containers/common/pkg/seccomp
github.com/containers/common/pkg/sysinfo
github.com/containers/common/version
# github.com/containers/conmon v2.0.19+incompatible
@@ -156,7 +158,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.23.0
+# github.com/containers/storage v1.23.2
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -282,6 +284,8 @@ github.com/fsnotify/fsnotify
github.com/fsouza/go-dockerclient
# github.com/ghodss/yaml v1.0.0
github.com/ghodss/yaml
+# github.com/go-logr/logr v0.2.0
+github.com/go-logr/logr
# github.com/godbus/dbus/v5 v5.0.3
github.com/godbus/dbus/v5
# github.com/gogo/protobuf v1.3.1
@@ -303,7 +307,7 @@ github.com/google/shlex
github.com/google/uuid
# github.com/gorilla/mux v1.7.4
github.com/gorilla/mux
-# github.com/gorilla/schema v1.1.0
+# github.com/gorilla/schema v1.2.0
github.com/gorilla/schema
# github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/errwrap
@@ -325,7 +329,7 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.10
github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.10
+# github.com/klauspost/compress v1.10.11
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -418,9 +422,9 @@ github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/system
github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
+# github.com/opencontainers/runtime-spec v1.0.3-0.20200817204227-f9c09b4ea1df
github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/runtime-tools v0.9.1-0.20200714183735-07406c5828aa
+# github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/runtime-tools/error
github.com/opencontainers/runtime-tools/filepath
github.com/opencontainers/runtime-tools/generate
@@ -481,9 +485,9 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
github.com/rootless-containers/rootlesskit/pkg/port/portutil
# github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8
github.com/safchain/ethtool
-# github.com/seccomp/containers-golang v0.5.0
+# github.com/seccomp/containers-golang v0.6.0
github.com/seccomp/containers-golang
-# github.com/seccomp/libseccomp-golang v0.9.1
+# github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.6.0
github.com/sirupsen/logrus
@@ -563,7 +567,7 @@ go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.4.0
go.uber.org/atomic
-# golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20
@@ -583,7 +587,7 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/terminal
-# golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
+# golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/html
@@ -601,7 +605,7 @@ golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
+# golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
@@ -640,7 +644,7 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
+# google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.27.1
google.golang.org/grpc/codes
@@ -648,7 +652,7 @@ google.golang.org/grpc/connectivity
google.golang.org/grpc/grpclog
google.golang.org/grpc/internal
google.golang.org/grpc/status
-# google.golang.org/protobuf v1.23.0
+# google.golang.org/protobuf v1.24.0
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
google.golang.org/protobuf/internal/descfmt
@@ -696,7 +700,7 @@ gopkg.in/yaml.v3
# k8s.io/api v0.18.8
k8s.io/api/apps/v1
k8s.io/api/core/v1
-# k8s.io/apimachinery v0.18.8
+# k8s.io/apimachinery v0.19.0
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
@@ -755,9 +759,11 @@ k8s.io/client-go/util/homedir
k8s.io/client-go/util/keyutil
# k8s.io/klog v1.0.0
k8s.io/klog
+# k8s.io/klog/v2 v2.2.0
+k8s.io/klog/v2
# k8s.io/utils v0.0.0-20190221042446-c2654d5206da
k8s.io/utils/integer
-# sigs.k8s.io/structured-merge-diff/v3 v3.0.0
-sigs.k8s.io/structured-merge-diff/v3/value
+# sigs.k8s.io/structured-merge-diff/v4 v4.0.1
+sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go
index f70cd4167..f70cd4167 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/allocator.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go
index 84d7f0f3f..84d7f0f3f 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/doc.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go
index be3c67249..be3c67249 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/fields.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
index d4adb8fc9..d4adb8fc9 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/jsontagutil.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go
index 0748f18e8..0748f18e8 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/list.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go
index 197d4c921..197d4c921 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/listreflect.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go
index 64cd8e7c0..64cd8e7c0 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/listunstructured.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go
index 168b9fa08..168b9fa08 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/map.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go
index dc8b8c720..dc8b8c720 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go
index d8e208628..d8e208628 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
index 49e6dd169..49e6dd169 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
index c78a4c18d..c78a4c18d 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go
index 4a7bb5c6e..4a7bb5c6e 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/structreflect.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go
index ea79e3a00..ea79e3a00 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go
index 05e70deba..05e70deba 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/valuereflect.go
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go
index ac5a92628..ac5a92628 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/valueunstructured.go