-rw-r--r--  .cirrus.yml  16
-rw-r--r--  Makefile  8
-rw-r--r--  changelog.txt  108
-rw-r--r--  cmd/podman/common/volumes.go  22
-rw-r--r--  cmd/podman/containers/create.go  3
-rw-r--r--  cmd/podman/containers/rm.go  3
-rw-r--r--  cmd/podman/containers/run.go  3
-rw-r--r--  cmd/podman/containers/runlabel.go  3
-rw-r--r--  cmd/podman/containers/stats.go  9
-rw-r--r--  cmd/podman/generate/systemd.go  3
-rw-r--r--  cmd/podman/images/build.go  1
-rw-r--r--  cmd/podman/images/list.go  1
-rw-r--r--  cmd/podman/images/load.go  5
-rw-r--r--  cmd/podman/images/pull.go  16
-rw-r--r--  cmd/podman/images/push.go  17
-rw-r--r--  cmd/podman/manifest/push.go  38
-rw-r--r--  cmd/podman/play/kube.go  16
-rw-r--r--  cmd/podman/pods/inspect.go  13
-rw-r--r--  cmd/podman/root.go  2
-rw-r--r--  cmd/podman/system/service.go  23
-rw-r--r--  cmd/podman/system/varlink.go  7
-rw-r--r--  commands-demo.md  2
-rw-r--r--  completions/bash/podman  18
-rwxr-xr-x  contrib/cirrus/integration_test.sh  1
-rw-r--r--  contrib/dependencies.txt  1
-rw-r--r--  contrib/gate/Dockerfile  8
-rw-r--r--  docs/Readme.md  2
-rw-r--r--  docs/source/index.rst  2
-rw-r--r--  docs/source/markdown/links/podman-image-diff.1  1
-rw-r--r--  docs/source/markdown/podman-create.1.md  2
-rw-r--r--  docs/source/markdown/podman-exec.1.md  2
-rw-r--r--  docs/source/markdown/podman-generate-systemd.1.md  12
-rw-r--r--  docs/source/markdown/podman-history.1.md  5
-rw-r--r--  docs/source/markdown/podman-image-diff.1.md  46
-rw-r--r--  docs/source/markdown/podman-image.1.md  2
-rw-r--r--  docs/source/markdown/podman-manifest-push.1.md  18
-rw-r--r--  docs/source/markdown/podman-network-inspect.1.md  4
-rw-r--r--  docs/source/markdown/podman-pod-inspect.1.md  51
-rw-r--r--  docs/source/markdown/podman-pull.1.md  5
-rw-r--r--  docs/source/markdown/podman-run.1.md  2
-rw-r--r--  docs/source/markdown/podman-system-service.1.md  6
-rw-r--r--  docs/source/markdown/podman-varlink.1.md  2
-rw-r--r--  docs/source/markdown/podman-wait.1.md  3
-rw-r--r--  docs/source/markdown/podman.1.md  6
-rw-r--r--  docs/tutorials/rootless_tutorial.md  42
-rw-r--r--  go.mod  10
-rw-r--r--  go.sum  94
-rwxr-xr-x  hack/podman-registry  74
-rwxr-xr-x  hack/xref-helpmsgs-manpages  4
-rw-r--r--  libpod/image/docker_registry_options.go  2
-rw-r--r--  libpod/options.go  18
-rw-r--r--  libpod/runtime.go  5
-rw-r--r--  pkg/api/handlers/compat/containers_archive.go  12
-rw-r--r--  pkg/api/handlers/compat/containers_attach.go  2
-rw-r--r--  pkg/api/handlers/compat/containers_stats.go  4
-rw-r--r--  pkg/api/handlers/compat/events.go  12
-rw-r--r--  pkg/api/handlers/compat/images.go  30
-rw-r--r--  pkg/api/handlers/compat/images_push.go  15
-rw-r--r--  pkg/api/handlers/compat/networks.go  301
-rw-r--r--  pkg/api/handlers/compat/resize.go  18
-rw-r--r--  pkg/api/handlers/compat/swagger.go  28
-rw-r--r--  pkg/api/handlers/compat/types.go  2
-rw-r--r--  pkg/api/handlers/libpod/containers.go  4
-rw-r--r--  pkg/api/handlers/libpod/copy.go  12
-rw-r--r--  pkg/api/handlers/libpod/images.go  58
-rw-r--r--  pkg/api/handlers/libpod/manifests.go  4
-rw-r--r--  pkg/api/handlers/libpod/play.go  20
-rw-r--r--  pkg/api/handlers/types.go  2
-rw-r--r--  pkg/api/server/register_archive.go  171
-rw-r--r--  pkg/api/server/register_events.go  5
-rw-r--r--  pkg/api/server/register_images.go  15
-rw-r--r--  pkg/api/server/register_networks.go  90
-rw-r--r--  pkg/api/server/server.go  9
-rw-r--r--  pkg/api/server/swagger.go  7
-rw-r--r--  pkg/api/tags.yaml  2
-rw-r--r--  pkg/auth/auth.go  216
-rw-r--r--  pkg/bindings/connection.go  21
-rw-r--r--  pkg/bindings/containers/checkpoint.go  4
-rw-r--r--  pkg/bindings/containers/commit.go  2
-rw-r--r--  pkg/bindings/containers/containers.go  93
-rw-r--r--  pkg/bindings/containers/create.go  2
-rw-r--r--  pkg/bindings/containers/diff.go  2
-rw-r--r--  pkg/bindings/containers/exec.go  4
-rw-r--r--  pkg/bindings/containers/healthcheck.go  2
-rw-r--r--  pkg/bindings/containers/logs.go  3
-rw-r--r--  pkg/bindings/containers/mount.go  6
-rw-r--r--  pkg/bindings/generate/generate.go  2
-rw-r--r--  pkg/bindings/images/diff.go  2
-rw-r--r--  pkg/bindings/images/images.go  58
-rw-r--r--  pkg/bindings/images/rm.go  4
-rw-r--r--  pkg/bindings/manifests/manifests.go  10
-rw-r--r--  pkg/bindings/network/network.go  8
-rw-r--r--  pkg/bindings/play/play.go  9
-rw-r--r--  pkg/bindings/pods/pods.go  28
-rw-r--r--  pkg/bindings/system/info.go  2
-rw-r--r--  pkg/bindings/system/system.go  37
-rw-r--r--  pkg/bindings/test/auth_test.go  143
-rw-r--r--  pkg/bindings/test/system_test.go  4
-rw-r--r--  pkg/bindings/volumes/volumes.go  10
-rw-r--r--  pkg/domain/entities/engine.go  27
-rw-r--r--  pkg/domain/entities/generate.go  6
-rw-r--r--  pkg/domain/entities/images.go  16
-rw-r--r--  pkg/domain/entities/manifest.go  11
-rw-r--r--  pkg/domain/entities/play.go  7
-rw-r--r--  pkg/domain/entities/pods.go  2
-rw-r--r--  pkg/domain/entities/system.go  6
-rw-r--r--  pkg/domain/infra/abi/containers.go  1
-rw-r--r--  pkg/domain/infra/abi/events.go  5
-rw-r--r--  pkg/domain/infra/abi/generate.go  6
-rw-r--r--  pkg/domain/infra/abi/images.go  18
-rw-r--r--  pkg/domain/infra/abi/manifest.go  21
-rw-r--r--  pkg/domain/infra/abi/play.go  10
-rw-r--r--  pkg/domain/infra/abi/system.go  38
-rw-r--r--  pkg/domain/infra/abi/system_novalink.go  14
-rw-r--r--  pkg/domain/infra/abi/system_varlink.go  49
-rw-r--r--  pkg/domain/infra/runtime_libpod.go  3
-rw-r--r--  pkg/domain/infra/tunnel/events.go  3
-rw-r--r--  pkg/network/network.go  18
-rw-r--r--  pkg/ps/ps.go  2
-rw-r--r--  pkg/signal/signal_common.go  41
-rw-r--r--  pkg/signal/signal_linux.go  36
-rw-r--r--  pkg/signal/signal_unsupported.go  89
-rw-r--r--  pkg/specgen/generate/config_linux.go  8
-rw-r--r--  pkg/specgen/generate/config_linux_nocgo.go  3
-rw-r--r--  test/apiv2/10-images.at  29
-rw-r--r--  test/apiv2/40-pods.at  4
-rw-r--r--  test/apiv2/rest_api/__init__.py  0
-rw-r--r--  test/apiv2/rest_api/test_rest_v1_0_0.py  219
-rwxr-xr-x  test/apiv2/test-apiv2  21
-rw-r--r--  test/e2e/attach_test.go  1
-rw-r--r--  test/e2e/common_test.go  8
-rw-r--r--  test/e2e/events_test.go  49
-rw-r--r--  test/e2e/generate_systemd_test.go  92
-rw-r--r--  test/e2e/libpod_suite_remote_test.go  2
-rw-r--r--  test/e2e/libpod_suite_varlink_test.go  2
-rw-r--r--  test/e2e/ps_test.go  25
-rw-r--r--  test/e2e/run_volume_test.go  5
-rw-r--r--  test/endpoint/endpoint.go  2
-rw-r--r--  test/system/010-images.bats  2
-rw-r--r--  test/system/200-pod.bats  12
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml  23
-rw-r--r--  vendor/github.com/containers/buildah/.golangci.yml  7
-rw-r--r--  vendor/github.com/containers/buildah/SECURITY.md  3
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go  4
-rw-r--r--  vendor/github.com/containers/buildah/commit.go  35
-rw-r--r--  vendor/github.com/containers/buildah/common.go  6
-rw-r--r--  vendor/github.com/containers/buildah/go.mod  24
-rw-r--r--  vendor/github.com/containers/buildah/go.sum  150
-rw-r--r--  vendor/github.com/containers/buildah/image.go  6
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go  23
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go  8
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go  12
-rw-r--r--  vendor/github.com/containers/buildah/new.go  13
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go  1
-rw-r--r--  vendor/github.com/containers/buildah/pull.go  7
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go  8
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go  81
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_local.go  81
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_remote.go  25
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_unix.go  15
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_windows.go  10
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go  17
-rw-r--r--  vendor/github.com/containers/common/pkg/config/libpodConfig.go  4
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go  11
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image_src.go  1
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/oci.go  2
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go  114
-rw-r--r--  vendor/github.com/containers/image/v5/signature/policy_config.go  21
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go  6
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go  6
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go  1
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go  5
-rw-r--r--  vendor/github.com/mattn/go-isatty/.travis.yml  14
-rw-r--r--  vendor/github.com/mattn/go-isatty/LICENSE  9
-rw-r--r--  vendor/github.com/mattn/go-isatty/README.md  50
-rw-r--r--  vendor/github.com/mattn/go-isatty/doc.go  2
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.mod  5
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.sum  2
-rw-r--r--  vendor/github.com/mattn/go-isatty/go.test.sh  12
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_bsd.go  18
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_others.go  15
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_plan9.go  22
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_solaris.go  22
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_tcgets.go  18
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_windows.go  125
-rw-r--r--  vendor/github.com/mattn/go-isatty/renovate.json  8
-rw-r--r--  vendor/github.com/mattn/go-runewidth/.travis.yml  16
-rw-r--r--  vendor/github.com/mattn/go-runewidth/LICENSE  21
-rw-r--r--  vendor/github.com/mattn/go-runewidth/README.md  27
-rw-r--r--  vendor/github.com/mattn/go-runewidth/go.mod  3
-rw-r--r--  vendor/github.com/mattn/go-runewidth/go.test.sh  12
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth.go  257
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth_appengine.go  8
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth_js.go  9
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth_posix.go  82
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth_table.go  437
-rw-r--r--  vendor/github.com/mattn/go-runewidth/runewidth_windows.go  28
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label.go  22
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go  34
-rw-r--r--  vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go  4
-rw-r--r--  vendor/github.com/openshift/api/LICENSE  201
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml  164
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml  328
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml  101
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml  98
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml  219
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml  123
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml  366
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml  70
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml  100
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml  76
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml  144
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml  221
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml  55
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml  141
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml  661
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml  63
-rw-r--r--  vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml  88
-rw-r--r--  vendor/github.com/openshift/api/config/v1/doc.go  8
-rw-r--r--  vendor/github.com/openshift/api/config/v1/register.go  70
-rw-r--r--  vendor/github.com/openshift/api/config/v1/stringsource.go  31
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types.go  312
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_apiserver.go  118
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_authentication.go  118
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_build.go  109
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_operator.go  184
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_cluster_version.go  267
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_console.go  62
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_dns.go  87
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_feature.go  194
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_image.go  115
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_infrastructure.go  241
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_ingress.go  46
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_network.go  122
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_oauth.go  557
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_operatorhub.go  78
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_project.go  54
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_proxy.go  90
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_scheduling.go  74
-rw-r--r--  vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go  260
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go  3365
-rw-r--r--  vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go  1292
-rw-r--r--  vendor/github.com/stretchr/testify/LICENSE  2
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_compare.go (renamed from vendor/github.com/stretchr/testify/assert/assertion_order.go)  173
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go  49
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go  79
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertions.go  191
-rw-r--r--  vendor/github.com/stretchr/testify/assert/http_assertions.go  46
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go  91
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go  79
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar.go  156
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar_filler.go  136
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go  173
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go (renamed from vendor/github.com/vbauerster/mpb/v5/spinner_filler.go)  4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/bar_option.go  48
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/container_option.go (renamed from vendor/github.com/vbauerster/mpb/v5/options.go)  11
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go  13
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go  15
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go  19
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/any.go  14
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/counters.go  8
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/decorator.go  59
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go  4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/eta.go  4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/merge.go  29
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/name.go  2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go  2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/percentage.go  2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/speed.go  4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/decor/spinner.go  2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/go.mod  5
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/go.sum  16
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/internal/width.go  8
-rw-r--r--  vendor/github.com/vbauerster/mpb/v5/progress.go  115
-rw-r--r--  vendor/gopkg.in/yaml.v3/.travis.yml  16
-rw-r--r--  vendor/gopkg.in/yaml.v3/LICENSE  50
-rw-r--r--  vendor/gopkg.in/yaml.v3/NOTICE  13
-rw-r--r--  vendor/gopkg.in/yaml.v3/README.md  150
-rw-r--r--  vendor/gopkg.in/yaml.v3/apic.go  746
-rw-r--r--  vendor/gopkg.in/yaml.v3/decode.go  931
-rw-r--r--  vendor/gopkg.in/yaml.v3/emitterc.go  1992
-rw-r--r--  vendor/gopkg.in/yaml.v3/encode.go  561
-rw-r--r--  vendor/gopkg.in/yaml.v3/go.mod  5
-rw-r--r--  vendor/gopkg.in/yaml.v3/parserc.go  1229
-rw-r--r--  vendor/gopkg.in/yaml.v3/readerc.go  434
-rw-r--r--  vendor/gopkg.in/yaml.v3/resolve.go  326
-rw-r--r--  vendor/gopkg.in/yaml.v3/scannerc.go  3025
-rw-r--r--  vendor/gopkg.in/yaml.v3/sorter.go  134
-rw-r--r--  vendor/gopkg.in/yaml.v3/writerc.go  48
-rw-r--r--  vendor/gopkg.in/yaml.v3/yaml.go  662
-rw-r--r--  vendor/gopkg.in/yaml.v3/yamlh.go  805
-rw-r--r--  vendor/gopkg.in/yaml.v3/yamlprivateh.go  198
-rw-r--r--  vendor/modules.txt  22
293 files changed, 16584 insertions, 12582 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 017618aea..116dc90cd 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -137,11 +137,9 @@ gating_task:
# FIXME
#- 'cd $GOSRC && ./hack/podman-commands.sh |& ${TIMESTAMP}'
# N/B: need 'clean' so some committed files are re-generated.
- # FIXME
- #- '/usr/local/bin/entrypoint.sh clean podman-remote |& ${TIMESTAMP}'
- #- '/usr/local/bin/entrypoint.sh clean podman xref_helpmsgs_manpages BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp" |& ${TIMESTAMP}'
- # FIXME
- #- '/usr/local/bin/entrypoint.sh local-cross |& ${TIMESTAMP}'
+ - '/usr/local/bin/entrypoint.sh clean podman-remote |& ${TIMESTAMP}'
+ - '/usr/local/bin/entrypoint.sh clean podman xref_helpmsgs_manpages BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp" |& ${TIMESTAMP}'
+ - '/usr/local/bin/entrypoint.sh local-cross |& ${TIMESTAMP}'
# Verify some aspects of ci/related scripts
ci_script:
@@ -311,8 +309,6 @@ build_each_commit_task:
build_without_cgo_task:
- skip: $CI == 'true'
-
depends_on:
- "gating"
- "vendor"
@@ -424,10 +420,10 @@ testing_task:
- name: "test ${FEDORA_NAME}"
gce_instance:
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+ - name: "test ${PRIOR_FEDORA_NAME}"
+ gce_instance:
+ image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# TODO:
- # - name: "test ${PRIOR_FEDORA_NAME}"
- # gce_instance:
- # image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# - name: "test ${UBUNTU_NAME}"
# gce_instance:
# image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
diff --git a/Makefile b/Makefile
index 16a09fb59..75cf64c6c 100644
--- a/Makefile
+++ b/Makefile
@@ -22,7 +22,7 @@ ETCDIR ?= /etc
TMPFILESDIR ?= ${PREFIX}/lib/tmpfiles.d
SYSTEMDDIR ?= ${PREFIX}/lib/systemd/system
USERSYSTEMDDIR ?= ${PREFIX}/lib/systemd/user
-REMOTETAGS := !ABISupport remoteclient exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp
+REMOTETAGS ?= !ABISupport remote exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp
BUILDTAGS ?= \
$(shell hack/apparmor_tag.sh) \
$(shell hack/btrfs_installed_tag.sh) \
@@ -45,7 +45,7 @@ ifeq ($(shell go help mod >/dev/null 2>&1 && echo true), true)
GO_BUILD=GO111MODULE=on $(GO) build -mod=vendor
endif
-BUILDTAGS_CROSS ?= containers_image_openpgp exclude_graphdriver_btrfs exclude_graphdriver_devicemapper exclude_graphdriver_overlay
+BUILDTAGS_CROSS ?= ABISupport containers_image_openpgp exclude_graphdriver_btrfs exclude_graphdriver_devicemapper exclude_graphdriver_overlay
ifneq (,$(findstring varlink,$(BUILDTAGS)))
PODMAN_VARLINK_DEPENDENCIES = pkg/varlink/iopodman.go
endif
@@ -318,7 +318,7 @@ localunit: test/goecho/goecho varlink_generate
ginkgo \
-r \
$(TESTFLAGS) \
- --skipPackage test/e2e,pkg/apparmor,test/endpoint,pkg/bindings \
+ --skipPackage test/e2e,pkg/apparmor,test/endpoint,pkg/bindings,hack \
--cover \
--covermode atomic \
--tags "$(BUILDTAGS)" \
@@ -326,7 +326,7 @@ localunit: test/goecho/goecho varlink_generate
.PHONY: ginkgo
ginkgo:
- ginkgo -v $(TESTFLAGS) -tags "$(BUILDTAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -nodes 3 -debug test/e2e/.
+ ginkgo -v $(TESTFLAGS) -tags "$(BUILDTAGS)" $(GINKGOTIMEOUT) -cover -flakeAttempts 3 -progress -trace -noColor -nodes 3 -debug test/e2e/. hack/.
.PHONY: ginkgo-remote
ginkgo-remote:
diff --git a/changelog.txt b/changelog.txt
index 552a17663..47a99ba64 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,111 @@
+- Changelog for v2.0.0-rc3 (2020-05-29)
+ * Bump github.com/stretchr/testify from 1.5.1 to 1.6.0
+ * V2 verify JSON output is consistent and doesn't drift
+ * Vendor in containers/common v0.12.0
+ * Ensure that signal names can be parsed on Windows
+ * fix `ps --last=N`
+ * test.apiv2: add testing for image and deal with API returning binary
+ * specgen: fix segfault
+ * Add streaming ability to endpoint
+ * Fix builds on 32 bit arches
+ * v2 libpod push: correct docs
+ * container stats: fix --no-stream race
+ * Add --format to pod inspect
+ * Add support for `readonly` option to --mount
+ * docs: fix typo
+ * V2 Fix interface nil checks
+ * [CI:DOCS] Tweak casing in rootless doc
+ * podman-registry: many unrelated fixes
+ * Fix Dockerfile
+ * Bump github.com/opencontainers/selinux from 1.5.1 to 1.5.2
+ * podman-registry helper script: handle errors
+ * Makefile: customizable $REMOTETAGS
+ * add section on rootless volumes
+ * [CI:DOCS] Prepare image to turn on podman-commands test
+ * Vendor in latest containers/buildah
+ * Turn on Fedora testing
+ * [CI:DOCS] Fix readthedocs link
+ * [CI:DOCS]add crun to gating image
+ * network compatibility endpoints for API
+ * Add MethodNotAllowedHandler() to add in debugging
+ * Follow up PR to fix issues found in #6341
+ * Bump to v2.0.0-dev
+ * [CI:DOCS]Add conmon to gating image
+ * Attempt to turn on build_without_cgo tests
+ * Attempt to turn on additional build tests
+ * Added new flags to 'podman generate systemd' to change the unit name prefix
+ * Enable rootless tests for podman remote
+ * V2 enable remote logs and testing
+
+- Changelog for v2.0.0-rc2 (2020-05-22)
+ * Attempt to turn on integration tests
+ * Removes remote system reset functionality. skip e2e test for remote.
+ * Attempt to turn on special_testing_endpoing tests
+ * Attempt to turn on varlink tests
+ * Attempt to turn on rpmbuild tests
+ * Bump github.com/containers/common from 0.11.2 to 0.11.4
+ * Enables iidfile test as issue fixed now
+ * [CI:DOCS] Docs revamp.
+ * Fix podman-remote start tests
+ * podman version --format ... was not working
+ * Display human build date in podman info
+ * remote manifest test
+ * Turn on more remote tests
+ * v2 podman-remote build
+ * Fix podman-remote stop --all to handle not running containers
+ * Enable rmi test
+ * Bump github.com/opencontainers/go-digest from 1.0.0-rc1 to 1.0.0
+ * Remove github.com/libpod/libpod from cmd/pkg/podman
+ * Start testing with cross compilation
+ * Fixes podman pod create --pod-id-file #6292
+ * remote untag test
+ * Get proper exit code when running or starting a container.
+ * vendor: update seccomp/containers-golang to v0.4.1
+ * Bump github.com/containers/storage from 1.19.2 to 1.20.1
+ * Bump github.com/onsi/ginkgo from 1.12.0 to 1.12.2
+ * Handle filters correctly for podman prune
+ * Fix remote handling of podman images calls
+ * Bump k8s.io/api from 0.18.2 to 0.18.3
+ * Bump github.com/onsi/gomega from 1.10.0 to 1.10.1
+ * Enable system prune test remote client
+ * Fix build on OS X
+ * Update Derivative API tutorial to reflect the HTTP API
+ * Turn off 'noexec' option by default for named volumes
+ * enable remote integration tests for init
+ * Add a test for detached exec
+ * Update manpage for `podman exec` to include detach flag
+ * Enable cleanup processes for detached exec
+ * Add ability to clean up exec sessions with cleanup
+ * Add CLI frontend for detached exec
+ * Add backend code for detached exec
+ * Add exit commands to exec sessions
+ * enable pod_create remote integration tests
+ * Fix remote integration for healthchecks
+ * Fix create_test for remote integration
+ * govern remote attach and start
+ * Test fixes for remote integration
+ * V2 API Version Support
+ * Print container state when erroring that it is improper
+ * system tests: more podman-pod tests
+ * don't skip log tests unless remote
+ * [CI:DOCS] Image tree endpoint should return 404
+ * oci conmon: tell conmon to log container name
+ * add go-bindings for `hack/podman-registry`
+ * New tool: hack/podman-registry, manages local registry
+ * Testcase added for network commands
+ * format option added to network inspect command.
+ * filter option added to network ls command.
+ * Fix mountpont in SecretMountsWithUIDGID
+ * Update troubleshoot page
+ * v2 enable remote integration tests
+ * Get MAC, Windows and Linux podman-remote from latest version links.
+ * V2 Implement terminal handling in bindings attach
+ * Fix EOM for SendFile
+ * Bump to v2.0.0-dev
+ * Give `auto-update` ability to use per-container authfile specified by label.
+ * system tests: small fixes for rawhide+cgroups v1
+ * Add HairpinMode to our CNI configs
+
- Changelog for v2.0.0-rc1 (2020-05-18)
* v2endpoint remove image path correction
* Drop APIv2 resize endpoint
diff --git a/cmd/podman/common/volumes.go b/cmd/podman/common/volumes.go
index 6b0b6e9cf..a70410ad3 100644
--- a/cmd/podman/common/volumes.go
+++ b/cmd/podman/common/volumes.go
@@ -209,9 +209,29 @@ func getBindMount(args []string) (spec.Mount, error) {
switch kv[0] {
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
+ case "readonly", "read-only":
+ if setRORW {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'readonly', 'ro', or 'rw' options more than once")
+ }
+ setRORW = true
+ switch len(kv) {
+ case 1:
+ newMount.Options = append(newMount.Options, "ro")
+ case 2:
+ switch strings.ToLower(kv[1]) {
+ case "true":
+ newMount.Options = append(newMount.Options, "ro")
+ case "false":
+ // RW is default, so do nothing
+ default:
+ return newMount, errors.Wrapf(optionArgError, "readonly must be set to true or false, instead received %q", kv[1])
+ }
+ default:
+ return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val)
+ }
case "ro", "rw":
if setRORW {
- return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' or 'rw' options more than once")
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'readonly', 'ro', or 'rw' options more than once")
}
setRORW = true
// Can be formatted as one of:
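For orientation, a hedged usage sketch of the new option handled above (flag syntax per the podman-create(1) change later in this diff; the image, paths, and error output are illustrative):

```
$ podman run --rm --mount type=bind,source=/srv/data,destination=/data,readonly alpine touch /data/x
touch: /data/x: Read-only file system
```

Here `readonly` (or `readonly=true`) behaves like the pre-existing `ro` option, and `readonly=false` leaves the mount read-write, which is the default.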
diff --git a/cmd/podman/containers/create.go b/cmd/podman/containers/create.go
index bb6cb5fdd..c8007bc2f 100644
--- a/cmd/podman/containers/create.go
+++ b/cmd/podman/containers/create.go
@@ -62,6 +62,9 @@ func createFlags(flags *pflag.FlagSet) {
_ = flags.MarkHidden("env-host")
_ = flags.MarkHidden("http-proxy")
}
+ // Not sure we want these exposed yet. If we do, they need to be documented in man pages
+ _ = flags.MarkHidden("override-arch")
+ _ = flags.MarkHidden("override-os")
}
func init() {
diff --git a/cmd/podman/containers/rm.go b/cmd/podman/containers/rm.go
index f01462447..b25473a8d 100644
--- a/cmd/podman/containers/rm.go
+++ b/cmd/podman/containers/rm.go
@@ -74,8 +74,7 @@ func init() {
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: rmCommand,
})
- flags := rmCommand.Flags()
- rmFlags(flags)
+ rmFlags(rmCommand.Flags())
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
diff --git a/cmd/podman/containers/run.go b/cmd/podman/containers/run.go
index 2298691a9..890c6e827 100644
--- a/cmd/podman/containers/run.go
+++ b/cmd/podman/containers/run.go
@@ -63,6 +63,9 @@ func runFlags(flags *pflag.FlagSet) {
_ = flags.MarkHidden("env-host")
_ = flags.MarkHidden("http-proxy")
}
+ // Not sure we want these exposed yet. If we do, they need to be documented in man pages
+ _ = flags.MarkHidden("override-arch")
+ _ = flags.MarkHidden("override-os")
}
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
diff --git a/cmd/podman/containers/runlabel.go b/cmd/podman/containers/runlabel.go
index 11fa362b8..8d1c48ad2 100644
--- a/cmd/podman/containers/runlabel.go
+++ b/cmd/podman/containers/runlabel.go
@@ -42,7 +42,7 @@ func init() {
Parent: containerCmd,
})
- flags := rmCommand.Flags()
+ flags := runlabelCommand.Flags()
flags.StringVar(&runlabelOptions.Authfile, "authfile", auth.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&runlabelOptions.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&runlabelOptions.Credentials, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
@@ -61,6 +61,7 @@ func init() {
_ = flags.MarkHidden("opt1")
_ = flags.MarkHidden("opt2")
_ = flags.MarkHidden("opt3")
+ _ = flags.MarkHidden("signature-policy")
if err := flags.MarkDeprecated("pull", "podman will pull if not found in local storage"); err != nil {
logrus.Error("unable to mark pull flag deprecated")
diff --git a/cmd/podman/containers/stats.go b/cmd/podman/containers/stats.go
index 5b7f52cc7..c61b161e4 100644
--- a/cmd/podman/containers/stats.go
+++ b/cmd/podman/containers/stats.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"strings"
+ "sync"
"text/tabwriter"
"text/template"
@@ -111,14 +112,20 @@ func stats(cmd *cobra.Command, args []string) error {
}
}
statsOptions.StatChan = make(chan []*define.ContainerStats, 1)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
go func() {
for reports := range statsOptions.StatChan {
if err := outputStats(reports); err != nil {
logrus.Error(err)
}
}
+ wg.Done()
+
}()
- return registry.ContainerEngine().ContainerStats(registry.Context(), args, statsOptions)
+ err := registry.ContainerEngine().ContainerStats(registry.Context(), args, statsOptions)
+ wg.Wait()
+ return err
}
func outputStats(reports []*define.ContainerStats) error {
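The `sync.WaitGroup` above closes the `--no-stream` race: previously `stats` could return (and the process exit) before the printing goroutine had drained `StatChan`. A minimal, self-contained sketch of the same pattern, with all names illustrative rather than podman's:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	results := make(chan int, 1)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Consumer: drain the channel until the producer closes it.
		for r := range results {
			fmt.Println("report:", r)
		}
	}()

	// Producer: send the reports, then close the channel.
	for i := 0; i < 3; i++ {
		results <- i
	}
	close(results)

	// Without this Wait, main could exit before the last report is printed.
	wg.Wait()
}
```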
diff --git a/cmd/podman/generate/systemd.go b/cmd/podman/generate/systemd.go
index 20d9748d4..75031e070 100644
--- a/cmd/podman/generate/systemd.go
+++ b/cmd/podman/generate/systemd.go
@@ -39,6 +39,9 @@ func init() {
flags.UintVarP(&systemdTimeout, "time", "t", containerConfig.Engine.StopTimeout, "Stop timeout override")
flags.StringVar(&systemdOptions.RestartPolicy, "restart-policy", "on-failure", "Systemd restart-policy")
flags.BoolVarP(&systemdOptions.New, "new", "", false, "Create a new container instead of starting an existing one")
+ flags.StringVar(&systemdOptions.ContainerPrefix, "container-prefix", "container", "Systemd unit name prefix for containers")
+ flags.StringVar(&systemdOptions.PodPrefix, "pod-prefix", "pod", "Systemd unit name prefix for pods")
+ flags.StringVar(&systemdOptions.Separator, "separator", "-", "Systemd unit name separator between name/id and prefix")
flags.SetNormalizeFunc(utils.AliasFlags)
}
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index 06a7efd25..2efc795cd 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -126,6 +126,7 @@ func buildFlags(flags *pflag.FlagSet) {
os.Exit(1)
}
flags.AddFlagSet(&fromAndBudFlags)
+ _ = flags.MarkHidden("signature-policy")
}
// build executes the build command.
diff --git a/cmd/podman/images/list.go b/cmd/podman/images/list.go
index 022c90f71..4f8948b8b 100644
--- a/cmd/podman/images/list.go
+++ b/cmd/podman/images/list.go
@@ -74,7 +74,6 @@ func imageListFlagSet(flags *pflag.FlagSet) {
flags.BoolVar(&listFlag.digests, "digests", false, "Show digests")
flags.BoolVarP(&listFlag.noHeading, "noheading", "n", false, "Do not print column headings")
flags.BoolVar(&listFlag.noTrunc, "no-trunc", false, "Do not truncate output")
- flags.BoolVar(&listFlag.noTrunc, "notruncate", false, "Do not truncate output")
flags.BoolVarP(&listFlag.quiet, "quiet", "q", false, "Display only image IDs")
flags.StringVar(&listFlag.sort, "sort", "created", "Sort by "+sortFields.String())
flags.BoolVarP(&listFlag.history, "history", "", false, "Display the image name history")
diff --git a/cmd/podman/images/load.go b/cmd/podman/images/load.go
index 4bbffd432..a984ad81f 100644
--- a/cmd/podman/images/load.go
+++ b/cmd/podman/images/load.go
@@ -60,10 +60,7 @@ func loadFlags(flags *pflag.FlagSet) {
flags.StringVarP(&loadOpts.Input, "input", "i", "", "Read from specified archive file (default: stdin)")
flags.BoolVarP(&loadOpts.Quiet, "quiet", "q", false, "Suppress the output")
flags.StringVar(&loadOpts.SignaturePolicy, "signature-policy", "", "Pathname of signature policy file")
- if registry.IsRemote() {
- _ = flags.MarkHidden("signature-policy")
- }
-
+ _ = flags.MarkHidden("signature-policy")
}
func load(cmd *cobra.Command, args []string) error {
diff --git a/cmd/podman/images/pull.go b/cmd/podman/images/pull.go
index 9e883703f..9e137b5d6 100644
--- a/cmd/podman/images/pull.go
+++ b/cmd/podman/images/pull.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -17,7 +18,8 @@ import (
// CLI-only fields into the API types.
type pullOptionsWrapper struct {
entities.ImagePullOptions
- TLSVerifyCLI bool // CLI only
+ TLSVerifyCLI bool // CLI only
+ CredentialsCLI string
}
var (
@@ -77,7 +79,7 @@ func pullFlags(flags *pflag.FlagSet) {
flags.BoolVar(&pullOptions.AllTags, "all-tags", false, "All tagged images in the repository will be pulled")
flags.StringVar(&pullOptions.Authfile, "authfile", auth.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&pullOptions.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
- flags.StringVar(&pullOptions.Credentials, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.StringVar(&pullOptions.CredentialsCLI, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
flags.StringVar(&pullOptions.OverrideArch, "override-arch", "", "Use `ARCH` instead of the architecture of the machine for choosing images")
flags.StringVar(&pullOptions.OverrideOS, "override-os", "", "Use `OS` instead of the running OS for choosing images")
flags.BoolVarP(&pullOptions.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
@@ -87,9 +89,9 @@ func pullFlags(flags *pflag.FlagSet) {
if registry.IsRemote() {
_ = flags.MarkHidden("authfile")
_ = flags.MarkHidden("cert-dir")
- _ = flags.MarkHidden("signature-policy")
_ = flags.MarkHidden("tls-verify")
}
+ _ = flags.MarkHidden("signature-policy")
}
// imagePull implements the command for pulling images.
@@ -107,6 +109,14 @@ func imagePull(cmd *cobra.Command, args []string) error {
}
}
+ if pullOptions.CredentialsCLI != "" {
+ creds, err := util.ParseRegistryCreds(pullOptions.CredentialsCLI)
+ if err != nil {
+ return err
+ }
+ pullOptions.Username = creds.Username
+ pullOptions.Password = creds.Password
+ }
// Let's do all the remaining Yoga in the API to prevent us from
// scattering logic across (too) many parts of the code.
pullReport, err := registry.ImageEngine().Pull(registry.GetContext(), args[0], pullOptions.ImagePullOptions)
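The new `CredentialsCLI` field keeps the raw `USERNAME[:PASSWORD]` string out of `entities.ImagePullOptions`; only the parsed username and password cross the API boundary. A rough sketch of what a `util.ParseRegistryCreds`-style helper plausibly does (an assumption about its behavior, not the actual implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// credentials mirrors the username/password pair the wrapper copies
// into the API options; the type itself is illustrative.
type credentials struct {
	Username string
	Password string
}

// parseRegistryCreds is an assumed stand-in for util.ParseRegistryCreds:
// it splits a "USERNAME[:PASSWORD]" string at the first colon.
func parseRegistryCreds(creds string) (*credentials, error) {
	if creds == "" {
		return nil, fmt.Errorf("credentials can't be empty")
	}
	parts := strings.SplitN(creds, ":", 2)
	c := &credentials{Username: parts[0]}
	if len(parts) == 2 {
		c.Password = parts[1]
	}
	return c, nil
}

func main() {
	c, err := parseRegistryCreds("alice:s3cr3t")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Username, c.Password) // alice s3cr3t
}
```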
diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go
index dd536213f..a1614dc7a 100644
--- a/cmd/podman/images/push.go
+++ b/cmd/podman/images/push.go
@@ -7,6 +7,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -16,7 +17,8 @@ import (
// CLI-only fields into the API types.
type pushOptionsWrapper struct {
entities.ImagePushOptions
- TLSVerifyCLI bool // CLI only
+ TLSVerifyCLI bool // CLI only
+ CredentialsCLI string
}
var (
@@ -73,7 +75,7 @@ func pushFlags(flags *pflag.FlagSet) {
flags.StringVar(&pushOptions.Authfile, "authfile", auth.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&pushOptions.CertDir, "cert-dir", "", "Path to a directory containing TLS certificates and keys")
flags.BoolVar(&pushOptions.Compress, "compress", false, "Compress tarball image layers when pushing to a directory using the 'dir' transport. (default is same compression type as source)")
- flags.StringVar(&pushOptions.Credentials, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.StringVar(&pushOptions.CredentialsCLI, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
flags.StringVar(&pushOptions.DigestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
flags.StringVarP(&pushOptions.Format, "format", "f", "", "Manifest type (oci, v2s1, or v2s2) to use when pushing an image using the 'dir' transport (default is manifest type of source)")
flags.BoolVarP(&pushOptions.Quiet, "quiet", "q", false, "Suppress output information when pushing images")
@@ -87,9 +89,9 @@ func pushFlags(flags *pflag.FlagSet) {
_ = flags.MarkHidden("cert-dir")
_ = flags.MarkHidden("compress")
_ = flags.MarkHidden("quiet")
- _ = flags.MarkHidden("signature-policy")
_ = flags.MarkHidden("tls-verify")
}
+ _ = flags.MarkHidden("signature-policy")
}
// imagePush implements the command for pushing images.
@@ -122,6 +124,15 @@ func imagePush(cmd *cobra.Command, args []string) error {
}
}
+ if pushOptions.CredentialsCLI != "" {
+ creds, err := util.ParseRegistryCreds(pushOptions.CredentialsCLI)
+ if err != nil {
+ return err
+ }
+ pushOptions.Username = creds.Username
+ pushOptions.Password = creds.Password
+ }
+
// Let's do all the remaining Yoga in the API to prevent us from scattering
// logic across (too) many parts of the code.
return registry.ImageEngine().Push(registry.GetContext(), source, destination, pushOptions.ImagePushOptions)
diff --git a/cmd/podman/manifest/push.go b/cmd/podman/manifest/push.go
index 49c76f40b..a2e68aff1 100644
--- a/cmd/podman/manifest/push.go
+++ b/cmd/podman/manifest/push.go
@@ -1,17 +1,26 @@
package manifest
import (
- "context"
-
"github.com/containers/common/pkg/auth"
+ "github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
+// manifestPushOptsWrapper wraps entities.ManifestPushOptions and prevents leaking
+// CLI-only fields into the API types.
+type manifestPushOptsWrapper struct {
+ entities.ManifestPushOptions
+
+ TLSVerifyCLI bool // CLI only
+ CredentialsCLI string
+}
+
var (
- manifestPushOpts = entities.ManifestPushOptions{}
+ manifestPushOpts = manifestPushOptsWrapper{}
pushCmd = &cobra.Command{
Use: "push [flags] SOURCE DESTINATION",
Short: "Push a manifest list or image index to a registry",
@@ -33,12 +42,12 @@ func init() {
flags.BoolVar(&manifestPushOpts.All, "all", false, "also push the images in the list")
flags.StringVar(&manifestPushOpts.Authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&manifestPushOpts.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
- flags.StringVar(&manifestPushOpts.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
+ flags.StringVar(&manifestPushOpts.CredentialsCLI, "creds", "", "use `[username[:password]]` for accessing the registry")
flags.StringVar(&manifestPushOpts.DigestFile, "digestfile", "", "after copying the image, write the digest of the resulting digest to the file")
flags.StringVarP(&manifestPushOpts.Format, "format", "f", "", "manifest type (oci or v2s2) to attempt to use when pushing the manifest list (default is manifest type of source)")
flags.BoolVarP(&manifestPushOpts.RemoveSignatures, "remove-signatures", "", false, "don't copy signatures when pushing images")
flags.StringVar(&manifestPushOpts.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
- flags.BoolVar(&manifestPushOpts.TlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
+ flags.BoolVar(&manifestPushOpts.TLSVerifyCLI, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
flags.BoolVarP(&manifestPushOpts.Quiet, "quiet", "q", false, "don't output progress information when pushing lists")
if registry.IsRemote() {
_ = flags.MarkHidden("authfile")
@@ -59,7 +68,24 @@ func push(cmd *cobra.Command, args []string) error {
if destSpec == "" {
return errors.Errorf(`invalid destination "%s"`, destSpec)
}
- if err := registry.ImageEngine().ManifestPush(context.Background(), args, manifestPushOpts); err != nil {
+
+ if manifestPushOpts.CredentialsCLI != "" {
+ creds, err := util.ParseRegistryCreds(manifestPushOpts.CredentialsCLI)
+ if err != nil {
+ return err
+ }
+ manifestPushOpts.Username = creds.Username
+ manifestPushOpts.Password = creds.Password
+ }
+
+ // TLS verification in c/image is controlled via a `types.OptionalBool`,
+ // which allows for distinguishing among set-true, set-false, and
+ // unspecified; this is important for implementing a sane way of dealing
+ // with defaults of boolean CLI flags.
+ if cmd.Flags().Changed("tls-verify") {
+ manifestPushOpts.SkipTLSVerify = types.NewOptionalBool(!manifestPushOpts.TLSVerifyCLI)
+ }
+ if err := registry.ImageEngine().ManifestPush(registry.Context(), args, manifestPushOpts.ManifestPushOptions); err != nil {
return errors.Wrapf(err, "error pushing manifest %s to %s", listImageSpec, destSpec)
}
return nil
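A self-contained sketch of the tri-state flag handling described in the comment above, using a local stand-in for `types.OptionalBool` (the stand-in type and names are assumptions for illustration, not the containers/image definition):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// optionalBool is a local stand-in for containers/image's types.OptionalBool,
// which distinguishes "set to true" and "set to false" from "not specified".
type optionalBool int

const (
	optionalBoolUndefined optionalBool = iota
	optionalBoolTrue
	optionalBoolFalse
)

func main() {
	flags := pflag.NewFlagSet("push", pflag.ExitOnError)
	tlsVerify := flags.Bool("tls-verify", true, "require HTTPS and verify certificates")
	_ = flags.Parse([]string{}) // simulate a command line without --tls-verify

	// Only propagate a value when the user actually set the flag; otherwise
	// leave it undefined so lower layers can apply their own default.
	skipTLSVerify := optionalBoolUndefined
	if flags.Changed("tls-verify") {
		if *tlsVerify {
			skipTLSVerify = optionalBoolFalse
		} else {
			skipTLSVerify = optionalBoolTrue
		}
	}
	fmt.Println("skip TLS verification:", skipTLSVerify) // 0, i.e. undefined
}
```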
diff --git a/cmd/podman/play/kube.go b/cmd/podman/play/kube.go
index 2499b54b9..1fbf24d5e 100644
--- a/cmd/podman/play/kube.go
+++ b/cmd/podman/play/kube.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
"github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -18,7 +19,8 @@ import (
type playKubeOptionsWrapper struct {
entities.PlayKubeOptions
- TLSVerifyCLI bool
+ TLSVerifyCLI bool
+ CredentialsCLI string
}
var (
@@ -49,7 +51,7 @@ func init() {
flags := kubeCmd.Flags()
flags.SetNormalizeFunc(utils.AliasFlags)
- flags.StringVar(&kubeOptions.Credentials, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
+ flags.StringVar(&kubeOptions.CredentialsCLI, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
flags.StringVar(&kubeOptions.Network, "network", "", "Connect pod to CNI network(s)")
flags.BoolVarP(&kubeOptions.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
if !registry.IsRemote() {
@@ -59,6 +61,8 @@ func init() {
flags.StringVar(&kubeOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.StringVar(&kubeOptions.SeccompProfileRoot, "seccomp-profile-root", defaultSeccompRoot, "Directory path for seccomp profiles")
}
+
+ _ = flags.MarkHidden("signature-policy")
}
func kube(cmd *cobra.Command, args []string) error {
@@ -74,6 +78,14 @@ func kube(cmd *cobra.Command, args []string) error {
return errors.Wrapf(err, "error getting authfile %s", kubeOptions.Authfile)
}
}
+ if kubeOptions.CredentialsCLI != "" {
+ creds, err := util.ParseRegistryCreds(kubeOptions.CredentialsCLI)
+ if err != nil {
+ return err
+ }
+ kubeOptions.Username = creds.Username
+ kubeOptions.Password = creds.Password
+ }
report, err := registry.ContainerEngine().PlayKube(registry.GetContext(), args[0], kubeOptions.PlayKubeOptions)
if err != nil {
diff --git a/cmd/podman/pods/inspect.go b/cmd/podman/pods/inspect.go
index 1e333247b..34c07c11b 100644
--- a/cmd/podman/pods/inspect.go
+++ b/cmd/podman/pods/inspect.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/pkg/errors"
@@ -36,6 +37,7 @@ func init() {
})
flags := inspectCmd.Flags()
flags.BoolVarP(&inspectOptions.Latest, "latest", "l", false, "Act on the latest pod podman is aware of")
+ flags.StringVarP(&inspectOptions.Format, "format", "f", "json", "Format the output to a Go template or json")
if registry.IsRemote() {
_ = flags.MarkHidden("latest")
}
@@ -54,10 +56,11 @@ func inspect(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- b, err := json.MarshalIndent(responses, "", " ")
- if err != nil {
- return err
+ var data interface{} = responses
+ var out formats.Writer = formats.JSONStruct{Output: data}
+ if inspectOptions.Format != "json" {
+ out = formats.StdoutTemplate{Output: data, Template: inspectOptions.Format}
}
- fmt.Println(string(b))
- return nil
+
+ return out.Out()
}
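A simplified sketch of the JSON-vs-template split introduced above, using only the standard library in place of buildah's `formats` package (the helper and report type are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"text/template"
)

type podReport struct {
	Name string
	ID   string
}

// printReport is a simplified stand-in for the formats.JSONStruct /
// formats.StdoutTemplate selection in the diff above.
func printReport(format string, data interface{}) error {
	if format == "json" {
		b, err := json.MarshalIndent(data, "", "    ")
		if err != nil {
			return err
		}
		fmt.Println(string(b))
		return nil
	}
	tmpl, err := template.New("inspect").Parse(format)
	if err != nil {
		return err
	}
	return tmpl.Execute(os.Stdout, data)
}

func main() {
	r := podReport{Name: "mypod", ID: "0123abcd"}
	_ = printReport("json", r)            // pretty-printed JSON (the default)
	_ = printReport("{{.Name}}\n", r)     // Go-template output
}
```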
diff --git a/cmd/podman/root.go b/cmd/podman/root.go
index dffd9b534..3796b8e27 100644
--- a/cmd/podman/root.go
+++ b/cmd/podman/root.go
@@ -206,6 +206,7 @@ func rootFlags(opts *entities.PodmanConfig, flags *pflag.FlagSet) {
flags.IntVar(&opts.MaxWorks, "max-workers", 0, "The maximum number of workers for parallel operations")
flags.StringVar(&cfg.Engine.Namespace, "namespace", cfg.Engine.Namespace, "Set the libpod namespace, used to create separate views of the containers and pods on the system")
flags.StringVar(&cfg.Engine.StaticDir, "root", "", "Path to the root directory in which data, including images, is stored")
+ flags.StringVar(&opts.RegistriesConf, "registries-conf", "", "Path to a registries.conf to use for image processing")
flags.StringVar(&opts.Runroot, "runroot", "", "Path to the 'run directory' where all state information is stored")
flags.StringVar(&opts.RuntimePath, "runtime", "", "Path to the OCI-compatible binary used to run containers, default is /usr/bin/runc")
// -s is deprecated due to conflict with -s on subcommands
@@ -225,6 +226,7 @@ func rootFlags(opts *entities.PodmanConfig, flags *pflag.FlagSet) {
"cpu-profile",
"default-mounts-file",
"max-workers",
+ "registries-conf",
"trace",
} {
if err := flags.MarkHidden(f); err != nil {
diff --git a/cmd/podman/system/service.go b/cmd/podman/system/service.go
index b5dd2f2aa..0f42ae28b 100644
--- a/cmd/podman/system/service.go
+++ b/cmd/podman/system/service.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/libpod/pkg/util"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
)
var (
@@ -26,13 +27,12 @@ Enable a listening service for API access to Podman commands.
`
srvCmd = &cobra.Command{
- Use: "service [flags] [URI]",
- Args: cobra.MaximumNArgs(1),
- Short: "Run API service",
- Long: srvDescription,
- RunE: service,
- Example: `podman system service --time=0 unix:///tmp/podman.sock
- podman system service --varlink --time=0 unix:///tmp/podman.sock`,
+ Use: "service [flags] [URI]",
+ Args: cobra.MaximumNArgs(1),
+ Short: "Run API service",
+ Long: srvDescription,
+ RunE: service,
+ Example: `podman system service --time=0 unix:///tmp/podman.sock`,
}
srvArgs = struct {
@@ -50,10 +50,17 @@ func init() {
flags := srvCmd.Flags()
flags.Int64VarP(&srvArgs.Timeout, "time", "t", 5, "Time until the service session expires in seconds. Use 0 to disable the timeout")
- flags.Int64Var(&srvArgs.Timeout, "timeout", 5, "Time until the service session expires in seconds. Use 0 to disable the timeout")
flags.BoolVar(&srvArgs.Varlink, "varlink", false, "Use legacy varlink service instead of REST")
_ = flags.MarkDeprecated("varlink", "varlink API is deprecated.")
+ flags.SetNormalizeFunc(aliasTimeoutFlag)
+}
+
+func aliasTimeoutFlag(_ *pflag.FlagSet, name string) pflag.NormalizedName {
+ if name == "timeout" {
+ name = "time"
+ }
+ return pflag.NormalizedName(name)
}
func service(cmd *cobra.Command, args []string) error {
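This replaces the old trick of registering `--timeout` as a second flag bound to the same variable: with a normalize function, `--timeout` is rewritten to `--time` before lookup, so only one flag exists (and only one appears in `--help`). A runnable sketch of the pattern:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// aliasTimeoutFlag rewrites --timeout to --time before flag lookup.
func aliasTimeoutFlag(_ *pflag.FlagSet, name string) pflag.NormalizedName {
	if name == "timeout" {
		name = "time"
	}
	return pflag.NormalizedName(name)
}

func main() {
	flags := pflag.NewFlagSet("service", pflag.ExitOnError)
	timeout := flags.Int64P("time", "t", 5, "session timeout in seconds")
	flags.SetNormalizeFunc(aliasTimeoutFlag)

	// --timeout is silently normalized to --time, so both spellings
	// update the same flag value.
	_ = flags.Parse([]string{"--timeout", "0"})
	fmt.Println(*timeout) // 0
}
```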
diff --git a/cmd/podman/system/varlink.go b/cmd/podman/system/varlink.go
index c83f5ff76..6a38b3d28 100644
--- a/cmd/podman/system/varlink.go
+++ b/cmd/podman/system/varlink.go
@@ -1,3 +1,5 @@
+// +build linux
+
package system
import (
@@ -20,7 +22,7 @@ var (
Long: varlinkDescription,
RunE: varlinkE,
Example: `podman varlink unix:/run/podman/io.podman
- podman varlink --timeout 5000 unix:/run/podman/io.podman`,
+ podman varlink --time 5000 unix:/run/podman/io.podman`,
}
varlinkArgs = struct {
Timeout int64
@@ -34,8 +36,7 @@ func init() {
})
flags := varlinkCmd.Flags()
flags.Int64VarP(&varlinkArgs.Timeout, "time", "t", 1000, "Time until the varlink session expires in milliseconds. Use 0 to disable the timeout")
- flags.Int64Var(&varlinkArgs.Timeout, "timeout", 1000, "Time until the varlink session expires in milliseconds. Use 0 to disable the timeout")
-
+ flags.SetNormalizeFunc(aliasTimeoutFlag)
}
func varlinkE(cmd *cobra.Command, args []string) error {
diff --git a/commands-demo.md b/commands-demo.md
index a32fdbcad..555f83af9 100644
--- a/commands-demo.md
+++ b/commands-demo.md
@@ -28,8 +28,10 @@
| [podman-history(1)](https://podman.readthedocs.io/en/latest/markdown/podman-history.1.html) | Shows the history of an image |
| [podman-image(1)](https://podman.readthedocs.io/en/latest/image.html) | Manage Images |
| [podman-image-exists(1)](https://podman.readthedocs.io/en/latest/markdown/podman-image-exists.1.html) | Check if an image exists in local storage |
+| [podman-image-diff(1)](https://podman.readthedocs.io/en/latest/markdown/podman-image-diff.1.html) | Inspect changes on an image's filesystem. |
| [podman-image-prune(1)](https://podman.readthedocs.io/en/latest/markdown/podman-image-prune.1.html) | Remove all unused images from the local store |
| [podman-image-sign(1)](https://podman.readthedocs.io/en/latest/markdown/podman-image-sign.1.html) | Create a signature for an image |
+| [podman-image-search(1)](https://podman.readthedocs.io/en/latest/markdown/podman-search.1.html) | Search a registry for an image. |
| [podman-image-tree(1)](https://podman.readthedocs.io/en/latest/markdown/podman-image-tree.1.html) | Prints layer hierarchy of an image in a tree format |
| [podman-image-trust(1)](https://podman.readthedocs.io/en/latest/markdown/podman-image-trust.1.html) | Manage container registry image trust policy |
| [podman-images(1)](https://podman.readthedocs.io/en/latest/markdown/podman-images.1.html) | List images in local storage | [![...](/docs/source/markdown/play.png)](https://podman.io/asciinema/podman/images/) | [Here](https://github.com/containers/Demos/blob/master/podman_cli/podman_images.sh) |
diff --git a/completions/bash/podman b/completions/bash/podman
index 9baf7901e..a58becaf0 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -2838,7 +2838,10 @@ _podman_generate_systemd() {
local options_with_args="
--restart-policy
-t
- --time"
+ --time
+ --container-prefix
+ --pod-prefix
+ --separator"
local boolean_options="
-h
@@ -3342,6 +3345,18 @@ _podman_pod_unpause() {
esac
}
+_podman_pod_inspect() {
+ local options_with_args="
+ --format
+ -f
+ --latest
+ -l
+ "
+
+ _complete_ "$options_with_args"
+}
+
+
_podman_pod() {
local boolean_options="
--help
@@ -3359,6 +3374,7 @@ _podman_pod() {
stop
top
unpause
+ inspect
"
local aliases="
list
diff --git a/contrib/cirrus/integration_test.sh b/contrib/cirrus/integration_test.sh
index 0f2b2ab7e..c92f123fd 100755
--- a/contrib/cirrus/integration_test.sh
+++ b/contrib/cirrus/integration_test.sh
@@ -45,6 +45,7 @@ case "$SPECIALMODE" in
bindings)
make
make install PREFIX=/usr ETCDIR=/etc
+ export PATH=$PATH:`pwd`/hack
cd pkg/bindings/test && ginkgo -trace -noColor -debug -r
;;
none)
diff --git a/contrib/dependencies.txt b/contrib/dependencies.txt
index 5a6fa9834..f61912fde 100644
--- a/contrib/dependencies.txt
+++ b/contrib/dependencies.txt
@@ -2,7 +2,6 @@
btrfs-progs-devel
bzip2
-container-selinux
containernetworking-cni
device-mapper-devel
findutils
diff --git a/contrib/gate/Dockerfile b/contrib/gate/Dockerfile
index f7cd8f2b3..f86709b00 100644
--- a/contrib/gate/Dockerfile
+++ b/contrib/gate/Dockerfile
@@ -1,4 +1,4 @@
-FROM fedora:31
+FROM fedora:32
ENV GOPATH="/var/tmp/go" \
GOBIN="/var/tmp/go/bin" \
@@ -7,11 +7,13 @@ ENV GOPATH="/var/tmp/go" \
GOSRC="/var/tmp/go/src/github.com/containers/libpod"
# Only needed for installing build-time dependencies, then will be removed
-COPY / $GOSRC
+COPY . $GOSRC
# Install packages from dependencies.txt, ignoring commented lines
+# Note: adding conmon and crun so podman command checks will work
RUN dnf -y install \
- $(grep "^[^#]" $GOSRC/contrib/dependencies.txt) \
+ $(grep "^[^#]" $GOSRC/contrib/dependencies.txt) diffutils containers-common fuse-overlayfs conmon crun runc --exclude container-selinux; \
+ sed -i -e 's|^#mount_program|mount_program|g' /etc/containers/storage.conf \
&& dnf clean all
# Install dependencies
diff --git a/docs/Readme.md b/docs/Readme.md
index 4d10cfa56..987a5b8e4 100644
--- a/docs/Readme.md
+++ b/docs/Readme.md
@@ -1,7 +1,7 @@
# Podman Documentation
The online man pages and other documents regarding Podman can be found at
-[Read The Docs](https://podman.readthedocs.io/en/latest/index.html). The man pages
+[Read The Docs](https://podman.readthedocs.io). The man pages
can be found under the [Commands](https://podman.readthedocs.io/en/latest/Commands.html)
link on that page.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 9cc8e7af8..1c46f1c8a 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -2,7 +2,7 @@
What is Podman?
==================================
-Podman_ is a daemonless, open source, Linux native tool designed to make it easy to find, run, build, share and deploy applications using Open Containers Initiative (OCI_) Containers_ and `Container Images`_. Podman provides a command line interface (CLI) familiar to anyone who has used the Docker `Container Engine`_. Most users can simply alias Docker to Podman (`alias docker=podman`) without any problems. Similiar other common `Container Engines`_ (Docker, CRI-O, containerd), Podman relies on an OCI compliant `Container Runtime`_ (runc, crun, runv, etc) to interface with the operating system and create the running containers.This makes the running containers created by Podman nearly indistinguishable from those created by any other common container engine.
+Podman_ is a daemonless, open source, Linux native tool designed to make it easy to find, run, build, share and deploy applications using Open Containers Initiative (OCI_) Containers_ and `Container Images`_. Podman provides a command line interface (CLI) familiar to anyone who has used the Docker `Container Engine`_. Most users can simply alias Docker to Podman (`alias docker=podman`) without any problems. Similar to other common `Container Engines`_ (Docker, CRI-O, containerd), Podman relies on an OCI compliant `Container Runtime`_ (runc, crun, runv, etc) to interface with the operating system and create the running containers. This makes the running containers created by Podman nearly indistinguishable from those created by any other common container engine.
Containers under the control of Podman can either be run by root or by a non-privileged user. Podman manages the entire container ecosystem which includes pods, containers, container images, and container volumes using the libpod_ library. Podman specializes in all of the commands and functions that help you to maintain and modify OCI container images, such as pulling and tagging. It allows you to create, run, and maintain those containers and container images in a production environment.
diff --git a/docs/source/markdown/links/podman-image-diff.1 b/docs/source/markdown/links/podman-image-diff.1
deleted file mode 100644
index ac4881f98..000000000
--- a/docs/source/markdown/links/podman-image-diff.1
+++ /dev/null
@@ -1 +0,0 @@
-.so man1/podman-diff.1
diff --git a/docs/source/markdown/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 475634fde..3a6077832 100644
--- a/docs/source/markdown/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -502,7 +502,7 @@ Current supported mount TYPES are `bind`, `volume`, and `tmpfs`.
· dst, destination, target: mount destination spec.
- · ro, read-only: true or false (default).
+ · ro, readonly: true or false (default).
Options specific to bind:
diff --git a/docs/source/markdown/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md
index b24a1f8aa..f44a3d3d9 100644
--- a/docs/source/markdown/podman-exec.1.md
+++ b/docs/source/markdown/podman-exec.1.md
@@ -13,7 +13,7 @@ podman\-exec - Execute a command in a running container
## OPTIONS
-**--detach**
+**--detach**, **-d**
Start the exec session, but do not attach to it. The command will run in the background and the exec session will be automatically removed when it completes. The **podman exec** command will print the ID of the exec session and exit immediately after it starts.
diff --git a/docs/source/markdown/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index fa04f81f9..72031b19b 100644
--- a/docs/source/markdown/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
@@ -40,6 +40,18 @@ Override the default stop timeout for the container with the given value.
Set the systemd restart policy. The restart-policy must be one of: "no", "on-success", "on-failure", "on-abnormal",
"on-watchdog", "on-abort", or "always". The default policy is *on-failure*.
+**--container-prefix**=*prefix*
+
+Set the systemd unit name prefix for containers. The default is *container*.
+
+**--pod-prefix**=*prefix*
+
+Set the systemd unit name prefix for pods. The default is *pod*.
+
+**--separator**=*separator*
+
+Set the systemd unit name separator between the name/id of a container/pod and the prefix. The default is *-*.
+
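Taken together, these three options control how the generated unit names are assembled. A minimal sketch, assuming a container named `nginx`:

```
$ podman generate systemd --name --container-prefix ctr --separator _ nginx
# emits a unit named ctr_nginx.service instead of the
# default container-nginx.service
```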
## Examples
### Generate and print a systemd unit file for a container
diff --git a/docs/source/markdown/podman-history.1.md b/docs/source/markdown/podman-history.1.md
index 1a8f8906c..26b213af9 100644
--- a/docs/source/markdown/podman-history.1.md
+++ b/docs/source/markdown/podman-history.1.md
@@ -37,10 +37,13 @@ Display sizes and dates in human readable format (default *true*).
Do not truncate the output (default *false*).
+**--notruncate**
+
+Do not truncate the output.
+
**--quiet**, **-q**=*true|false*
Print the numeric IDs only (default *false*).
-
**--format**=*format*
Alter the output for a format like 'json' or a Go template.
diff --git a/docs/source/markdown/podman-image-diff.1.md b/docs/source/markdown/podman-image-diff.1.md
new file mode 100644
index 000000000..1e7397cd8
--- /dev/null
+++ b/docs/source/markdown/podman-image-diff.1.md
@@ -0,0 +1,46 @@
+% podman-image-diff(1)
+
+## NAME
+podman-image-diff - Inspect changes on an image's filesystem
+
+## SYNOPSIS
+**podman image diff** [*options*] *name*
+
+## DESCRIPTION
+Displays changes on a container or image's filesystem. The container or image will be compared to its parent layer.
+
+## OPTIONS
+
+**--format**
+
+Alter the output into a different format. The only valid format for diff is `json`.
+
+## EXAMPLE
+
+```
+# podman diff redis:old redis:alpine
+C /usr
+C /usr/local
+C /usr/local/bin
+A /usr/local/bin/docker-entrypoint.sh
+```
+
+```
+# podman diff --format json redis:old redis:alpine
+{
+ "changed": [
+ "/usr",
+ "/usr/local",
+ "/usr/local/bin"
+ ],
+ "added": [
+ "/usr/local/bin/docker-entrypoint.sh"
+ ]
+}
+```
+
+## SEE ALSO
+podman(1)
+
+## HISTORY
+August 2017, Originally compiled by Ryan Cole <rycole@redhat.com>
diff --git a/docs/source/markdown/podman-image.1.md b/docs/source/markdown/podman-image.1.md
index 1552098ac..dfff57b31 100644
--- a/docs/source/markdown/podman-image.1.md
+++ b/docs/source/markdown/podman-image.1.md
@@ -14,6 +14,7 @@ The image command allows you to manage images
| Command | Man Page | Description |
| -------- | ----------------------------------------------- | --------------------------------------------------------------------------- |
| build | [podman-build(1)](podman-build.1.md) | Build a container using a Dockerfile. |
+| diff | [podman-image-diff(1)](podman-image-diff.1.md) | Inspect changes on an image's filesystem. |
| exists | [podman-image-exists(1)](podman-image-exists.1.md) | Check if an image exists in local storage. |
| history | [podman-history(1)](podman-history.1.md) | Show the history of an image. |
| import | [podman-import(1)](podman-import.1.md) | Import a tarball and save it as a filesystem image. |
@@ -25,6 +26,7 @@ The image command allows you to manage images
| push | [podman-push(1)](podman-push.1.md) | Push an image from local storage to elsewhere. |
| rm | [podman-rmi(1)](podman-rmi.1.md) | Removes one or more locally stored images. |
| save | [podman-save(1)](podman-save.1.md) | Save an image to docker-archive or oci. |
+| search | [podman-search(1)](podman-search.1.md) | Search a registry for an image. |
| sign | [podman-image-sign(1)](podman-image-sign.1.md) | Create a signature for an image. |
| tag | [podman-tag(1)](podman-tag.1.md) | Add an additional name to a local image. |
| untag | [podman-untag(1)](podman-untag.1.md) | Removes one or more names from a locally-stored image. |
diff --git a/docs/source/markdown/podman-manifest-push.1.md b/docs/source/markdown/podman-manifest-push.1.md
index 38d0c5904..ab3287a7c 100644
--- a/docs/source/markdown/podman-manifest-push.1.md
+++ b/docs/source/markdown/podman-manifest-push.1.md
@@ -19,7 +19,7 @@ The list image's ID and the digest of the image's manifest.
Push the images mentioned in the manifest list or image index, in addition to
the list or index itself.
-**--authfile** *path*
+**--authfile**=*path*
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. (Not available for remote commands)
@@ -27,22 +27,22 @@ If the authorization state is not found there, $HOME/.docker/config.json is chec
Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE
environment variable. `export REGISTRY_AUTH_FILE=path`
-**--cert-dir** *path*
+**--cert-dir**=*path*
Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
Default certificates directory is _/etc/containers/certs.d_. (Not available for remote commands)
-**--creds** *creds*
+**--creds**=*creds*
The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
-**--digestfile** *Digestfile*
+**--digestfile**=*Digestfile*
After copying the image, write the digest of the resulting image to the file.
-**--format, -f**
+**--format**, **-f**=*format*
Manifest list type (oci or v2s2) to use when pushing the list (default is oci).
@@ -50,15 +50,19 @@ Manifest list type (oci or v2s2) to use when pushing the list (default is oci).
Delete the manifest list or image index from local storage if pushing succeeds.
+**--quiet**, **-q**
+
+When writing the manifest, suppress progress output.
+
**--remove-signatures**
Don't copy signatures when pushing images.
-**--sign-by** *fingerprint*
+**--sign-by**=*fingerprint*
Sign the pushed images using the GPG key that matches the specified fingerprint.
-**--tls-verify** *bool-value*
+**--tls-verify**
Require HTTPS and verify certificates when talking to container registries (defaults to true) (Not available for remote commands)
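Combining the `=`-style options above, a hedged example (the list and registry names are placeholders):

```
$ podman manifest push --all mylist:v1.0 docker://registry.example.com/mylist:v1.0
```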
diff --git a/docs/source/markdown/podman-network-inspect.1.md b/docs/source/markdown/podman-network-inspect.1.md
index ca6875d18..86fa2552e 100644
--- a/docs/source/markdown/podman-network-inspect.1.md
+++ b/docs/source/markdown/podman-network-inspect.1.md
@@ -10,10 +10,6 @@ podman\-network\-inspect - Displays the raw CNI network configuration for one or
Display the raw (JSON format) network configuration. This command is not available for rootless users.
## OPTIONS
-**--quiet**, **-q**
-
-The `quiet` option will restrict the output to only the network names.
-
**--format**, **-f**
Pretty-print networks to JSON or using a Go template.
diff --git a/docs/source/markdown/podman-pod-inspect.1.md b/docs/source/markdown/podman-pod-inspect.1.md
index 831d28259..bc04b2b32 100644
--- a/docs/source/markdown/podman-pod-inspect.1.md
+++ b/docs/source/markdown/podman-pod-inspect.1.md
@@ -18,21 +18,50 @@ to run pods such as CRI-O, the last started pod could be from either of those me
The latest option is not supported on the remote client.
+**-f**, **--format**=*format*
+
+Change the default output format. This can be a supported type like 'json'
+or a Go template.
+Valid placeholders for the Go template are listed below:
+
+| **Placeholder** | **Description** |
+| ----------------- | ----------------------------------------------------------------------------- |
+| .ID | Pod ID |
+| .Name | Pod name |
+| .State | Pod state |
+| .Hostname | Pod hostname |
+| .Labels | Pod labels |
+| .Created | Time when the pod was created |
+| .CreateCgroup | Whether cgroup was created |
+| .CgroupParent | Pod cgroup parent |
+| .CgroupPath | Pod cgroup path |
+| .CreateInfra | Whether an infra container was created |
+| .InfraContainerID | Pod infrastructure ID |
+| .SharedNamespaces | Pod shared namespaces |
+| .NumContainers | Number of containers in the pod |
+| .Containers | Pod containers |
+
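For example, a minimal sketch using two of the placeholders above (the pod name and container count are illustrative):

```
$ podman pod inspect --format '{{.Name}} has {{.NumContainers}} container(s)' foobar
foobar has 2 container(s)
```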
## EXAMPLE
```
# podman pod inspect foobar
{
- "Config": {
- "id": "3513ca70583dd7ef2bac83331350f6b6c47d7b4e526c908e49d89ebf720e4693",
- "name": "foobar",
- "labels": {},
- "cgroupParent": "/libpod_parent",
- "UsePodCgroup": true,
- "created": "2018-08-08T11:15:18.823115347-05:00"
- },
- "State": {
- "CgroupPath": ""
- },
+
+ "Id": "3513ca70583dd7ef2bac83331350f6b6c47d7b4e526c908e49d89ebf720e4693",
+ "Name": "foobar",
+ "Labels": {},
+ "CgroupParent": "/libpod_parent",
+ "CreateCgroup": true,
+ "Created": "2018-08-08T11:15:18.823115347-05:00"
+ "State": "created",
+ "Hostname": "",
+ "SharedNamespaces": [
+ "uts",
+ "ipc",
+ "net"
+ ],
+ "CreateInfra": false,
+ "InfraContainerID": "1020dd70583dd7ff2bac83331350f6b6e007de0d026c908e49d89ebf891d4699",
+ "CgroupPath": "",
"Containers": [
{
"id": "d53f8bf1e9730281264aac6e6586e327429f62c704abea4b6afb5d8a2b2c9f2c",
diff --git a/docs/source/markdown/podman-pull.1.md b/docs/source/markdown/podman-pull.1.md
index aa558526a..5d941219a 100644
--- a/docs/source/markdown/podman-pull.1.md
+++ b/docs/source/markdown/podman-pull.1.md
@@ -73,7 +73,10 @@ The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo.
-**--override-arch**=ARCH
+**--override-os**=*OS*
+
+Use *OS* instead of the running OS for choosing images.
+
+**--override-arch**=*ARCH*
Override the machine's default architecture of the image to be pulled. For example, `arm`.
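A hedged illustration combining the two override flags (the image name is a placeholder):

```
$ podman pull --override-os linux --override-arch arm alpine
# pulls the linux/arm variant of the image rather than the
# variant matching the local machine
```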
diff --git a/docs/source/markdown/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index 4c236b520..1e05b8999 100644
--- a/docs/source/markdown/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -511,7 +511,7 @@ Current supported mount TYPEs are **bind**, **volume**, and **tmpfs**.
· dst, destination, target: mount destination spec.
- · ro, read-only: true or false (default).
+ · ro, readonly: true or false (default).
Options specific to bind:
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index a2fefe4dd..48e595641 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -15,15 +15,11 @@ example *unix:/run/user/1000/podman/podman.sock*)
## OPTIONS
-**--timeout**, **-t**
+**--time**, **-t**
The time until the session expires in _milliseconds_. The default is 1
second. A value of `0` means no timeout and the session will not expire.
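For instance, a sketch of a service that never expires (the socket path is illustrative):

```
$ podman system service --time 0 unix:/run/user/1000/podman/podman.sock
```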
-**--varlink**
-
-Use the varlink protocol instead of the REST-based protocol. This option will be deprecated in the future.
-
**--help**, **-h**
Print usage statement.
diff --git a/docs/source/markdown/podman-varlink.1.md b/docs/source/markdown/podman-varlink.1.md
index 0d2ab1668..0b04d5ba3 100644
--- a/docs/source/markdown/podman-varlink.1.md
+++ b/docs/source/markdown/podman-varlink.1.md
@@ -19,7 +19,7 @@ The varlink service should generally be done with systemd. See _Configuration_
Print usage statement
-**--timeout**, **-t**
+**--time**, **-t**
The time until the varlink session expires in _milliseconds_. The default is 1
second. A value of `0` means no timeout and the session will not expire.
diff --git a/docs/source/markdown/podman-wait.1.md b/docs/source/markdown/podman-wait.1.md
index ce1c70a5f..886bbc55b 100644
--- a/docs/source/markdown/podman-wait.1.md
+++ b/docs/source/markdown/podman-wait.1.md
@@ -15,6 +15,9 @@ After the container stops, the container's return code is printed.
## OPTIONS
+**--condition**=*state*
+
+Condition to wait on (default "stopped").
+
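As a hedged example of the new option (the container name is a placeholder):

```
$ podman wait --condition running mycontainer
# returns once mycontainer reaches the 'running' state,
# rather than waiting for it to stop
```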
**--help**, **-h**
Print usage statement
diff --git a/docs/source/markdown/podman.1.md b/docs/source/markdown/podman.1.md
index 6bac0cc9d..02f23e6cc 100644
--- a/docs/source/markdown/podman.1.md
+++ b/docs/source/markdown/podman.1.md
@@ -58,6 +58,9 @@ Podman and libpod currently support an additional `precreate` state which is cal
**WARNING**: the `precreate` hook lets you do powerful things, such as adding additional mounts to the runtime configuration. That power also makes it easy to break things. Before reporting libpod errors, try running your container with `precreate` hooks disabled to see if the problem is due to one of your hooks.
+**--identity**=*path*
+
+Path to the SSH identity file.
+
**--log-level**=*level*
Log messages above specified level: debug, info, warn, error (default), fatal or panic (default: "error")
@@ -70,6 +73,9 @@ When namespace is set, created containers and pods will join the given namespace
**--network-cmd-path**=*path*
Path to the command binary to use for setting up a network. It is currently only used for setting up a slirp4netns network. If "" is used then the binary is looked up using the $PATH environment variable.
+**--remote**, **-r**=*url*
+
+URL to access the Podman service (default "unix:/run/user/3267/podman/podman.sock").
+
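Together, the two global flags above let a client reach a remote Podman service over SSH. A minimal sketch (host, user, and key path are placeholders):

```
$ podman --remote ssh://core@podman-host/run/user/1000/podman/podman.sock \
         --identity ~/.ssh/id_rsa ps
```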
**--root**=*value*
Storage root dir in which data, including images, is stored (default: "/var/lib/containers/storage" for UID 0, "$HOME/.local/share/containers/storage" for other users).
diff --git a/docs/tutorials/rootless_tutorial.md b/docs/tutorials/rootless_tutorial.md
index 8e048c746..440e12062 100644
--- a/docs/tutorials/rootless_tutorial.md
+++ b/docs/tutorials/rootless_tutorial.md
@@ -58,7 +58,7 @@ The number of user namespaces that are allowed on the system is specified in the
### /etc/subuid and /etc/subgid configuration
-Rootless podman requires the user running it to have a range of UIDs listed in /etc/subuid and /etc/subgid files. The `shadows-utils` or `newuid` package provides these files on different distributions and they must be installed on the system. These files will need someone with root privileges on the system to add or update the entries within them. The following is a summarization from the [How does rootless Podman work?](https://opensource.com/article/19/2/how-does-rootless-podman-work) article by Dan Walsh on [opensource.com](https://opensource.com)
+Rootless Podman requires the user running it to have a range of UIDs listed in the /etc/subuid and /etc/subgid files. The `shadow-utils` or `newuid` package provides these files on different distributions and must be installed on the system. Someone with root privileges on the system will need to add or update the entries within these files. The following is a summary from the [How does rootless Podman work?](https://opensource.com/article/19/2/how-does-rootless-podman-work) article by Dan Walsh on [opensource.com](https://opensource.com).
Update the /etc/subuid and /etc/subgid with fields for each user that will be allowed to create containers that look like the following. Note that the values for each user must be unique and without any overlap. If there is an overlap, there is a potential for a user to use another’s namespace and they could corrupt it.
@@ -110,6 +110,46 @@ The Podman configuration files for root reside in `/usr/share/containers` with o
The default authorization file used by the `podman login` and `podman logout` commands reside in `${XDG_RUNTIME_DIR}/containers/auth.json`.
+### Using volumes
+
+Rootless Podman is not, and will never be, root; it's not a setuid binary, and gains no privileges when it runs. Instead, Podman makes use of a user namespace to map your own user, along with a block of subordinate UIDs and GIDs it is given access to on the host (via the newuidmap and newgidmap executables), into the containers that Podman creates.
+
+If your container runs with the root user, then `root` in the container is actually your user on the host. UID/GID 1 in the container is the first UID/GID specified in your user's mapping in `/etc/subuid` and `/etc/subgid`, and so on. If you mount a directory from the host into a container as a rootless user, and create a file in that directory as root in the container, you'll see that it's actually owned by your user on the host.
+
+So, for example,
+
+```
+host> whoami
+john
+
+# a folder which is empty
+host> ls /home/john/folder
+host> podman run -v /home/john/folder:/container/volume mycontainer /bin/bash
+
+# Now I'm in the container
+root@container> whoami
+root
+root@container> touch /container/volume/test
+root@container> ls -l /container/volume
+total 0
+-rw-r--r-- 1 root root 0 May 20 21:47 test
+root@container> exit
+
+# I check again
+host> ls -l /home/john/folder
+total 0
+-rw-r--r-- 1 john john 0 May 20 21:47 test
+```
+
+We do recognize that this doesn't really match how many people intend to use rootless Podman: they want their UID inside and outside the container to match. Thus, we provide the `--userns=keep-id` flag, which ensures that your user is mapped to its own UID and GID inside the container.
+
+It is also helpful to distinguish between running Podman as a rootless user and a container which is built to run rootless. If the container you're trying to run has a `USER` which is not root, then when mounting volumes you **must** use `--userns=keep-id`. This is because the container user would not be able to become `root` and access the mounted volumes.
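A minimal sketch of the difference, reusing the hypothetical `john` setup from above:

```
host> podman run --userns=keep-id -v /home/john/folder:/container/volume mycontainer /bin/bash
john@container> touch /container/volume/test2
john@container> exit
host> ls -l /home/john/folder/test2
-rw-r--r-- 1 john john 0 May 20 21:48 /home/john/folder/test2
```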
+
+Other considerations regarding volumes:
+
+- You should always give the full path to the volume you'd like to mount
+- The mount point must exist in the container
+
## More information
If you are still experiencing problems running Podman in a rootless environment, please refer to the [Shortcomings of Rootless Podman](https://github.com/containers/libpod/blob/master/rootless.md) page which lists known issues and solutions to known issues in this environment.
diff --git a/go.mod b/go.mod
index 3bb3b6991..dd2550585 100644
--- a/go.mod
+++ b/go.mod
@@ -10,10 +10,10 @@ require (
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921
github.com/containernetworking/plugins v0.8.6
- github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9
- github.com/containers/common v0.11.4
+ github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224
+ github.com/containers/common v0.12.0
github.com/containers/conmon v2.0.16+incompatible
- github.com/containers/image/v5 v5.4.4
+ github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0
github.com/containers/psgo v1.5.0
github.com/containers/storage v1.20.1
github.com/coreos/go-systemd/v22 v22.0.0
@@ -42,7 +42,7 @@ require (
github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/runtime-tools v0.9.0
- github.com/opencontainers/selinux v1.5.1
+ github.com/opencontainers/selinux v1.5.2
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
@@ -51,7 +51,7 @@ require (
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.5.1
+ github.com/stretchr/testify v1.6.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber/jaeger-client-go v2.23.1+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index 9ddb52ca6..12823b765 100644
--- a/go.sum
+++ b/go.sum
@@ -8,7 +8,6 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
@@ -20,9 +19,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -69,16 +66,17 @@ github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921 h1:eUMd8
github.com/containernetworking/cni v0.7.2-0.20200304161608-4fae32b84921/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.6 h1:npZTLiMa4CRn6m5P9+1Dz4O1j0UeFbm8VYN6dlsw568=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9 h1:EGegltin15wEzCI/5jeHcxBKfwwIHYkBUvsYC3XP060=
-github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9/go.mod h1:+2aNsVcd4pVzmVAbOfWN5X+0Lpz2rtICSGXbTSCzdBU=
-github.com/containers/common v0.10.0/go.mod h1:6A/moCuQITXLqBe5A0WKKTcCfCmEQRbknI05HcPzOL0=
-github.com/containers/common v0.11.4 h1:M7lmjaVY+29g+YiaWH/UP4YeHjT/pZMxvRgmsWsQn74=
-github.com/containers/common v0.11.4/go.mod h1:AOxw4U5TJJrR/J1QPRvWbjHNdwU13wMy79rjK+7+aJE=
+github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224 h1:EqwBZRqyUYvU7JOmmSSPviSaAoUP1wN0cefXXDZ9ATo=
+github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224/go.mod h1:5ZkWjOuK90yl55L5R+purJNLfUo0VUr8pstJazNtYck=
+github.com/containers/common v0.11.2/go.mod h1:2w3QE6VUmhltGYW4wV00h4okq1Crs7hNI1ZD2I0QRUY=
+github.com/containers/common v0.12.0 h1:LR/sYyzFa22rFhfu6J9dEYhVkrWjagUigz/ewHhHL9s=
+github.com/containers/common v0.12.0/go.mod h1:PKlahPDnQQYcXuIw5qq8mq6yNuCHBtgABphzy6pN0iI=
github.com/containers/conmon v2.0.16+incompatible h1:QFOlb9Id4WoJ24BelCFWwDSPTquwKMp3L3g2iGmRTq4=
github.com/containers/conmon v2.0.16+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.4.3/go.mod h1:pN0tvp3YbDd7BWavK2aE0mvJUqVd2HmhPjekyWSFm0U=
-github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
+github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0 h1:K1ez+qAi9hCMHv/akPF4ddZumQTq/PBGf2Nzc7e+7lI=
+github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0/go.mod h1:XRf/UlTCkDBQONudBhSFXiLCouFKPU/oXwIjWw/tPpo=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
@@ -86,8 +84,8 @@ github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNj
github.com/containers/psgo v1.5.0 h1:uofUREsrm0Ls5K4tkEIFPqWSHKyg3Bvoqo/Q2eDmj8g=
github.com/containers/psgo v1.5.0/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzPUWfawVU=
github.com/containers/storage v1.18.2/go.mod h1:WTBMf+a9ZZ/LbmEVeLHH2TX4CikWbO1Bt+/m58ZHVPg=
-github.com/containers/storage v1.19.0/go.mod h1:9Xc4rrTubn5hmtBfL+PSJH1XlfTQwR4VAG1NDUIpCts=
github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ=
+github.com/containers/storage v1.19.2/go.mod h1:gYCp3jzgXkvubO0rI14QAjz5Mxm/qKJgLmHFyqayDnw=
github.com/containers/storage v1.20.1 h1:2XE4eRIqSa6YjhAZjNwIkIKE6+Miy+5WV8l1KzY2ZKk=
github.com/containers/storage v1.20.1/go.mod h1:RoKzO8KSDogCT6c06rEbanZTcKYxshorB33JikEGc3A=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -111,7 +109,6 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -142,7 +139,6 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
@@ -163,16 +159,9 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
@@ -183,7 +172,6 @@ github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@@ -209,7 +197,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
@@ -254,7 +241,6 @@ github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwD
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jamescun/tuntap v0.0.0-20190712092105-cb1fb277045c/go.mod h1:zzwpsgcYhzzIP5WyF8g9ivCv38cY9uAV9Gu0m3lThhE=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -266,10 +252,10 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
+github.com/klauspost/compress v1.10.6 h1:SP6zavvTG3YjOosWePXFDlExpKIWMTO4SE/Y8MZB2vI=
+github.com/klauspost/compress v1.10.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -280,14 +266,14 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
@@ -302,7 +288,6 @@ github.com/moby/vpnkit v0.4.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43W
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -336,11 +321,10 @@ github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuB
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -353,7 +337,6 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -364,8 +347,8 @@ github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOl
github.com/opencontainers/selinux v1.4.0/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM6hL0Fjzx6Km/exZY6RbSPwMu3o=
-github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10=
+github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
+github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/openshift/imagebuilder v1.1.4 h1:LUg8aTjyXMtlDx6IbtvaqofFGZ6aYqe+VIeATE735LM=
github.com/openshift/imagebuilder v1.1.4/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@@ -380,7 +363,6 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
@@ -408,15 +390,12 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rootless-containers/rootlesskit v0.9.5 h1:ygvFn6ms/14MlRQmMK8OSLKwwtHeRLFNblm+rOIndA0=
github.com/rootless-containers/rootlesskit v0.9.5/go.mod h1:OZQfuRPb+2MA1p+hmjHmSmDRv9SdTzlQ3taNA/0d7XM=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f h1:OtU/w6sBKmXYaw2KEODxjcYi3oPSyyslhgGFgIJVGAI=
-github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
github.com/seccomp/containers-golang v0.4.1 h1:6hsmsP8Y9T6PWKJELqAkRWkc6Te60+zK64avkjInd44=
github.com/seccomp/containers-golang v0.4.1/go.mod h1:5fP9lgyYyklJ8fg8Geq193G1QLe0ikf34z+hZKIjmnE=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
@@ -428,7 +407,6 @@ github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -447,14 +425,15 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
+github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -476,8 +455,9 @@ github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b/go.mod h1:YHaw8N660ESgM
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y=
-github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
+github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw=
+github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -508,7 +488,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -516,13 +495,9 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -538,9 +513,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -572,7 +545,6 @@ golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -581,6 +553,7 @@ golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -603,19 +576,12 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
@@ -665,40 +631,30 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
-k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0=
k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
-k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk=
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g=
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k=
-k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ=
-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
diff --git a/hack/podman-registry b/hack/podman-registry
index e7708ce6a..fe79b7d9d 100755
--- a/hack/podman-registry
+++ b/hack/podman-registry
@@ -14,7 +14,7 @@ PODMAN_REGISTRY_PASS=
PODMAN_REGISTRY_PORT=
# Podman binary to run
-PODMAN=${PODMAN:-$(type -p podman)}
+PODMAN=${PODMAN:-$(dirname $0)/../bin/podman}
# END defaults
###############################################################################
@@ -104,6 +104,24 @@ function podman() {
"$@"
}
+###############
+# must_pass # Run a command quietly; abort with error on failure
+###############
+function must_pass() {
+ local log=${PODMAN_REGISTRY_WORKDIR}/log
+
+ "$@" &> $log
+ if [ $? -ne 0 ]; then
+ echo "$ME: Command failed: $*" >&2
+ cat $log >&2
+
+ # If we ever get here, it's a given that the registry is not running.
+ # Clean up after ourselves.
+ rm -rf ${PODMAN_REGISTRY_WORKDIR}
+ exit 1
+ fi
+}
+
# END helper functions
###############################################################################
# BEGIN action processing
@@ -132,7 +150,7 @@ function do_start() {
PODMAN_REGISTRY_PASS=$(random_string 15)
fi
- # Die on any error
+ # For the next few commands, die on any error
set -e
mkdir -p ${PODMAN_REGISTRY_WORKDIR}
@@ -140,50 +158,50 @@ function do_start() {
local AUTHDIR=${PODMAN_REGISTRY_WORKDIR}/auth
mkdir -p $AUTHDIR
- # We have to be silent; our only output must be env. vars. Log output here.
- local log=${PODMAN_REGISTRY_WORKDIR}/log
- touch $log
-
# Pull registry image, but into a separate container storage
mkdir -p ${PODMAN_REGISTRY_WORKDIR}/root
mkdir -p ${PODMAN_REGISTRY_WORKDIR}/runroot
+ set +e
+
# Give it three tries, to compensate for flakes
- podman pull ${PODMAN_REGISTRY_IMAGE} &>> $log ||
- podman pull ${PODMAN_REGISTRY_IMAGE} &>> $log ||
- podman pull ${PODMAN_REGISTRY_IMAGE} &>> $log
+ podman pull ${PODMAN_REGISTRY_IMAGE} &>/dev/null ||
+ podman pull ${PODMAN_REGISTRY_IMAGE} &>/dev/null ||
+ must_pass podman pull ${PODMAN_REGISTRY_IMAGE}
# Registry image needs a cert. Self-signed is good enough.
local CERT=$AUTHDIR/domain.crt
- # FIXME: if this fails, we fail silently! It'd be more helpful
- # to say 'openssl failed' and cat the logfile
- openssl req -newkey rsa:4096 -nodes -sha256 \
- -keyout ${AUTHDIR}/domain.key -x509 -days 2 \
- -out ${AUTHDIR}/domain.crt \
- -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost" \
- &>> $log
-
- # Store credentials where container will see them
+ must_pass openssl req -newkey rsa:4096 -nodes -sha256 \
+ -keyout ${AUTHDIR}/domain.key -x509 -days 2 \
+ -out ${AUTHDIR}/domain.crt \
+ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
+
+ # Store credentials where container will see them. We can't run
+ # this one via must_pass because we need its stdout.
podman run --rm \
--entrypoint htpasswd ${PODMAN_REGISTRY_IMAGE} \
-Bbn ${PODMAN_REGISTRY_USER} ${PODMAN_REGISTRY_PASS} \
> $AUTHDIR/htpasswd
+ if [ $? -ne 0 ]; then
+ rm -rf ${PODMAN_REGISTRY_WORKDIR}
+ die "Command failed: podman run [htpasswd]"
+ fi
# In case someone needs to debug
echo "${PODMAN_REGISTRY_USER}:${PODMAN_REGISTRY_PASS}" \
> $AUTHDIR/htpasswd-plaintext
# Run the registry container.
- podman run --quiet -d \
- -p ${PODMAN_REGISTRY_PORT}:5000 \
- --name registry \
- -v $AUTHDIR:/auth:Z \
- -e "REGISTRY_AUTH=htpasswd" \
- -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
- -e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd" \
- -e "REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt" \
- -e "REGISTRY_HTTP_TLS_KEY=/auth/domain.key" \
- registry:2 &>> $log
+ must_pass podman run --quiet -d \
+ -p ${PODMAN_REGISTRY_PORT}:5000 \
+ --name registry \
+ -v $AUTHDIR:/auth:Z \
+ -e "REGISTRY_AUTH=htpasswd" \
+ -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
+ -e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd" \
+ -e "REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt" \
+ -e "REGISTRY_HTTP_TLS_KEY=/auth/domain.key" \
+ registry:2
# Dump settings. Our caller will use these to access the registry.
for v in IMAGE PORT USER PASS; do
diff --git a/hack/xref-helpmsgs-manpages b/hack/xref-helpmsgs-manpages
index 00db3c8de..c1e9dffc4 100755
--- a/hack/xref-helpmsgs-manpages
+++ b/hack/xref-helpmsgs-manpages
@@ -150,6 +150,10 @@ sub xref_by_man {
my %ignore = map { $_ => 1 } qw(-l -s -t --latest --size --type);
next if $man =~ /-inspect/ && $ignore{$k};
+ # Special case: podman-diff serves dual purpose (image, ctr)
+ my %diffignore = map { $_ => 1 } qw(-l --latest );
+ next if $man =~ /-diff/ && $diffignore{$k};
+
# Special case: the 'trust' man page is a mess
next if $man =~ /-trust/;
diff --git a/libpod/image/docker_registry_options.go b/libpod/image/docker_registry_options.go
index 01b5558af..081e7ef4f 100644
--- a/libpod/image/docker_registry_options.go
+++ b/libpod/image/docker_registry_options.go
@@ -30,6 +30,8 @@ type DockerRegistryOptions struct {
OSChoice string
// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
ArchitectureChoice string
+ // RegistriesConfPath can be used to override the default path of registries.conf.
+ RegistriesConfPath string
}
// GetSystemContext constructs a new system context from a parent context. the values in the DockerRegistryOptions, and other parameters.
diff --git a/libpod/options.go b/libpod/options.go
index ff5e29335..8e0d3df86 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/namespaces"
@@ -17,6 +18,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// Runtime Creation Options
@@ -244,6 +246,22 @@ func WithStaticDir(dir string) RuntimeOption {
}
}
+// WithRegistriesConf configures the runtime to always use specified
+// registries.conf for image processing.
+func WithRegistriesConf(path string) RuntimeOption {
+ logrus.Debugf("Setting custom registries.conf: %q", path)
+ return func(rt *Runtime) error {
+ if _, err := os.Stat(path); err != nil {
+ return errors.Wrap(err, "error locating specified registries.conf")
+ }
+ if rt.imageContext == nil {
+ rt.imageContext = &types.SystemContext{}
+ }
+ rt.imageContext.SystemRegistriesConfPath = path
+ return nil
+ }
+}
+
// WithHooksDir sets the directories to look for OCI runtime hook configuration.
func WithHooksDir(hooksDirs ...string) RuntimeOption {
return func(rt *Runtime) error {
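
For orientation, a minimal sketch of how a caller might wire the new option into
runtime construction. It assumes the usual variadic NewRuntime constructor and
Shutdown method; the path is a placeholder, and WithRegistriesConf fails fast if
it does not exist:

    // Hypothetical usage sketch, not part of this change.
    package main

    import (
        "context"
        "log"

        "github.com/containers/libpod/libpod"
    )

    func main() {
        rt, err := libpod.NewRuntime(context.Background(),
            libpod.WithRegistriesConf("/etc/containers/registries.conf")) // placeholder path
        if err != nil {
            log.Fatal(err) // includes the os.Stat failure from WithRegistriesConf
        }
        defer rt.Shutdown(false)
    }
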
diff --git a/libpod/runtime.go b/libpod/runtime.go
index c1864683f..4744de1a2 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -339,9 +339,10 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
// Set up containers/image
- runtime.imageContext = &types.SystemContext{
- SignaturePolicyPath: runtime.config.Engine.SignaturePolicyPath,
+ if runtime.imageContext == nil {
+ runtime.imageContext = &types.SystemContext{}
}
+ runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
// Create the tmpDir
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
diff --git a/pkg/api/handlers/compat/containers_archive.go b/pkg/api/handlers/compat/containers_archive.go
new file mode 100644
index 000000000..c3a26873e
--- /dev/null
+++ b/pkg/api/handlers/compat/containers_archive.go
@@ -0,0 +1,12 @@
+package compat
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+)
+
+func Archive(w http.ResponseWriter, r *http.Request) {
+ utils.Error(w, "not implemented", http.StatusNotImplemented, errors.New("not implemented"))
+}
diff --git a/pkg/api/handlers/compat/containers_attach.go b/pkg/api/handlers/compat/containers_attach.go
index 012e20daf..990140ee1 100644
--- a/pkg/api/handlers/compat/containers_attach.go
+++ b/pkg/api/handlers/compat/containers_attach.go
@@ -90,7 +90,7 @@ func AttachContainer(w http.ResponseWriter, r *http.Request) {
// For Docker compatibility, we need to re-initialize containers in these states.
if state == define.ContainerStateConfigured || state == define.ContainerStateExited {
if err := ctr.Init(r.Context()); err != nil {
- utils.InternalServerError(w, errors.Wrapf(err, "error preparing container %s for attach", ctr.ID()))
+ utils.Error(w, "Container in wrong state", http.StatusConflict, errors.Wrapf(err, "error preparing container %s for attach", ctr.ID()))
return
}
} else if !(state == define.ContainerStateCreated || state == define.ContainerStateRunning) {
diff --git a/pkg/api/handlers/compat/containers_stats.go b/pkg/api/handlers/compat/containers_stats.go
index 62ccd2b93..048321add 100644
--- a/pkg/api/handlers/compat/containers_stats.go
+++ b/pkg/api/handlers/compat/containers_stats.go
@@ -45,8 +45,8 @@ func StatsContainer(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
- if state != define.ContainerStateRunning && !query.Stream {
- utils.InternalServerError(w, define.ErrCtrStateInvalid)
+ if state != define.ContainerStateRunning {
+ utils.Error(w, "Container not running and streaming requested", http.StatusConflict, define.ErrCtrStateInvalid)
return
}
diff --git a/pkg/api/handlers/compat/events.go b/pkg/api/handlers/compat/events.go
index 7ebfb0d1e..577ddd0a1 100644
--- a/pkg/api/handlers/compat/events.go
+++ b/pkg/api/handlers/compat/events.go
@@ -26,7 +26,10 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
Since string `schema:"since"`
Until string `schema:"until"`
Filters map[string][]string `schema:"filters"`
- }{}
+ Stream bool `schema:"stream"`
+ }{
+ Stream: true,
+ }
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, "Failed to parse parameters", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
}
@@ -41,9 +44,10 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
if len(query.Since) > 0 || len(query.Until) > 0 {
fromStart = true
}
+
eventChannel := make(chan *events.Event)
go func() {
- readOpts := events.ReadOptions{FromStart: fromStart, Stream: true, Filters: libpodFilters, EventChannel: eventChannel, Since: query.Since, Until: query.Until}
+ readOpts := events.ReadOptions{FromStart: fromStart, Stream: query.Stream, Filters: libpodFilters, EventChannel: eventChannel, Since: query.Since, Until: query.Until}
eventsError = runtime.Events(readOpts)
}()
if eventsError != nil {
@@ -55,7 +59,9 @@ func GetEvents(w http.ResponseWriter, r *http.Request) {
// If client disappears we need to stop listening for events
go func(done <-chan struct{}) {
<-done
- close(eventChannel)
+ if _, ok := <-eventChannel; ok {
+ close(eventChannel)
+ }
}(r.Context().Done())
// Headers need to be written out before turning Writer() over to json encoder
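
With the new stream parameter, a client can ask for the buffered events and have
the handler return instead of following. A hedged sketch, assuming the service
was started on a TCP socket at localhost:8080 (address and port are placeholders):

    // Sketch: fetch events once instead of streaming them.
    package main

    import (
        "io"
        "net/http"
        "os"
    )

    func main() {
        // stream=false: the read loop drains existing events and the
        // connection closes, rather than staying open for new ones.
        resp, err := http.Get("http://localhost:8080/events?stream=false")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        io.Copy(os.Stdout, resp.Body) // one JSON-encoded event per line
    }
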
diff --git a/pkg/api/handlers/compat/images.go b/pkg/api/handlers/compat/images.go
index ea9cbd691..b64ed0036 100644
--- a/pkg/api/handlers/compat/images.go
+++ b/pkg/api/handlers/compat/images.go
@@ -15,6 +15,7 @@ import (
image2 "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/auth"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/util"
"github.com/docker/docker/api/types"
@@ -251,19 +252,32 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
return
}
- /*
- fromImage – Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed.
- repo – Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image.
- tag – Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled.
- */
fromImage := query.FromImage
if len(query.Tag) >= 1 {
fromImage = fmt.Sprintf("%s:%s", fromImage, query.Tag)
}
- // TODO
- // We are eating the output right now because we haven't talked about how to deal with multiple responses yet
- img, err := runtime.ImageRuntime().New(r.Context(), fromImage, "", "", nil, &image2.DockerRegistryOptions{}, image2.SigningOptions{}, nil, util.PullImageMissing)
+ authConf, authfile, err := auth.GetCredentials(r)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String()))
+ return
+ }
+ defer auth.RemoveAuthfile(authfile)
+
+ registryOpts := image2.DockerRegistryOptions{DockerRegistryCreds: authConf}
+ if sys := runtime.SystemContext(); sys != nil {
+ registryOpts.DockerCertPath = sys.DockerCertPath
+ }
+ img, err := runtime.ImageRuntime().New(r.Context(),
+ fromImage,
+ "", // signature policy
+ authfile,
+ nil, // writer
+ &registryOpts,
+ image2.SigningOptions{},
+ nil, // label
+ util.PullImageMissing,
+ )
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, err)
return
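
The client side of this change is simply a base64url-encoded JSON payload in the
X-Registry-Auth header. A hedged sketch against the compat pull endpoint; the
address, image, and credentials are placeholders:

    // Sketch: pull through /images/create with a single-auth header.
    package main

    import (
        "encoding/base64"
        "encoding/json"
        "net/http"
    )

    func main() {
        // Same shape that singleAuthHeader() decodes on the server.
        creds, _ := json.Marshal(map[string]string{
            "username": "user",
            "password": "secret",
        })
        req, _ := http.NewRequest(http.MethodPost,
            "http://localhost:8080/images/create?fromImage=alpine&tag=latest", nil)
        req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(creds))
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
    }
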
diff --git a/pkg/api/handlers/compat/images_push.go b/pkg/api/handlers/compat/images_push.go
index 2260d5557..47976b7c9 100644
--- a/pkg/api/handlers/compat/images_push.go
+++ b/pkg/api/handlers/compat/images_push.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/auth"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)
@@ -48,13 +49,17 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
return
}
- // TODO: the X-Registry-Auth header is not checked yet here nor in any other
- // endpoint. Pushing does NOT work with authentication at the moment.
- dockerRegistryOptions := &image.DockerRegistryOptions{}
- authfile := ""
+ authConf, authfile, err := auth.GetCredentials(r)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String()))
+ return
+ }
+ defer auth.RemoveAuthfile(authfile)
+
+ dockerRegistryOptions := &image.DockerRegistryOptions{DockerRegistryCreds: authConf}
if sys := runtime.SystemContext(); sys != nil {
dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
- authfile = sys.AuthFilePath
+ dockerRegistryOptions.RegistriesConfPath = sys.SystemRegistriesConfPath
}
err = newImage.PushImageToHeuristicDestination(
diff --git a/pkg/api/handlers/compat/networks.go b/pkg/api/handlers/compat/networks.go
new file mode 100644
index 000000000..c52ca093f
--- /dev/null
+++ b/pkg/api/handlers/compat/networks.go
@@ -0,0 +1,301 @@
+package compat
+
+import (
+ "encoding/json"
+ "net"
+ "net/http"
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/containernetworking/cni/libcni"
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/containers/libpod/pkg/domain/infra/abi"
+ "github.com/containers/libpod/pkg/network"
+ "github.com/docker/docker/api/types"
+ dockerNetwork "github.com/docker/docker/api/types/network"
+ "github.com/gorilla/schema"
+ "github.com/pkg/errors"
+)
+
+type CompatInspectNetwork struct {
+ types.NetworkResource
+}
+
+func InspectNetwork(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+
+ // FYI scope and verbose are currently unused but are described by the API
+ // Leaving this here for if/when we need to enable them
+ //query := struct {
+ // scope string
+ // verbose bool
+ //}{
+ // // override any golang type defaults
+ //}
+ //decoder := r.Context().Value("decoder").(*schema.Decoder)
+ //if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ // utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ // return
+ //}
+ config, err := runtime.GetConfig()
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ name := utils.GetName(r)
+ _, err = network.InspectNetwork(config, name)
+ if err != nil {
+ // TODO our network package does not distinguish between not finding a
+ // specific network vs not being able to read it
+ utils.InternalServerError(w, err)
+ return
+ }
+ report, err := getNetworkResourceByName(name, runtime)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ utils.WriteResponse(w, http.StatusOK, report)
+}
+
+func getNetworkResourceByName(name string, runtime *libpod.Runtime) (*types.NetworkResource, error) {
+ var (
+ ipamConfigs []dockerNetwork.IPAMConfig
+ )
+ config, err := runtime.GetConfig()
+ if err != nil {
+ return nil, err
+ }
+ containerEndpoints := map[string]types.EndpointResource{}
+ // Get the network path so we can get created time
+ networkConfigPath, err := network.GetCNIConfigPathByName(config, name)
+ if err != nil {
+ return nil, err
+ }
+ f, err := os.Stat(networkConfigPath)
+ if err != nil {
+ return nil, err
+ }
+ stat := f.Sys().(*syscall.Stat_t)
+ cons, err := runtime.GetAllContainers()
+ if err != nil {
+ return nil, err
+ }
+ conf, err := libcni.ConfListFromFile(networkConfigPath)
+ if err != nil {
+ return nil, err
+ }
+
+ // No Bridge plugin means we bail
+ bridge, err := genericPluginsToBridge(conf.Plugins, network.DefaultNetworkDriver)
+ if err != nil {
+ return nil, err
+ }
+ for _, outer := range bridge.IPAM.Ranges {
+ for _, n := range outer {
+ ipamConfig := dockerNetwork.IPAMConfig{
+ Subnet: n.Subnet,
+ Gateway: n.Gateway,
+ }
+ ipamConfigs = append(ipamConfigs, ipamConfig)
+ }
+ }
+
+ for _, con := range cons {
+ data, err := con.Inspect(false)
+ if err != nil {
+ return nil, err
+ }
+ if netData, ok := data.NetworkSettings.Networks[name]; ok {
+ containerEndpoint := types.EndpointResource{
+ Name: netData.NetworkID,
+ EndpointID: netData.EndpointID,
+ MacAddress: netData.MacAddress,
+ IPv4Address: netData.IPAddress,
+ IPv6Address: netData.GlobalIPv6Address,
+ }
+ containerEndpoints[con.ID()] = containerEndpoint
+ }
+ }
+ report := types.NetworkResource{
+ Name: name,
+ ID: "",
+ Created: time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)), // nolint: unconvert
+ Scope: "",
+ Driver: network.DefaultNetworkDriver,
+ EnableIPv6: false,
+ IPAM: dockerNetwork.IPAM{
+ Driver: "default",
+ Options: nil,
+ Config: ipamConfigs,
+ },
+ Internal: false,
+ Attachable: false,
+ Ingress: false,
+ ConfigFrom: dockerNetwork.ConfigReference{},
+ ConfigOnly: false,
+ Containers: containerEndpoints,
+ Options: nil,
+ Labels: nil,
+ Peers: nil,
+ Services: nil,
+ }
+ return &report, nil
+}
+
+func genericPluginsToBridge(plugins []*libcni.NetworkConfig, pluginType string) (network.HostLocalBridge, error) {
+ var bridge network.HostLocalBridge
+ generic, err := findPluginByName(plugins, pluginType)
+ if err != nil {
+ return bridge, err
+ }
+ err = json.Unmarshal(generic, &bridge)
+ return bridge, err
+}
+
+func findPluginByName(plugins []*libcni.NetworkConfig, pluginType string) ([]byte, error) {
+ for _, p := range plugins {
+ if pluginType == p.Network.Type {
+ return p.Bytes, nil
+ }
+ }
+ return nil, errors.New("unable to find bridge plugin")
+}
+
+func ListNetworks(w http.ResponseWriter, r *http.Request) {
+ var (
+ reports []*types.NetworkResource
+ )
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ decoder := r.Context().Value("decoder").(*schema.Decoder)
+ query := struct {
+ Filters map[string][]string `schema:"filters"`
+ }{
+ // override any golang type defaults
+ }
+ if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse parameters for %s", r.URL.String()))
+ return
+ }
+ config, err := runtime.GetConfig()
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ // TODO remove when filters are implemented
+ if len(query.Filters) > 0 {
+ utils.InternalServerError(w, errors.New("filters for listing networks is not implemented"))
+ return
+ }
+ netNames, err := network.GetNetworkNamesFromFileSystem(config)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ for _, name := range netNames {
+ report, err := getNetworkResourceByName(name, runtime)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ }
+ reports = append(reports, report)
+ }
+ utils.WriteResponse(w, http.StatusOK, reports)
+}
+
+func CreateNetwork(w http.ResponseWriter, r *http.Request) {
+ var (
+ name string
+ networkCreate types.NetworkCreateRequest
+ )
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ if err := json.NewDecoder(r.Body).Decode(&networkCreate); err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
+ return
+ }
+
+ if len(networkCreate.Name) > 0 {
+ name = networkCreate.Name
+ }
+ // At present I think we should just support the bridge driver
+ // and allow demand to make us consider more
+ if networkCreate.Driver != network.DefaultNetworkDriver {
+ utils.InternalServerError(w, errors.New("network create only supports the bridge driver"))
+ return
+ }
+ ncOptions := entities.NetworkCreateOptions{
+ Driver: network.DefaultNetworkDriver,
+ Internal: networkCreate.Internal,
+ }
+ if networkCreate.IPAM != nil && networkCreate.IPAM.Config != nil {
+ if len(networkCreate.IPAM.Config) > 1 {
+ utils.InternalServerError(w, errors.New("compat network create can only support one IPAM config"))
+ return
+ }
+
+ if len(networkCreate.IPAM.Config[0].Subnet) > 0 {
+ _, subnet, err := net.ParseCIDR(networkCreate.IPAM.Config[0].Subnet)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ ncOptions.Subnet = *subnet
+ }
+ if len(networkCreate.IPAM.Config[0].Gateway) > 0 {
+ ncOptions.Gateway = net.ParseIP(networkCreate.IPAM.Config[0].Gateway)
+ }
+ if len(networkCreate.IPAM.Config[0].IPRange) > 0 {
+ _, IPRange, err := net.ParseCIDR(networkCreate.IPAM.Config[0].IPRange)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ ncOptions.Range = *IPRange
+ }
+ }
+ ce := abi.ContainerEngine{Libpod: runtime}
+ _, err := ce.NetworkCreate(r.Context(), name, ncOptions)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ }
+ report := types.NetworkCreate{
+ CheckDuplicate: networkCreate.CheckDuplicate,
+ Driver: networkCreate.Driver,
+ Scope: networkCreate.Scope,
+ EnableIPv6: networkCreate.EnableIPv6,
+ IPAM: networkCreate.IPAM,
+ Internal: networkCreate.Internal,
+ Attachable: networkCreate.Attachable,
+ Ingress: networkCreate.Ingress,
+ ConfigOnly: networkCreate.ConfigOnly,
+ ConfigFrom: networkCreate.ConfigFrom,
+ Options: networkCreate.Options,
+ Labels: networkCreate.Labels,
+ }
+ utils.WriteResponse(w, http.StatusOK, report)
+}
+
+func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
+ runtime := r.Context().Value("runtime").(*libpod.Runtime)
+ config, err := runtime.GetConfig()
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ name := utils.GetName(r)
+ exists, err := network.Exists(config, name)
+ if err != nil {
+ utils.InternalServerError(w, err)
+ return
+ }
+ if !exists {
+ utils.Error(w, "network not found", http.StatusNotFound, err)
+ return
+ }
+ if err := network.RemoveNetwork(config, name); err != nil {
+ utils.InternalServerError(w, err)
+ }
+ utils.WriteResponse(w, http.StatusNoContent, "")
+}
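
To show the request shape CreateNetwork accepts, a hedged sketch posting a
bridge network with a single IPAM config (the only driver and config count the
handler currently allows). Address, name, and subnet are placeholders:

    // Sketch: create a network via the compat endpoint.
    package main

    import (
        "bytes"
        "net/http"
    )

    func main() {
        body := []byte(`{
            "Name": "mynet",
            "Driver": "bridge",
            "IPAM": {"Config": [{"Subnet": "10.89.0.0/24", "Gateway": "10.89.0.1"}]}
        }`)
        resp, err := http.Post("http://localhost:8080/networks/create",
            "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close() // 200 echoes back a types.NetworkCreate report
    }
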
diff --git a/pkg/api/handlers/compat/resize.go b/pkg/api/handlers/compat/resize.go
index 3ead733bc..231b53175 100644
--- a/pkg/api/handlers/compat/resize.go
+++ b/pkg/api/handlers/compat/resize.go
@@ -1,10 +1,12 @@
package compat
import (
+ "fmt"
"net/http"
"strings"
"github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/api/handlers/utils"
"github.com/gorilla/schema"
"github.com/pkg/errors"
@@ -43,6 +45,14 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
utils.ContainerNotFound(w, name, err)
return
}
+ if state, err := ctnr.State(); err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "cannot obtain container state"))
+ return
+ } else if state != define.ContainerStateRunning {
+ utils.Error(w, "Container not running", http.StatusConflict,
+ fmt.Errorf("container %q in wrong state %q", name, state.String()))
+ return
+ }
if err := ctnr.AttachResize(sz); err != nil {
utils.InternalServerError(w, errors.Wrapf(err, "cannot resize container"))
return
@@ -56,6 +66,14 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
utils.SessionNotFound(w, name, err)
return
}
+ if state, err := ctnr.State(); err != nil {
+ utils.InternalServerError(w, errors.Wrapf(err, "cannot obtain session container state"))
+ return
+ } else if state != define.ContainerStateRunning {
+ utils.Error(w, "Container not running", http.StatusConflict,
+ fmt.Errorf("container %q in wrong state %q", name, state.String()))
+ return
+ }
if err := ctnr.ExecResize(name, sz); err != nil {
utils.InternalServerError(w, errors.Wrapf(err, "cannot resize session"))
return
diff --git a/pkg/api/handlers/compat/swagger.go b/pkg/api/handlers/compat/swagger.go
index ce83aa32f..dc94a7ebd 100644
--- a/pkg/api/handlers/compat/swagger.go
+++ b/pkg/api/handlers/compat/swagger.go
@@ -3,6 +3,7 @@ package compat
import (
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/storage/pkg/archive"
+ "github.com/docker/docker/api/types"
)
// Create container
@@ -35,3 +36,30 @@ type swagChangesResponse struct {
Changes []archive.Change
}
}
+
+// Network inspect
+// swagger:response CompatNetworkInspect
+type swagCompatNetworkInspect struct {
+ // in:body
+ Body types.NetworkResource
+}
+
+// Network list
+// swagger:response CompatNetworkList
+type swagCompatNetworkList struct {
+ // in:body
+ Body []types.NetworkResource
+}
+
+// Network create
+// swagger:model NetworkCreateRequest
+type NetworkCreateRequest struct {
+ types.NetworkCreateRequest
+}
+
+// Network create
+// swagger:response CompatNetworkCreate
+type swagCompatNetworkCreateResponse struct {
+ // in:body
+ Body struct{ types.NetworkCreate }
+}
diff --git a/pkg/api/handlers/compat/types.go b/pkg/api/handlers/compat/types.go
index b8d06760f..6d47ede64 100644
--- a/pkg/api/handlers/compat/types.go
+++ b/pkg/api/handlers/compat/types.go
@@ -48,7 +48,7 @@ type StatsJSON struct {
Stats
Name string `json:"name,omitempty"`
- ID string `json:"id,omitempty"`
+ ID string `json:"Id,omitempty"`
// Networks request version >=1.21
Networks map[string]docker.NetworkStats `json:"networks,omitempty"`
diff --git a/pkg/api/handlers/libpod/containers.go b/pkg/api/handlers/libpod/containers.go
index 3902bdc9b..50f6b1a38 100644
--- a/pkg/api/handlers/libpod/containers.go
+++ b/pkg/api/handlers/libpod/containers.go
@@ -66,6 +66,10 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {
utils.InternalServerError(w, err)
return
}
+ if len(pss) == 0 {
+ utils.WriteResponse(w, http.StatusOK, "[]")
+ return
+ }
utils.WriteResponse(w, http.StatusOK, pss)
}
diff --git a/pkg/api/handlers/libpod/copy.go b/pkg/api/handlers/libpod/copy.go
new file mode 100644
index 000000000..a3b404bce
--- /dev/null
+++ b/pkg/api/handlers/libpod/copy.go
@@ -0,0 +1,12 @@
+package libpod
+
+import (
+ "net/http"
+
+ "github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/pkg/errors"
+)
+
+func Archive(w http.ResponseWriter, r *http.Request) {
+ utils.Error(w, "not implemented", http.StatusNotImplemented, errors.New("not implemented"))
+}
diff --git a/pkg/api/handlers/libpod/images.go b/pkg/api/handlers/libpod/images.go
index 1cbcfb52c..4b277d39c 100644
--- a/pkg/api/handlers/libpod/images.go
+++ b/pkg/api/handlers/libpod/images.go
@@ -21,6 +21,7 @@ import (
image2 "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/api/handlers"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/auth"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/domain/infra/abi"
"github.com/containers/libpod/pkg/errorhandling"
@@ -28,6 +29,7 @@ import (
utils2 "github.com/containers/libpod/utils"
"github.com/gorilla/schema"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// Commit
@@ -339,7 +341,6 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
Reference string `schema:"reference"`
- Credentials string `schema:"credentials"`
OverrideOS string `schema:"overrideOS"`
OverrideArch string `schema:"overrideArch"`
TLSVerify bool `schema:"tlsVerify"`
@@ -382,20 +383,16 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
return
}
- var registryCreds *types.DockerAuthConfig
- if len(query.Credentials) != 0 {
- creds, err := util.ParseRegistryCreds(query.Credentials)
- if err != nil {
- utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
- errors.Wrapf(err, "error parsing credentials %q", query.Credentials))
- return
- }
- registryCreds = creds
+ authConf, authfile, err := auth.GetCredentials(r)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String()))
+ return
}
+ defer auth.RemoveAuthfile(authfile)
// Setup the registry options
dockerRegistryOptions := image.DockerRegistryOptions{
- DockerRegistryCreds: registryCreds,
+ DockerRegistryCreds: authConf,
OSChoice: query.OverrideOS,
ArchitectureChoice: query.OverrideArch,
}
@@ -403,6 +400,13 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
}
+ sys := runtime.SystemContext()
+ if sys == nil {
+ sys = image.GetSystemContext("", authfile, false)
+ }
+ dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
+ sys.DockerAuthConfig = authConf
+
// Prepare the images we want to pull
imagesToPull := []string{}
res := []handlers.LibpodImagesPullReport{}
@@ -411,8 +415,7 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
if !query.AllTags {
imagesToPull = append(imagesToPull, imageName)
} else {
- systemContext := image.GetSystemContext("", "", false)
- tags, err := docker.GetRepositoryTags(context.Background(), systemContext, imageRef)
+ tags, err := docker.GetRepositoryTags(context.Background(), sys, imageRef)
if err != nil {
utils.InternalServerError(w, errors.Wrap(err, "error getting repository tags"))
return
@@ -422,12 +425,6 @@ func ImagesPull(w http.ResponseWriter, r *http.Request) {
}
}
- authfile := ""
- if sys := runtime.SystemContext(); sys != nil {
- dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
- authfile = sys.AuthFilePath
- }
-
// Finally pull the images
for _, img := range imagesToPull {
newImage, err := runtime.ImageRuntime().New(
@@ -456,7 +453,6 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value("runtime").(*libpod.Runtime)
query := struct {
- Credentials string `schema:"credentials"`
Destination string `schema:"destination"`
TLSVerify bool `schema:"tlsVerify"`
}{
@@ -492,26 +488,20 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
return
}
- var registryCreds *types.DockerAuthConfig
- if len(query.Credentials) != 0 {
- creds, err := util.ParseRegistryCreds(query.Credentials)
- if err != nil {
- utils.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest,
- errors.Wrapf(err, "error parsing credentials %q", query.Credentials))
- return
- }
- registryCreds = creds
+ authConf, authfile, err := auth.GetCredentials(r)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String()))
+ return
}
+ defer auth.RemoveAuthfile(authfile)
+ logrus.Errorf("AuthConf: %v", authConf)
- // TODO: the X-Registry-Auth header is not checked yet here nor in any other
- // endpoint. Pushing does NOT work with authentication at the moment.
dockerRegistryOptions := &image.DockerRegistryOptions{
- DockerRegistryCreds: registryCreds,
+ DockerRegistryCreds: authConf,
}
- authfile := ""
if sys := runtime.SystemContext(); sys != nil {
dockerRegistryOptions.DockerCertPath = sys.DockerCertPath
- authfile = sys.AuthFilePath
+ dockerRegistryOptions.RegistriesConfPath = sys.SystemRegistriesConfPath
}
if _, found := r.URL.Query()["tlsVerify"]; found {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
diff --git a/pkg/api/handlers/libpod/manifests.go b/pkg/api/handlers/libpod/manifests.go
index 93ca367f7..aef92368b 100644
--- a/pkg/api/handlers/libpod/manifests.go
+++ b/pkg/api/handlers/libpod/manifests.go
@@ -120,6 +120,10 @@ func ManifestRemove(w http.ResponseWriter, r *http.Request) {
utils.WriteResponse(w, http.StatusOK, handlers.IDResponse{ID: newID})
}
func ManifestPush(w http.ResponseWriter, r *http.Request) {
+ // FIXME: parameters are missing (tlsVerify, format).
+ // Also, we should use the ABI function to avoid duplicate code.
+ // Also, support for XRegistryAuth headers is missing.
+
runtime := r.Context().Value("runtime").(*libpod.Runtime)
decoder := r.Context().Value("decoder").(*schema.Decoder)
query := struct {
diff --git a/pkg/api/handlers/libpod/play.go b/pkg/api/handlers/libpod/play.go
index 26e02bf4f..1cb5cdb6c 100644
--- a/pkg/api/handlers/libpod/play.go
+++ b/pkg/api/handlers/libpod/play.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/api/handlers/utils"
+ "github.com/containers/libpod/pkg/auth"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/domain/infra/abi"
"github.com/gorilla/schema"
@@ -47,9 +48,26 @@ func PlayKube(w http.ResponseWriter, r *http.Request) {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "error closing temporary file"))
return
}
+ authConf, authfile, err := auth.GetCredentials(r)
+ if err != nil {
+ utils.Error(w, "Something went wrong.", http.StatusBadRequest, errors.Wrapf(err, "Failed to parse %q header for %s", auth.XRegistryAuthHeader, r.URL.String()))
+ return
+ }
+ defer auth.RemoveAuthfile(authfile)
+ var username, password string
+ if authConf != nil {
+ username = authConf.Username
+ password = authConf.Password
+ }
containerEngine := abi.ContainerEngine{Libpod: runtime}
- options := entities.PlayKubeOptions{Network: query.Network, Quiet: true}
+ options := entities.PlayKubeOptions{
+ Authfile: authfile,
+ Username: username,
+ Password: password,
+ Network: query.Network,
+ Quiet: true,
+ }
if _, found := r.URL.Query()["tlsVerify"]; found {
options.SkipTLSVerify = types.NewOptionalBool(!query.TLSVerify)
}
diff --git a/pkg/api/handlers/types.go b/pkg/api/handlers/types.go
index d8cdd9caf..aa3d0fe91 100644
--- a/pkg/api/handlers/types.go
+++ b/pkg/api/handlers/types.go
@@ -120,7 +120,7 @@ type CreateContainerConfig struct {
// swagger:model IDResponse
type IDResponse struct {
// ID
- ID string `json:"id"`
+ ID string `json:"Id"`
}
type ContainerTopOKBody struct {
diff --git a/pkg/api/server/register_archive.go b/pkg/api/server/register_archive.go
new file mode 100644
index 000000000..a1d5941bc
--- /dev/null
+++ b/pkg/api/server/register_archive.go
@@ -0,0 +1,171 @@
+package server
+
+import (
+ "net/http"
+
+ "github.com/containers/libpod/pkg/api/handlers/compat"
+ "github.com/containers/libpod/pkg/api/handlers/libpod"
+ "github.com/gorilla/mux"
+)
+
+func (s *APIServer) registerArchiveHandlers(r *mux.Router) error {
+ // swagger:operation POST /containers/{name}/archive compat putArchive
+ // ---
+ // summary: Put files into a container
+ // description: Put a tar archive of files into a container
+ // tags:
+ // - containers (compat)
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // description: container name or id
+ // required: true
+ // - in: query
+ // name: path
+ // type: string
+ // description: Path to a directory in the container to extract the archive into
+ // required: true
+ // - in: query
+ // name: noOverwriteDirNonDir
+ // type: string
+ // description: if set (1 or true), error when unpacking the given content would replace an existing directory with a non-directory, and vice versa
+ // - in: query
+ // name: copyUIDGID
+ // type: string
+ // description: copy UID/GID maps to the dest file or dir (1 or true)
+ // - in: body
+ // name: request
+ // description: tarfile of files to copy into the container
+ // schema:
+ // type: string
+ // responses:
+ // 200:
+ // description: no error
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 403:
+ // description: the container rootfs is read-only
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+
+ // swagger:operation GET /containers/{name}/archive compat getArchive
+ // ---
+ // summary: Get files from a container
+ // description: Get a tar archive of files from a container
+ // tags:
+ // - containers (compat)
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // description: container name or id
+ // required: true
+ // - in: query
+ // name: path
+ // type: string
+ // description: Path to a directory in the container to extract
+ // required: true
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: string
+ // format: binary
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/containers/{name}/archive"), s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPost)
+ // Also register the non-versioned path, to support Docker clients that use unversioned URIs
+ r.HandleFunc("/containers/{name}/archive", s.APIHandler(compat.Archive)).Methods(http.MethodGet, http.MethodPost)
+
+ /*
+ Libpod
+ */
+
+ // swagger:operation POST /libpod/containers/{name}/copy libpod libpodPutArchive
+ // ---
+ // summary: Copy files into a container
+ // description: Copy a tar archive of files into a container
+ // tags:
+ // - containers
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // description: container name or id
+ // required: true
+ // - in: query
+ // name: path
+ // type: string
+ // description: Path to a directory in the container to extract the archive into
+ // required: true
+ // - in: query
+ // name: pause
+ // type: boolean
+ // description: pause the container while copying (defaults to true)
+ // default: true
+ // - in: body
+ // name: request
+ // description: tarfile of files to copy into the container
+ // schema:
+ // type: string
+ // responses:
+ // 200:
+ // description: no error
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 403:
+ // description: the container rootfs is read-only
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+
+ // swagger:operation GET /libpod/containers/{name}/copy libpod libpodGetArchive
+ // ---
+ // summary: Copy files from a container
+ // description: Copy a tar archive of files from a container
+ // tags:
+ // - containers
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // description: container name or id
+ // required: true
+ // - in: query
+ // name: path
+ // type: string
+ // description: Path to a directory in the container to extract
+ // required: true
+ // responses:
+ // 200:
+ // description: no error
+ // schema:
+ // type: string
+ // format: binary
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 404:
+ // $ref: "#/responses/NoSuchContainer"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/copy"), s.APIHandler(libpod.Archive)).Methods(http.MethodGet, http.MethodPost)
+ r.HandleFunc(VersionedPath("/libpod/containers/{name}/archive"), s.APIHandler(libpod.Archive)).Methods(http.MethodGet, http.MethodPost)
+
+ return nil
+}
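
Since compat.Archive and libpod.Archive are stubs for now, these routes should
answer 501 until real handlers land. A hedged smoke test, with the address and
container name as placeholders:

    // Sketch: confirm the archive stub replies 501 Not Implemented.
    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        resp, err := http.Get("http://localhost:8080/containers/demo/archive?path=/etc")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.StatusCode) // expected: 501 while the handler is a stub
    }
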
diff --git a/pkg/api/server/register_events.go b/pkg/api/server/register_events.go
index e909303da..2b85eb169 100644
--- a/pkg/api/server/register_events.go
+++ b/pkg/api/server/register_events.go
@@ -58,6 +58,11 @@ func (s *APIServer) registerEventsHandlers(r *mux.Router) error {
// type: string
// in: query
// description: JSON encoded map[string][]string of constraints
+ // - name: stream
+ // type: boolean
+ // in: query
+ // default: true
+ // description: when false, do not follow events
// responses:
// 200:
// description: returns a string of json data describing an event
diff --git a/pkg/api/server/register_images.go b/pkg/api/server/register_images.go
index c885dc81a..83584d0f3 100644
--- a/pkg/api/server/register_images.go
+++ b/pkg/api/server/register_images.go
@@ -8,6 +8,10 @@ import (
"github.com/gorilla/mux"
)
+// TODO
+//
+// * /images/create is missing the "message" and "platform" parameters
+
func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// swagger:operation POST /images/create compat createImage
// ---
@@ -631,13 +635,14 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// required: true
// description: Name of image to push.
// - in: query
- // name: tag
+ // name: destination
// type: string
- // description: The tag to associate with the image on the registry.
+ // description: Allows for pushing the image to a different destination than the image refers to.
// - in: query
- // name: credentials
- // description: username:password for the registry.
- // type: string
+ // name: tlsVerify
+ // description: Require TLS verification.
+ // type: boolean
+ // default: true
// - in: header
// name: X-Registry-Auth
// type: string
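
Putting the renamed parameters together, a hedged sketch of a push request. The
route shown assumes the libpod push endpoint (a /v<version> prefix may be
required depending on how the service is configured); address, image, and
destination are placeholders, and the header uses the single-auth encoding from
pkg/auth:

    // Sketch: push an image with destination, tlsVerify, and auth header.
    package main

    import (
        "encoding/base64"
        "encoding/json"
        "net/http"
        "net/url"
    )

    func main() {
        creds, _ := json.Marshal(map[string]string{"username": "user", "password": "secret"})
        params := url.Values{}
        params.Set("destination", "quay.io/demo/alpine:v2")
        params.Set("tlsVerify", "true")
        req, _ := http.NewRequest(http.MethodPost,
            "http://localhost:8080/libpod/images/alpine/push?"+params.Encode(), nil)
        req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(creds))
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
    }
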
diff --git a/pkg/api/server/register_networks.go b/pkg/api/server/register_networks.go
index b1189c1f4..2c60b2b27 100644
--- a/pkg/api/server/register_networks.go
+++ b/pkg/api/server/register_networks.go
@@ -3,11 +3,96 @@ package server
import (
"net/http"
+ "github.com/containers/libpod/pkg/api/handlers/compat"
"github.com/containers/libpod/pkg/api/handlers/libpod"
"github.com/gorilla/mux"
)
func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
+ // swagger:operation DELETE /networks/{name} compat compatRemoveNetwork
+ // ---
+ // tags:
+ // - networks (compat)
+ // summary: Remove a network
+ // description: Remove a network
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name of the network
+ // produces:
+ // - application/json
+ // responses:
+ // 204:
+ // description: no error
+ // 404:
+ // $ref: "#/responses/NoSuchNetwork"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/networks/{name}"), s.APIHandler(compat.RemoveNetwork)).Methods(http.MethodDelete)
+ r.HandleFunc("/networks/{name}", s.APIHandler(compat.RemoveNetwork)).Methods(http.MethodDelete)
+ // swagger:operation GET /networks/{name}/json compat compatInspectNetwork
+ // ---
+ // tags:
+ // - networks (compat)
+ // summary: Inspect a network
+ // description: Display low-level configuration of a network
+ // parameters:
+ // - in: path
+ // name: name
+ // type: string
+ // required: true
+ // description: the name of the network
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: "#/responses/CompatNetworkInspect"
+ // 404:
+ // $ref: "#/responses/NoSuchNetwork"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/networks/{name}/json"), s.APIHandler(compat.InspectNetwork)).Methods(http.MethodGet)
+ r.HandleFunc("/networks/{name}/json", s.APIHandler(compat.InspectNetwork)).Methods(http.MethodGet)
+ // swagger:operation GET /networks/json compat compatListNetwork
+ // ---
+ // tags:
+ // - networks (compat)
+ // summary: List networks
+ // description: Display summary of network configurations
+ // produces:
+ // - application/json
+ // responses:
+ // 200:
+ // $ref: "#/responses/CompatNetworkList"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/networks/json"), s.APIHandler(compat.ListNetworks)).Methods(http.MethodGet)
+ r.HandleFunc("/networks", s.APIHandler(compat.ListNetworks)).Methods(http.MethodGet)
+ // swagger:operation POST /networks/create compat compatCreateNetwork
+ // ---
+ // tags:
+ // - networks (compat)
+ // summary: Create network
+ // description: Create a network configuration
+ // produces:
+ // - application/json
+ // parameters:
+ // - in: body
+ // name: create
+ // description: attributes for creating a network
+ // schema:
+ // $ref: "#/definitions/NetworkCreateRequest"
+ // responses:
+ // 200:
+ // $ref: "#/responses/CompatNetworkCreate"
+ // 400:
+ // $ref: "#/responses/BadParamError"
+ // 500:
+ // $ref: "#/responses/InternalError"
+ r.HandleFunc(VersionedPath("/networks/create"), s.APIHandler(compat.CreateNetwork)).Methods(http.MethodPost)
+ r.HandleFunc("/networks/create", s.APIHandler(compat.CreateNetwork)).Methods(http.MethodPost)
// swagger:operation DELETE /libpod/networks/{name} libpod libpodRemoveNetwork
// ---
// tags:
@@ -33,6 +118,11 @@ func (s *APIServer) registerNetworkHandlers(r *mux.Router) error {
// $ref: "#/responses/NoSuchNetwork"
// 500:
// $ref: "#/responses/InternalError"
+
+ /*
+ Libpod
+ */
+
r.HandleFunc(VersionedPath("/libpod/networks/{name}"), s.APIHandler(libpod.RemoveNetwork)).Methods(http.MethodDelete)
// swagger:operation GET /libpod/networks/{name}/json libpod libpodInspectNetwork
// ---
diff --git a/pkg/api/server/server.go b/pkg/api/server/server.go
index d39528f45..499a4c58a 100644
--- a/pkg/api/server/server.go
+++ b/pkg/api/server/server.go
@@ -92,8 +92,17 @@ func newServer(runtime *libpod.Runtime, duration time.Duration, listener *net.Li
},
)
+ router.MethodNotAllowedHandler = http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ // We can track user errors...
+ logrus.Infof("Failed Request: (%d:%s) for %s:'%s'", http.StatusMethodNotAllowed, http.StatusText(http.StatusMethodNotAllowed), r.Method, r.URL.String())
+ http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+ },
+ )
+
for _, fn := range []func(*mux.Router) error{
server.registerAuthHandlers,
+ server.registerArchiveHandlers,
server.registerContainersHandlers,
server.registerDistributionHandlers,
server.registerEventsHandlers,
diff --git a/pkg/api/server/swagger.go b/pkg/api/server/swagger.go
index ebd99ba27..c463f809e 100644
--- a/pkg/api/server/swagger.go
+++ b/pkg/api/server/swagger.go
@@ -139,6 +139,13 @@ type swagImageSummary struct {
Body []entities.ImageSummary
}
+// Registries summary
+// swagger:response DocsRegistriesList
+type swagRegistriesList struct {
+ // in:body
+ Body entities.ListRegistriesReport
+}
+
// List Containers
// swagger:response DocsListContainer
type swagListContainers struct {
diff --git a/pkg/api/tags.yaml b/pkg/api/tags.yaml
index 1ffb5e940..f86f8dbea 100644
--- a/pkg/api/tags.yaml
+++ b/pkg/api/tags.yaml
@@ -21,5 +21,7 @@ tags:
description: Actions related to exec for the compatibility endpoints
- name: images (compat)
description: Actions related to images for the compatibility endpoints
+ - name: networks (compat)
+ description: Actions related to compatibility networks
- name: system (compat)
description: Actions related to Podman and compatibility engines
diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go
new file mode 100644
index 000000000..ffa65f7e5
--- /dev/null
+++ b/pkg/auth/auth.go
@@ -0,0 +1,216 @@
+package auth
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+
+ imageAuth "github.com/containers/image/v5/pkg/docker/config"
+ "github.com/containers/image/v5/types"
+ dockerAPITypes "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// XRegistryAuthHeader is the key to the encoded registry authentication
+// configuration in an http-request header.
+const XRegistryAuthHeader = "X-Registry-Auth"
+
+// GetCredentials extracts one or more DockerAuthConfigs from the request's
+// header. The header could specify a single-auth config in which case the
+// first return value is set. In case of a multi-auth header, the contents are
+// stored in a temporary auth file (2nd return value). Note that the auth file
+// should be removed after usage.
+func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) {
+ authHeader := r.Header.Get(XRegistryAuthHeader)
+ if len(authHeader) == 0 {
+ return nil, "", nil
+ }
+
+ // First look for a multi-auth header (i.e., a map).
+ authConfigs, err := multiAuthHeader(r)
+ if err == nil {
+ authfile, err := authConfigsToAuthFile(authConfigs)
+ return nil, authfile, err
+ }
+
+ // Fallback to looking for a single-auth header (i.e., one config).
+ authConfigs, err = singleAuthHeader(r)
+ if err != nil {
+ return nil, "", err
+ }
+ var conf *types.DockerAuthConfig
+ for k := range authConfigs {
+ c := authConfigs[k]
+ conf = &c
+ break
+ }
+ return conf, "", nil
+}
+
+// Header returns a map with the XRegistryAuthHeader set which can
+// conveniently be used in the http stack.
+func Header(sys *types.SystemContext, authfile, username, password string) (map[string]string, error) {
+ var content string
+ var err error
+
+ if username != "" {
+ content, err = encodeSingleAuthConfig(types.DockerAuthConfig{Username: username, Password: password})
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ if sys == nil {
+ sys = &types.SystemContext{}
+ }
+ if authfile != "" {
+ sys.AuthFilePath = authfile
+ }
+ authConfigs, err := imageAuth.GetAllCredentials(sys)
+ if err != nil {
+ return nil, err
+ }
+ content, err = encodeMultiAuthConfigs(authConfigs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ header := make(map[string]string)
+ header[XRegistryAuthHeader] = content
+
+ return header, nil
+}
+
+// RemoveAuthfile is a convenience function that is meant to be called in a
+// deferred statement. If non-empty, it removes the specified authfile and logs
+// errors. It's meant to reduce boilerplate code at call sites of
+// `GetCredentials`.
+func RemoveAuthfile(authfile string) {
+ if authfile == "" {
+ return
+ }
+ if err := os.Remove(authfile); err != nil {
+ logrus.Errorf("Error removing temporary auth file %q: %v", authfile, err)
+ }
+}
+
+// encodeSingleAuthConfig serializes the auth configuration as a base64 encoded JSON payload.
+func encodeSingleAuthConfig(authConfig types.DockerAuthConfig) (string, error) {
+ conf := imageAuthToDockerAuth(authConfig)
+ buf, err := json.Marshal(conf)
+ if err != nil {
+ return "", err
+ }
+ return base64.URLEncoding.EncodeToString(buf), nil
+}
+
+// encodeMultiAuthConfigs serializes the auth configurations as a base64 encoded JSON payload.
+func encodeMultiAuthConfigs(authConfigs map[string]types.DockerAuthConfig) (string, error) {
+ confs := make(map[string]dockerAPITypes.AuthConfig)
+ for registry, authConf := range authConfigs {
+ confs[registry] = imageAuthToDockerAuth(authConf)
+ }
+ buf, err := json.Marshal(confs)
+ if err != nil {
+ return "", err
+ }
+ return base64.URLEncoding.EncodeToString(buf), nil
+}
+
+// authConfigsToAuthFile stores the specified auth configs in a temporary file
+// and returns its path. The file can later be used as an auth file for
+// contacting one or more container registries. The file is created in the
+// system's default temporary directory.
+func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (string, error) {
+ // Initialize an empty temporary JSON file.
+ tmpFile, err := ioutil.TempFile("", "auth.json.")
+ if err != nil {
+ return "", err
+ }
+ if _, err := tmpFile.Write([]byte{'{', '}'}); err != nil {
+ return "", errors.Wrap(err, "error initializing temporary auth file")
+ }
+ if err := tmpFile.Close(); err != nil {
+ return "", errors.Wrap(err, "error closing temporary auth file")
+ }
+ authFilePath := tmpFile.Name()
+
+ // TODO: It would be nice if c/image could dump the map at once.
+ //
+ // Now use the c/image packages to store the credentials. It's battle
+ // tested, and we make sure to use the same code as the image backend.
+ sys := types.SystemContext{AuthFilePath: authFilePath}
+ for server, config := range authConfigs {
+ // Note that we do not validate the credentials here. We assume
+ // that all credentials are valid. They'll be used on demand
+ // later.
+ if err := imageAuth.SetAuthentication(&sys, server, config.Username, config.Password); err != nil {
+ return "", errors.Wrapf(err, "error storing credentials in temporary auth file (server: %q, user: %q)", server, config.Username)
+ }
+ }
+
+ return authFilePath, nil
+}
+
+// dockerAuthToImageAuth converts a docker auth config to one we're using
+// internally from c/image. Note that the Docker types look slightly
+// different, so we need to convert to be extra sure we're not running into
+// undesired side-effects when unmarshalling directly to our types.
+func dockerAuthToImageAuth(authConfig dockerAPITypes.AuthConfig) types.DockerAuthConfig {
+ return types.DockerAuthConfig{
+ Username: authConfig.Username,
+ Password: authConfig.Password,
+ IdentityToken: authConfig.IdentityToken,
+ }
+}
+
+// imageAuthToDockerAuth is the reverse of dockerAuthToImageAuth.
+func imageAuthToDockerAuth(authConfig types.DockerAuthConfig) dockerAPITypes.AuthConfig {
+ return dockerAPITypes.AuthConfig{
+ Username: authConfig.Username,
+ Password: authConfig.Password,
+ IdentityToken: authConfig.IdentityToken,
+ }
+}
+
+// singleAuthHeader extracts a DockerAuthConfig from the request's header.
+// The header content is a single DockerAuthConfig.
+func singleAuthHeader(r *http.Request) (map[string]types.DockerAuthConfig, error) {
+ authHeader := r.Header.Get(XRegistryAuthHeader)
+ authConfig := dockerAPITypes.AuthConfig{}
+ if len(authHeader) > 0 {
+ authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authHeader))
+ if err := json.NewDecoder(authJSON).Decode(&authConfig); err != nil {
+ return nil, err
+ }
+ }
+ authConfigs := make(map[string]types.DockerAuthConfig)
+ authConfigs["0"] = dockerAuthToImageAuth(authConfig)
+ return authConfigs, nil
+}
+
+// multiAuthHeader extracts a map of DockerAuthConfigs from the request's
+// header. The header content is a map[string]DockerAuthConfig.
+func multiAuthHeader(r *http.Request) (map[string]types.DockerAuthConfig, error) {
+ authHeader := r.Header.Get(XRegistryAuthHeader)
+ if len(authHeader) == 0 {
+ return nil, nil
+ }
+
+ dockerAuthConfigs := make(map[string]dockerAPITypes.AuthConfig)
+ authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authHeader))
+ if err := json.NewDecoder(authJSON).Decode(&dockerAuthConfigs); err != nil {
+ return nil, err
+ }
+
+ // Now convert to the internal types.
+ authConfigs := make(map[string]types.DockerAuthConfig)
+ for server := range dockerAuthConfigs {
+ authConfigs[server] = dockerAuthToImageAuth(dockerAuthConfigs[server])
+ }
+ return authConfigs, nil
+}
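
End to end, the helper pair is symmetric: a client builds the header with
Header(), the server decodes it with GetCredentials(). A self-contained sketch
using httptest in place of the real API server (user and password are
placeholders):

    // Sketch: X-Registry-Auth round trip through pkg/auth.
    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"

        "github.com/containers/libpod/pkg/auth"
    )

    func main() {
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            conf, authfile, err := auth.GetCredentials(r)
            defer auth.RemoveAuthfile(authfile) // no-op for the single-auth case
            if err != nil || conf == nil {
                http.Error(w, "bad auth header", http.StatusBadRequest)
                return
            }
            fmt.Fprintf(w, "authenticated as %s", conf.Username)
        }))
        defer srv.Close()

        // A non-empty username makes Header() emit the single-auth encoding.
        hdr, err := auth.Header(nil, "", "user", "secret")
        if err != nil {
            panic(err)
        }
        req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
        for k, v := range hdr {
            req.Header.Set(k, v)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
    }
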
diff --git a/pkg/bindings/connection.go b/pkg/bindings/connection.go
index d21d55beb..e9032f083 100644
--- a/pkg/bindings/connection.go
+++ b/pkg/bindings/connection.go
@@ -16,7 +16,6 @@ import (
"time"
"github.com/blang/semver"
- "github.com/containers/libpod/pkg/api/types"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -28,7 +27,7 @@ var (
basePath = &url.URL{
Scheme: "http",
Host: "d",
- Path: "/v" + types.MinimalAPIVersion + "/libpod",
+ Path: "/v" + APIVersion.String() + "/libpod",
}
)
@@ -151,23 +150,28 @@ func pingNewConnection(ctx context.Context) error {
return err
}
// the ping endpoint sits at / in this case
- response, err := client.DoRequest(nil, http.MethodGet, "../../../_ping", nil)
+ response, err := client.DoRequest(nil, http.MethodGet, "../../../_ping", nil, nil)
if err != nil {
return err
}
if response.StatusCode == http.StatusOK {
- v, err := semver.ParseTolerant(response.Header.Get("Libpod-API-Version"))
+ versionHdr := response.Header.Get("Libpod-API-Version")
+ if versionHdr == "" {
+ logrus.Info("Service did not provide Libpod-API-Version Header")
+ return nil
+ }
+ versionSrv, err := semver.ParseTolerant(versionHdr)
if err != nil {
return err
}
- switch APIVersion.Compare(v) {
+ switch APIVersion.Compare(versionSrv) {
case 1, 0:
// Server's job when client version is equal or older
return nil
case -1:
- return errors.Errorf("server API version is too old. client %q server %q", APIVersion.String(), v.String())
+ return errors.Errorf("server API version is too old. client %q server %q", APIVersion.String(), versionSrv.String())
}
}
return errors.Errorf("ping response was %q", response.StatusCode)
@@ -246,7 +250,7 @@ func unixClient(_url *url.URL) (Connection, error) {
}
// DoRequest assembles the http request and returns the response
-func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string, queryParams url.Values, pathValues ...string) (*APIResponse, error) {
+func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string, queryParams url.Values, header map[string]string, pathValues ...string) (*APIResponse, error) {
var (
err error
response *http.Response
@@ -267,6 +271,9 @@ func (c *Connection) DoRequest(httpBody io.Reader, httpMethod, endpoint string,
if len(queryParams) > 0 {
req.URL.RawQuery = queryParams.Encode()
}
+ for key, val := range header {
+ req.Header.Set(key, val)
+ }
req = req.WithContext(context.WithValue(context.Background(), clientKey, c))
// Give the Do three chances in the case of a comm/service hiccup
for i := 0; i < 3; i++ {
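
The extra map parameter threads straight through to the outgoing request, which
is what lets the bindings attach auth.Header() output to any endpoint. A hedged
sketch of a binding-style helper under the new signature (the endpoint and
credentials are placeholders; GetClient and APIResponse.Process are assumed to
behave as elsewhere in the bindings):

    // Sketch: pass an auth header through the updated DoRequest.
    package example

    import (
        "context"
        "net/http"
        "net/url"

        "github.com/containers/libpod/pkg/auth"
        "github.com/containers/libpod/pkg/bindings"
    )

    func pullWithAuth(ctx context.Context, image string) error {
        conn, err := bindings.GetClient(ctx)
        if err != nil {
            return err
        }
        header, err := auth.Header(nil, "", "user", "secret") // placeholder creds
        if err != nil {
            return err
        }
        params := url.Values{}
        params.Set("reference", image)
        // nil body, query params, the new header map, no path values
        response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params, header)
        if err != nil {
            return err
        }
        return response.Process(nil)
    }
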
diff --git a/pkg/bindings/containers/checkpoint.go b/pkg/bindings/containers/checkpoint.go
index 84924587b..f483a9297 100644
--- a/pkg/bindings/containers/checkpoint.go
+++ b/pkg/bindings/containers/checkpoint.go
@@ -34,7 +34,7 @@ func Checkpoint(ctx context.Context, nameOrId string, keep, leaveRunning, tcpEst
if export != nil {
params.Set("export", *export)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/checkpoint", params, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/checkpoint", params, nil, nameOrId)
if err != nil {
return nil, err
}
@@ -71,7 +71,7 @@ func Restore(ctx context.Context, nameOrId string, keep, tcpEstablished, ignoreR
if importArchive != nil {
params.Set("import", *importArchive)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restore", params, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restore", params, nil, nameOrId)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/containers/commit.go b/pkg/bindings/containers/commit.go
index 12c25f842..780d42272 100644
--- a/pkg/bindings/containers/commit.go
+++ b/pkg/bindings/containers/commit.go
@@ -41,7 +41,7 @@ func Commit(ctx context.Context, nameOrId string, options CommitOptions) (handle
if options.Tag != nil {
params.Set("tag", *options.Tag)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/commit", params)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/commit", params, nil)
if err != nil {
return id, err
}
diff --git a/pkg/bindings/containers/containers.go b/pkg/bindings/containers/containers.go
index 39a077f36..516f3d282 100644
--- a/pkg/bindings/containers/containers.go
+++ b/pkg/bindings/containers/containers.go
@@ -9,6 +9,7 @@ import (
"net/url"
"os"
"os/signal"
+ "reflect"
"strconv"
"strings"
@@ -60,7 +61,7 @@ func List(ctx context.Context, filters map[string][]string, all *bool, last *int
}
params.Set("filters", filterString)
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/json", params)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/json", params, nil)
if err != nil {
return containers, err
}
@@ -85,7 +86,7 @@ func Prune(ctx context.Context, filters map[string][]string) (*entities.Containe
}
params.Set("filters", filterString)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/prune", params)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/prune", params, nil)
if err != nil {
return nil, err
}
@@ -107,7 +108,7 @@ func Remove(ctx context.Context, nameOrID string, force, volumes *bool) error {
if volumes != nil {
params.Set("vols", strconv.FormatBool(*volumes))
}
- response, err := conn.DoRequest(nil, http.MethodDelete, "/containers/%s", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/containers/%s", params, nil, nameOrID)
if err != nil {
return err
}
@@ -127,7 +128,7 @@ func Inspect(ctx context.Context, nameOrID string, size *bool) (*define.InspectC
if size != nil {
params.Set("size", strconv.FormatBool(*size))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/json", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/json", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -145,7 +146,7 @@ func Kill(ctx context.Context, nameOrID string, sig string) error {
}
params := url.Values{}
params.Set("signal", sig)
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/kill", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/kill", params, nil, nameOrID)
if err != nil {
return err
}
@@ -160,7 +161,7 @@ func Pause(ctx context.Context, nameOrID string) error {
if err != nil {
return err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/pause", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/pause", nil, nil, nameOrID)
if err != nil {
return err
}
@@ -179,7 +180,7 @@ func Restart(ctx context.Context, nameOrID string, timeout *int) error {
if timeout != nil {
params.Set("t", strconv.Itoa(*timeout))
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restart", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/restart", params, nil, nameOrID)
if err != nil {
return err
}
@@ -198,7 +199,7 @@ func Start(ctx context.Context, nameOrID string, detachKeys *string) error {
if detachKeys != nil {
params.Set("detachKeys", *detachKeys)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/start", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/start", params, nil, nameOrID)
if err != nil {
return err
}
@@ -220,7 +221,7 @@ func Top(ctx context.Context, nameOrID string, descriptors []string) ([]string,
// flatten the slice into one string
params.Set("ps_args", strings.Join(descriptors, ","))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/top", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/top", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -248,7 +249,7 @@ func Unpause(ctx context.Context, nameOrID string) error {
if err != nil {
return err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/unpause", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/unpause", nil, nil, nameOrID)
if err != nil {
return err
}
@@ -268,7 +269,7 @@ func Wait(ctx context.Context, nameOrID string, condition *define.ContainerStatu
if condition != nil {
params.Set("condition", condition.String())
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/wait", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/wait", params, nil, nameOrID)
if err != nil {
return exitCode, err
}
@@ -283,7 +284,7 @@ func Exists(ctx context.Context, nameOrID string) (bool, error) {
if err != nil {
return false, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/exists", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/exists", nil, nil, nameOrID)
if err != nil {
return false, err
}
@@ -301,7 +302,7 @@ func Stop(ctx context.Context, nameOrID string, timeout *uint) error {
if timeout != nil {
params.Set("t", strconv.Itoa(int(*timeout)))
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/stop", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/stop", params, nil, nameOrID)
if err != nil {
return err
}
@@ -316,7 +317,7 @@ func Export(ctx context.Context, nameOrID string, w io.Writer) error {
if err != nil {
return err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/export", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/export", params, nil, nameOrID)
if err != nil {
return err
}
@@ -335,7 +336,7 @@ func ContainerInit(ctx context.Context, nameOrID string) error {
if err != nil {
return err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/init", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/init", nil, nil, nameOrID)
if err != nil {
return err
}
@@ -347,6 +348,26 @@ func ContainerInit(ctx context.Context, nameOrID string) error {
// Attach attaches to a running container
func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stream *bool, stdin io.Reader, stdout io.Writer, stderr io.Writer, attachReady chan bool) error {
+ isSet := struct {
+ stdin bool
+ stdout bool
+ stderr bool
+ }{
+ stdin: !(stdin == nil || reflect.ValueOf(stdin).IsNil()),
+ stdout: !(stdout == nil || reflect.ValueOf(stdout).IsNil()),
+ stderr: !(stderr == nil || reflect.ValueOf(stderr).IsNil()),
+ }
+ // Ensure golang can determine that interfaces are "really" nil
+ if !isSet.stdin {
+ stdin = (io.Reader)(nil)
+ }
+ if !isSet.stdout {
+ stdout = (io.Writer)(nil)
+ }
+ if !isSet.stderr {
+ stderr = (io.Writer)(nil)
+ }
+
conn, err := bindings.GetClient(ctx)
if err != nil {
return err
@@ -368,13 +389,13 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre
if stream != nil {
params.Add("stream", fmt.Sprintf("%t", *stream))
}
- if stdin != nil {
+ if isSet.stdin {
params.Add("stdin", "true")
}
- if stdout != nil {
+ if isSet.stdout {
params.Add("stdout", "true")
}
- if stderr != nil {
+ if isSet.stderr {
params.Add("stderr", "true")
}
@@ -422,32 +443,26 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre
}()
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/attach", params, nameOrId)
+ response, err := conn.DoRequest(stdin, http.MethodPost, "/containers/%s/attach", params, nil, nameOrId)
if err != nil {
return err
}
- defer response.Body.Close()
+ if !(response.IsSuccess() || response.IsInformational()) {
+ return response.Process(nil)
+ }
+
// If we are attaching around a start, we need to "signal"
// back that we are in fact attached so that started does
// not execute before we can attach.
if attachReady != nil {
attachReady <- true
}
- if !(response.IsSuccess() || response.IsInformational()) {
- return response.Process(nil)
- }
-
- if stdin != nil {
- go func() {
- _, err := io.Copy(conn, stdin)
- if err != nil {
- logrus.Error("failed to write input to service: " + err.Error())
- }
- }()
- }
buffer := make([]byte, 1024)
if ctnr.Config.Tty {
+ if !isSet.stdout {
+ return fmt.Errorf("container %q requires stdout to be set", ctnr.ID)
+ }
// If not multiplex'ed, read from server and write to stdout
_, err := io.Copy(stdout, response.Body)
if err != nil {
@@ -469,25 +484,25 @@ func Attach(ctx context.Context, nameOrId string, detachKeys *string, logs, stre
}
switch {
- case fd == 0 && stdin != nil:
+ case fd == 0 && isSet.stdout:
_, err := stdout.Write(frame[0:l])
if err != nil {
return err
}
- case fd == 1 && stdout != nil:
+ case fd == 1 && isSet.stdout:
_, err := stdout.Write(frame[0:l])
if err != nil {
return err
}
- case fd == 2 && stderr != nil:
+ case fd == 2 && isSet.stderr:
_, err := stderr.Write(frame[0:l])
if err != nil {
return err
}
case fd == 3:
- return errors.New("error from service in stream: " + string(frame))
+ return fmt.Errorf("error from service from stream: %s", frame)
default:
- return fmt.Errorf("unrecognized input header: %d", fd)
+ return fmt.Errorf("unrecognized channel in header: %d, 0-3 supported", fd)
}
}
}
@@ -520,6 +535,7 @@ func DemuxFrame(r io.Reader, buffer []byte, length int) (frame []byte, err error
if len(buffer) < length {
buffer = append(buffer, make([]byte, length-len(buffer)+1)...)
}
+
n, err := io.ReadFull(r, buffer[0:length])
if err != nil {
return nil, nil
@@ -528,6 +544,7 @@ func DemuxFrame(r io.Reader, buffer []byte, length int) (frame []byte, err error
err = io.ErrUnexpectedEOF
return
}
+
return buffer[0:length], nil
}
@@ -555,7 +572,7 @@ func resizeTTY(ctx context.Context, endpoint string, height *int, width *int) er
if width != nil {
params.Set("w", strconv.Itoa(*width))
}
- rsp, err := conn.DoRequest(nil, http.MethodPost, endpoint, params)
+ rsp, err := conn.DoRequest(nil, http.MethodPost, endpoint, params, nil)
if err != nil {
return err
}
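
Note: the isSet bookkeeping above exists because of Go's "typed nil" interfaces: an io.Writer holding a nil *os.File compares non-nil, so a plain `stdout != nil` check passes even though writing would fail. A self-contained sketch of the pitfall the reflect check guards against:

	package main

	import (
		"fmt"
		"io"
		"os"
		"reflect"
	)

	func main() {
		var f *os.File        // nil pointer
		var w io.Writer = f   // interface holds (type *os.File, value nil)
		fmt.Println(w == nil) // false: the interface itself is non-nil
		// The check used in Attach sees through the typed nil:
		fmt.Println(w == nil || reflect.ValueOf(w).IsNil()) // true
	}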
diff --git a/pkg/bindings/containers/create.go b/pkg/bindings/containers/create.go
index 21355f24b..4603b8653 100644
--- a/pkg/bindings/containers/create.go
+++ b/pkg/bindings/containers/create.go
@@ -22,7 +22,7 @@ func CreateWithSpec(ctx context.Context, s *specgen.SpecGenerator) (entities.Con
return ccr, err
}
stringReader := strings.NewReader(specgenString)
- response, err := conn.DoRequest(stringReader, http.MethodPost, "/containers/create", nil)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/containers/create", nil, nil)
if err != nil {
return ccr, err
}
diff --git a/pkg/bindings/containers/diff.go b/pkg/bindings/containers/diff.go
index 82070ca9a..06a828c30 100644
--- a/pkg/bindings/containers/diff.go
+++ b/pkg/bindings/containers/diff.go
@@ -15,7 +15,7 @@ func Diff(ctx context.Context, nameOrId string) ([]archive.Change, error) {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/changes", nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/changes", nil, nil, nameOrId)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/containers/exec.go b/pkg/bindings/containers/exec.go
index 48f9ed697..2aeeae1f8 100644
--- a/pkg/bindings/containers/exec.go
+++ b/pkg/bindings/containers/exec.go
@@ -34,7 +34,7 @@ func ExecCreate(ctx context.Context, nameOrID string, config *handlers.ExecCreat
}
jsonReader := strings.NewReader(string(requestJSON))
- resp, err := conn.DoRequest(jsonReader, http.MethodPost, "/containers/%s/exec", nil, nameOrID)
+ resp, err := conn.DoRequest(jsonReader, http.MethodPost, "/containers/%s/exec", nil, nil, nameOrID)
if err != nil {
return "", err
}
@@ -57,7 +57,7 @@ func ExecInspect(ctx context.Context, sessionID string) (*define.InspectExecSess
logrus.Debugf("Inspecting session ID %q", sessionID)
- resp, err := conn.DoRequest(nil, http.MethodGet, "/exec/%s/json", nil, sessionID)
+ resp, err := conn.DoRequest(nil, http.MethodGet, "/exec/%s/json", nil, nil, sessionID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/containers/healthcheck.go b/pkg/bindings/containers/healthcheck.go
index 2b783ac73..b726acf49 100644
--- a/pkg/bindings/containers/healthcheck.go
+++ b/pkg/bindings/containers/healthcheck.go
@@ -18,7 +18,7 @@ func RunHealthCheck(ctx context.Context, nameOrID string) (*define.HealthCheckRe
var (
status define.HealthCheckResults
)
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/healthcheck", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/healthcheck", nil, nil, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/containers/logs.go b/pkg/bindings/containers/logs.go
index 20c8b4292..bec4ebb3c 100644
--- a/pkg/bindings/containers/logs.go
+++ b/pkg/bindings/containers/logs.go
@@ -46,11 +46,10 @@ func Logs(ctx context.Context, nameOrID string, opts LogOptions, stdoutChan, std
if opts.Stdout == nil && opts.Stderr == nil {
params.Set("stdout", strconv.FormatBool(true))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/logs", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/%s/logs", params, nil, nameOrID)
if err != nil {
return err
}
- defer response.Body.Close()
buffer := make([]byte, 1024)
for {
diff --git a/pkg/bindings/containers/mount.go b/pkg/bindings/containers/mount.go
index e0627d9a3..2d553142f 100644
--- a/pkg/bindings/containers/mount.go
+++ b/pkg/bindings/containers/mount.go
@@ -17,7 +17,7 @@ func Mount(ctx context.Context, nameOrID string) (string, error) {
var (
path string
)
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/mount", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/mount", nil, nil, nameOrID)
if err != nil {
return path, err
}
@@ -31,7 +31,7 @@ func Unmount(ctx context.Context, nameOrID string) error {
if err != nil {
return err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/unmount", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/containers/%s/unmount", nil, nil, nameOrID)
if err != nil {
return err
}
@@ -45,7 +45,7 @@ func GetMountedContainerPaths(ctx context.Context) (map[string]string, error) {
return nil, err
}
mounts := make(map[string]string)
- response, err := conn.DoRequest(nil, http.MethodGet, "/containers/showmounted", nil)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/containers/showmounted", nil, nil)
if err != nil {
return mounts, err
}
diff --git a/pkg/bindings/generate/generate.go b/pkg/bindings/generate/generate.go
index d3177133f..161b722f3 100644
--- a/pkg/bindings/generate/generate.go
+++ b/pkg/bindings/generate/generate.go
@@ -18,7 +18,7 @@ func GenerateKube(ctx context.Context, nameOrID string, options entities.Generat
params := url.Values{}
params.Set("service", strconv.FormatBool(options.Service))
- response, err := conn.DoRequest(nil, http.MethodGet, "/generate/%s/kube", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/generate/%s/kube", params, nil, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/images/diff.go b/pkg/bindings/images/diff.go
index cfdd06a97..e2d344ea0 100644
--- a/pkg/bindings/images/diff.go
+++ b/pkg/bindings/images/diff.go
@@ -15,7 +15,7 @@ func Diff(ctx context.Context, nameOrId string) ([]archive.Change, error) {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/changes", nil, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/changes", nil, nil, nameOrId)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/images/images.go b/pkg/bindings/images/images.go
index f9c02d199..e0802a6e1 100644
--- a/pkg/bindings/images/images.go
+++ b/pkg/bindings/images/images.go
@@ -12,6 +12,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/pkg/api/handlers"
+ "github.com/containers/libpod/pkg/auth"
"github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/docker/go-units"
@@ -26,7 +27,7 @@ func Exists(ctx context.Context, nameOrID string) (bool, error) {
if err != nil {
return false, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/exists", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/exists", nil, nil, nameOrID)
if err != nil {
return false, err
}
@@ -52,7 +53,7 @@ func List(ctx context.Context, all *bool, filters map[string][]string) ([]*entit
}
params.Set("filters", strFilters)
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/json", params)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/json", params, nil)
if err != nil {
return imageSummary, err
}
@@ -71,7 +72,7 @@ func GetImage(ctx context.Context, nameOrID string, size *bool) (*entities.Image
params.Set("size", strconv.FormatBool(*size))
}
inspectedData := entities.ImageInspectReport{}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/json", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/json", params, nil, nameOrID)
if err != nil {
return &inspectedData, err
}
@@ -89,7 +90,7 @@ func Tree(ctx context.Context, nameOrId string, whatRequires *bool) (*entities.I
if whatRequires != nil {
params.Set("size", strconv.FormatBool(*whatRequires))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/tree", params, nameOrId)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/tree", params, nil, nameOrId)
if err != nil {
return nil, err
}
@@ -103,7 +104,7 @@ func History(ctx context.Context, nameOrID string) ([]*handlers.HistoryResponse,
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/history", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/history", nil, nil, nameOrID)
if err != nil {
return history, err
}
@@ -120,7 +121,7 @@ func Load(ctx context.Context, r io.Reader, name *string) (*entities.ImageLoadRe
if name != nil {
params.Set("reference", *name)
}
- response, err := conn.DoRequest(r, http.MethodPost, "/images/load", params)
+ response, err := conn.DoRequest(r, http.MethodPost, "/images/load", params, nil)
if err != nil {
return nil, err
}
@@ -141,7 +142,7 @@ func Export(ctx context.Context, nameOrID string, w io.Writer, format *string, c
if compress != nil {
params.Set("compress", strconv.FormatBool(*compress))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/get", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/%s/get", params, nil, nameOrID)
if err != nil {
return err
}
@@ -174,7 +175,7 @@ func Prune(ctx context.Context, all *bool, filters map[string][]string) ([]strin
}
params.Set("filters", stringFilter)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/images/prune", params)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/images/prune", params, nil)
if err != nil {
return deleted, err
}
@@ -190,7 +191,7 @@ func Tag(ctx context.Context, nameOrID, tag, repo string) error {
params := url.Values{}
params.Set("tag", tag)
params.Set("repo", repo)
- response, err := conn.DoRequest(nil, http.MethodPost, "/images/%s/tag", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/images/%s/tag", params, nil, nameOrID)
if err != nil {
return err
}
@@ -206,7 +207,7 @@ func Untag(ctx context.Context, nameOrID, tag, repo string) error {
params := url.Values{}
params.Set("tag", tag)
params.Set("repo", repo)
- response, err := conn.DoRequest(nil, http.MethodPost, "/images/%s/untag", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/images/%s/untag", params, nil, nameOrID)
if err != nil {
return err
}
@@ -297,7 +298,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
}
// TODO outputs?
- response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params)
+ response, err := conn.DoRequest(tarfile, http.MethodPost, "/build", params, nil)
if err != nil {
return nil, err
}
@@ -341,7 +342,7 @@ func Import(ctx context.Context, changes []string, message, reference, u *string
if u != nil {
params.Set("url", *u)
}
- response, err := conn.DoRequest(r, http.MethodPost, "/images/import", params)
+ response, err := conn.DoRequest(r, http.MethodPost, "/images/import", params, nil)
if err != nil {
return nil, err
}
@@ -359,7 +360,6 @@ func Pull(ctx context.Context, rawImage string, options entities.ImagePullOption
}
params := url.Values{}
params.Set("reference", rawImage)
- params.Set("credentials", options.Credentials)
params.Set("overrideArch", options.OverrideArch)
params.Set("overrideOS", options.OverrideOS)
if options.SkipTLSVerify != types.OptionalBoolUndefined {
@@ -369,7 +369,13 @@ func Pull(ctx context.Context, rawImage string, options entities.ImagePullOption
}
params.Set("allTags", strconv.FormatBool(options.AllTags))
- response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params)
+ // TODO: have a global system context we can pass around (1st argument)
+ header, err := auth.Header(nil, options.Authfile, options.Username, options.Password)
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := conn.DoRequest(nil, http.MethodPost, "/images/pull", params, header)
if err != nil {
return nil, err
}
@@ -397,8 +403,14 @@ func Push(ctx context.Context, source string, destination string, options entiti
if err != nil {
return err
}
+
+ // TODO: have a global system context we can pass around (1st argument)
+ header, err := auth.Header(nil, options.Authfile, options.Username, options.Password)
+ if err != nil {
+ return err
+ }
+
params := url.Values{}
- params.Set("credentials", options.Credentials)
params.Set("destination", destination)
if options.SkipTLSVerify != types.OptionalBoolUndefined {
// Note: we have to verify if skipped is false.
@@ -407,8 +419,12 @@ func Push(ctx context.Context, source string, destination string, options entiti
}
path := fmt.Sprintf("/images/%s/push", source)
- _, err = conn.DoRequest(nil, http.MethodPost, path, params)
- return err
+ response, err := conn.DoRequest(nil, http.MethodPost, path, params, header)
+ if err != nil {
+ return err
+ }
+
+ return response.Process(err)
}
// Search is the binding for libpod's v2 endpoints for Search images.
@@ -430,7 +446,13 @@ func Search(ctx context.Context, term string, opts entities.ImageSearchOptions)
params.Set("tlsVerify", strconv.FormatBool(verifyTLS))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/images/search", params)
+ // TODO: have a global system context we can pass around (1st argument)
+ header, err := auth.Header(nil, opts.Authfile, "", "")
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := conn.DoRequest(nil, http.MethodGet, "/images/search", params, header)
if err != nil {
return nil, err
}
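
Note: credentials no longer travel as a `credentials` query parameter; auth.Header encodes them (or the contents of an authfile) into a request header that DoRequest forwards to the service. A hedged usage sketch, assuming a bindings connection context as set up in the test suite below:

	// Sketch: remote pull with explicit credentials; Username/Password are
	// the fields this patch adds to entities.ImagePullOptions.
	opts := entities.ImagePullOptions{
		Username: "user", // placeholder credentials
		Password: "secret",
	}
	ids, err := images.Pull(connCtx, "localhost:5000/test:latest", opts)
	if err != nil {
		return err
	}
	fmt.Println(ids) // IDs of the pulled image(s)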
diff --git a/pkg/bindings/images/rm.go b/pkg/bindings/images/rm.go
index 05aa3f9ca..c315bfce7 100644
--- a/pkg/bindings/images/rm.go
+++ b/pkg/bindings/images/rm.go
@@ -30,7 +30,7 @@ func BatchRemove(ctx context.Context, images []string, opts entities.ImageRemove
params.Add("images", i)
}
- response, err := conn.DoRequest(nil, http.MethodDelete, "/images/remove", params)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/images/remove", params, nil)
if err != nil {
return nil, []error{err}
}
@@ -52,7 +52,7 @@ func Remove(ctx context.Context, nameOrID string, force bool) (*entities.ImageRe
params := url.Values{}
params.Set("force", strconv.FormatBool(force))
- response, err := conn.DoRequest(nil, http.MethodDelete, "/images/%s", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/images/%s", params, nil, nameOrID)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/manifests/manifests.go b/pkg/bindings/manifests/manifests.go
index f5ee31d93..e89624667 100644
--- a/pkg/bindings/manifests/manifests.go
+++ b/pkg/bindings/manifests/manifests.go
@@ -39,7 +39,7 @@ func Create(ctx context.Context, names, images []string, all *bool) (string, err
params.Add("image", i)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/manifests/create", params)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/manifests/create", params, nil)
if err != nil {
return "", err
}
@@ -53,7 +53,7 @@ func Inspect(ctx context.Context, name string) (*manifest.Schema2List, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/manifests/%s/json", nil, name)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/manifests/%s/json", nil, nil, name)
if err != nil {
return nil, err
}
@@ -73,7 +73,7 @@ func Add(ctx context.Context, name string, options image.ManifestAddOpts) (strin
return "", err
}
stringReader := strings.NewReader(optionsString)
- response, err := conn.DoRequest(stringReader, http.MethodPost, "/manifests/%s/add", nil, name)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/manifests/%s/add", nil, nil, name)
if err != nil {
return "", err
}
@@ -90,7 +90,7 @@ func Remove(ctx context.Context, name, digest string) (string, error) {
}
params := url.Values{}
params.Set("digest", digest)
- response, err := conn.DoRequest(nil, http.MethodDelete, "/manifests/%s", params, name)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/manifests/%s", params, nil, name)
if err != nil {
return "", err
}
@@ -118,7 +118,7 @@ func Push(ctx context.Context, name string, destination *string, all *bool) (str
if all != nil {
params.Set("all", strconv.FormatBool(*all))
}
- _, err = conn.DoRequest(nil, http.MethodPost, "/manifests/%s/push", params, name)
+ _, err = conn.DoRequest(nil, http.MethodPost, "/manifests/%s/push", params, nil, name)
if err != nil {
return "", err
}
diff --git a/pkg/bindings/network/network.go b/pkg/bindings/network/network.go
index 7bba4f478..34881b524 100644
--- a/pkg/bindings/network/network.go
+++ b/pkg/bindings/network/network.go
@@ -28,7 +28,7 @@ func Create(ctx context.Context, options entities.NetworkCreateOptions, name *st
return nil, err
}
stringReader := strings.NewReader(networkConfig)
- response, err := conn.DoRequest(stringReader, http.MethodPost, "/networks/create", params)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/networks/create", params, nil)
if err != nil {
return nil, err
}
@@ -42,7 +42,7 @@ func Inspect(ctx context.Context, nameOrID string) ([]entities.NetworkInspectRep
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/networks/%s/json", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/networks/%s/json", nil, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -62,7 +62,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) ([]*entities.Netw
if force != nil {
params.Set("size", strconv.FormatBool(*force))
}
- response, err := conn.DoRequest(nil, http.MethodDelete, "/networks/%s", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/networks/%s", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -78,7 +78,7 @@ func List(ctx context.Context) ([]*entities.NetworkListReport, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/networks/json", nil)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/networks/json", nil, nil)
if err != nil {
return netList, err
}
diff --git a/pkg/bindings/play/play.go b/pkg/bindings/play/play.go
index 653558a3c..288cca454 100644
--- a/pkg/bindings/play/play.go
+++ b/pkg/bindings/play/play.go
@@ -8,6 +8,7 @@ import (
"strconv"
"github.com/containers/image/v5/types"
+ "github.com/containers/libpod/pkg/auth"
"github.com/containers/libpod/pkg/bindings"
"github.com/containers/libpod/pkg/domain/entities"
)
@@ -31,7 +32,13 @@ func PlayKube(ctx context.Context, path string, options entities.PlayKubeOptions
params.Set("tlsVerify", strconv.FormatBool(options.SkipTLSVerify == types.OptionalBoolTrue))
}
- response, err := conn.DoRequest(f, http.MethodPost, "/play/kube", params)
+ // TODO: have a global system context we can pass around (1st argument)
+ header, err := auth.Header(nil, options.Authfile, options.Username, options.Password)
+ if err != nil {
+ return nil, err
+ }
+
+ response, err := conn.DoRequest(f, http.MethodPost, "/play/kube", params, header)
if err != nil {
return nil, err
}
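
Note: play.PlayKube gains the same header-based authentication as the image bindings. A brief sketch under the same assumptions (the authfile path is a placeholder):

	opts := entities.PlayKubeOptions{
		Authfile: "/tmp/auth.json", // placeholder path
	}
	report, err := play.PlayKube(connCtx, "pod.yaml", opts)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", report)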
diff --git a/pkg/bindings/pods/pods.go b/pkg/bindings/pods/pods.go
index b213c8c73..fb273fdf3 100644
--- a/pkg/bindings/pods/pods.go
+++ b/pkg/bindings/pods/pods.go
@@ -28,7 +28,7 @@ func CreatePodFromSpec(ctx context.Context, s *specgen.PodSpecGenerator) (*entit
return nil, err
}
stringReader := strings.NewReader(specgenString)
- response, err := conn.DoRequest(stringReader, http.MethodPost, "/pods/create", nil)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/pods/create", nil, nil)
if err != nil {
return nil, err
}
@@ -41,7 +41,7 @@ func Exists(ctx context.Context, nameOrID string) (bool, error) {
if err != nil {
return false, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/pods/%s/exists", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/pods/%s/exists", nil, nil, nameOrID)
if err != nil {
return false, err
}
@@ -57,7 +57,7 @@ func Inspect(ctx context.Context, nameOrID string) (*entities.PodInspectReport,
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/pods/%s/json", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/pods/%s/json", nil, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -78,7 +78,7 @@ func Kill(ctx context.Context, nameOrID string, signal *string) (*entities.PodKi
if signal != nil {
params.Set("signal", *signal)
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/kill", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/kill", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -92,7 +92,7 @@ func Pause(ctx context.Context, nameOrID string) (*entities.PodPauseReport, erro
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/pause", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/pause", nil, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -107,7 +107,7 @@ func Prune(ctx context.Context) ([]*entities.PodPruneReport, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/prune", nil)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/prune", nil, nil)
if err != nil {
return nil, err
}
@@ -132,7 +132,7 @@ func List(ctx context.Context, filters map[string][]string) ([]*entities.ListPod
}
params.Set("filters", stringFilter)
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/pods/json", params)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/pods/json", params, nil)
if err != nil {
return podsReports, err
}
@@ -146,7 +146,7 @@ func Restart(ctx context.Context, nameOrID string) (*entities.PodRestartReport,
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/restart", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/restart", nil, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -165,7 +165,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) (*entities.PodRmR
if force != nil {
params.Set("force", strconv.FormatBool(*force))
}
- response, err := conn.DoRequest(nil, http.MethodDelete, "/pods/%s", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/pods/%s", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -179,7 +179,7 @@ func Start(ctx context.Context, nameOrID string) (*entities.PodStartReport, erro
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/start", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/start", nil, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -202,7 +202,7 @@ func Stop(ctx context.Context, nameOrID string, timeout *int) (*entities.PodStop
if timeout != nil {
params.Set("t", strconv.Itoa(*timeout))
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/stop", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/stop", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -226,7 +226,7 @@ func Top(ctx context.Context, nameOrID string, descriptors []string) ([]string,
// flatten the slice into one string
params.Set("ps_args", strings.Join(descriptors, ","))
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/pods/%s/top", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/pods/%s/top", params, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -254,7 +254,7 @@ func Unpause(ctx context.Context, nameOrID string) (*entities.PodUnpauseReport,
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/unpause", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/pods/%s/unpause", nil, nil, nameOrID)
if err != nil {
return nil, err
}
@@ -277,7 +277,7 @@ func Stats(ctx context.Context, namesOrIDs []string, options entities.PodStatsOp
params.Set("all", strconv.FormatBool(options.All))
var reports []*entities.PodStatsReport
- response, err := conn.DoRequest(nil, http.MethodGet, "/pods/stats", params)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/pods/stats", params, nil)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/system/info.go b/pkg/bindings/system/info.go
index 13e12645d..8ad704f84 100644
--- a/pkg/bindings/system/info.go
+++ b/pkg/bindings/system/info.go
@@ -15,7 +15,7 @@ func Info(ctx context.Context) (*define.Info, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/info", nil)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/info", nil, nil)
if err != nil {
return nil, err
}
diff --git a/pkg/bindings/system/system.go b/pkg/bindings/system/system.go
index 5348d0cfb..010762bef 100644
--- a/pkg/bindings/system/system.go
+++ b/pkg/bindings/system/system.go
@@ -20,7 +20,7 @@ import (
// Events allows you to monitor libpod-related events like container creation and
// removal. The events are then passed to the eventChan provided. The optional cancelChan
// can be used to cancel the read of events and close down the HTTP connection.
-func Events(ctx context.Context, eventChan chan (entities.Event), cancelChan chan bool, since, until *string, filters map[string][]string) error {
+func Events(ctx context.Context, eventChan chan entities.Event, cancelChan chan bool, since, until *string, filters map[string][]string, stream *bool) error {
conn, err := bindings.GetClient(ctx)
if err != nil {
return err
@@ -32,6 +32,9 @@ func Events(ctx context.Context, eventChan chan (entities.Event), cancelChan cha
if until != nil {
params.Set("until", *until)
}
+ if stream != nil {
+ params.Set("stream", strconv.FormatBool(*stream))
+ }
if filters != nil {
filterString, err := bindings.FiltersToString(filters)
if err != nil {
@@ -39,7 +42,7 @@ func Events(ctx context.Context, eventChan chan (entities.Event), cancelChan cha
}
params.Set("filters", filterString)
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/events", params)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/events", params, nil)
if err != nil {
return err
}
@@ -50,18 +53,24 @@ func Events(ctx context.Context, eventChan chan (entities.Event), cancelChan cha
logrus.Error(errors.Wrap(err, "unable to close event response body"))
}()
}
+
dec := json.NewDecoder(response.Body)
- for {
- e := entities.Event{}
- if err := dec.Decode(&e); err != nil {
- if err == io.EOF {
- break
- }
- return errors.Wrap(err, "unable to decode event response")
+ for err = (error)(nil); err == nil; {
+ var e = entities.Event{}
+ err = dec.Decode(&e)
+ if err == nil {
+ eventChan <- e
}
- eventChan <- e
}
- return nil
+ close(eventChan)
+ switch {
+ case err == nil:
+ return nil
+ case errors.Is(err, io.EOF):
+ return nil
+ default:
+ return errors.Wrap(err, "unable to decode event response")
+ }
}
// Prune removes all unused system data.
@@ -80,7 +89,7 @@ func Prune(ctx context.Context, all, volumes *bool) (*entities.SystemPruneReport
if volumes != nil {
params.Set("Volumes", strconv.FormatBool(*volumes))
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/system/prune", params)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/system/prune", params, nil)
if err != nil {
return nil, err
}
@@ -101,7 +110,7 @@ func Version(ctx context.Context) (*entities.SystemVersionReport, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/version", nil)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/version", nil, nil)
if err != nil {
return nil, err
}
@@ -130,7 +139,7 @@ func DiskUsage(ctx context.Context) (*entities.SystemDfReport, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/system/df", nil)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/system/df", nil, nil)
if err != nil {
return nil, err
}
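
Note: Events now closes eventChan once the stream ends, so a consumer can simply range over the channel; the new stream parameter mirrors the query parameter of the same name. A hedged consumer sketch (bindings.PFalse is the *bool false helper used in the test change below):

	eventChan := make(chan entities.Event)
	go func() {
		// stream=false: return buffered events and end the stream.
		if err := system.Events(connCtx, eventChan, nil, nil, nil, nil, bindings.PFalse); err != nil {
			logrus.Error(err)
		}
	}()
	for e := range eventChan { // loop exits when Events closes the channel
		fmt.Printf("%+v\n", e)
	}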
diff --git a/pkg/bindings/test/auth_test.go b/pkg/bindings/test/auth_test.go
new file mode 100644
index 000000000..fdb190551
--- /dev/null
+++ b/pkg/bindings/test/auth_test.go
@@ -0,0 +1,143 @@
+package test_bindings
+
+import (
+ "io/ioutil"
+ "os"
+ "time"
+
+ "github.com/containers/common/pkg/auth"
+ "github.com/containers/image/v5/types"
+ podmanRegistry "github.com/containers/libpod/hack/podman-registry-go"
+ "github.com/containers/libpod/pkg/bindings/images"
+ "github.com/containers/libpod/pkg/domain/entities"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("Podman images", func() {
+ var (
+ registry *podmanRegistry.Registry
+ bt *bindingTest
+ s *gexec.Session
+ err error
+ )
+
+ BeforeEach(func() {
+ // Note: we need to start the registry **before** setting up
+ // the test. Otherwise, the registry is not reachable for
+ // currently unknown reasons.
+ registry, err = podmanRegistry.Start()
+ Expect(err).To(BeNil())
+
+ bt = newBindingTest()
+ bt.RestoreImagesFromCache()
+ s = bt.startAPIService()
+ time.Sleep(1 * time.Second)
+ err := bt.NewConnection()
+ Expect(err).To(BeNil())
+ })
+
+ AfterEach(func() {
+ s.Kill()
+ bt.cleanup()
+ registry.Stop()
+ })
+
+ // Test using credentials.
+ It("tag + push + pull (with credentials)", func() {
+
+ imageRep := "localhost:" + registry.Port + "/test"
+ imageTag := "latest"
+ imageRef := imageRep + ":" + imageTag
+
+ // Tag the alpine image and verify it has worked.
+ err = images.Tag(bt.conn, alpine.shortName, imageTag, imageRep)
+ Expect(err).To(BeNil())
+ _, err = images.GetImage(bt.conn, imageRef, nil)
+ Expect(err).To(BeNil())
+
+ // Now push the image.
+ pushOpts := entities.ImagePushOptions{
+ Username: registry.User,
+ Password: registry.Password,
+ SkipTLSVerify: types.OptionalBoolTrue,
+ }
+ err = images.Push(bt.conn, imageRef, imageRef, pushOpts)
+ Expect(err).To(BeNil())
+
+ // Now pull the image.
+ pullOpts := entities.ImagePullOptions{
+ Username: registry.User,
+ Password: registry.Password,
+ SkipTLSVerify: types.OptionalBoolTrue,
+ }
+ _, err = images.Pull(bt.conn, imageRef, pullOpts)
+ Expect(err).To(BeNil())
+ })
+
+ // Test using authfile.
+ It("tag + push + pull + search (with authfile)", func() {
+
+ imageRep := "localhost:" + registry.Port + "/test"
+ imageTag := "latest"
+ imageRef := imageRep + ":" + imageTag
+
+ // Create a temporary authentication file.
+ tmpFile, err := ioutil.TempFile("", "auth.json.")
+ Expect(err).To(BeNil())
+ _, err = tmpFile.Write([]byte{'{', '}'})
+ Expect(err).To(BeNil())
+ err = tmpFile.Close()
+ Expect(err).To(BeNil())
+
+ authFilePath := tmpFile.Name()
+
+ // Now login to a) test the credentials and to b) store them in
+ // the authfile for later use.
+ sys := types.SystemContext{
+ AuthFilePath: authFilePath,
+ DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
+ }
+ loginOptions := auth.LoginOptions{
+ Username: registry.User,
+ Password: registry.Password,
+ AuthFile: authFilePath,
+ Stdin: os.Stdin,
+ Stdout: os.Stdout,
+ }
+ err = auth.Login(bt.conn, &sys, &loginOptions, []string{imageRep})
+ Expect(err).To(BeNil())
+
+ // Tag the alpine image and verify it has worked.
+ err = images.Tag(bt.conn, alpine.shortName, imageTag, imageRep)
+ Expect(err).To(BeNil())
+ _, err = images.GetImage(bt.conn, imageRef, nil)
+ Expect(err).To(BeNil())
+
+ // Now push the image.
+ pushOpts := entities.ImagePushOptions{
+ Authfile: authFilePath,
+ SkipTLSVerify: types.OptionalBoolTrue,
+ }
+ err = images.Push(bt.conn, imageRef, imageRef, pushOpts)
+ Expect(err).To(BeNil())
+
+ // Now pull the image.
+ pullOpts := entities.ImagePullOptions{
+ Authfile: authFilePath,
+ SkipTLSVerify: types.OptionalBoolTrue,
+ }
+ _, err = images.Pull(bt.conn, imageRef, pullOpts)
+ Expect(err).To(BeNil())
+
+ // Last, but not least, exercise search.
+ searchOptions := entities.ImageSearchOptions{
+ Authfile: authFilePath,
+ SkipTLSVerify: types.OptionalBoolTrue,
+ }
+ _, err = images.Search(bt.conn, imageRef, searchOptions)
+ Expect(err).To(BeNil())
+ })
+
+})
diff --git a/pkg/bindings/test/system_test.go b/pkg/bindings/test/system_test.go
index 27ab2f555..dd3778754 100644
--- a/pkg/bindings/test/system_test.go
+++ b/pkg/bindings/test/system_test.go
@@ -47,13 +47,13 @@ var _ = Describe("Podman system", func() {
}
}()
go func() {
- system.Events(bt.conn, eChan, cancelChan, nil, nil, nil)
+ system.Events(bt.conn, eChan, cancelChan, nil, nil, nil, bindings.PFalse)
}()
_, err := bt.RunTopContainer(nil, nil, nil)
Expect(err).To(BeNil())
cancelChan <- true
- Expect(len(messages)).To(BeNumerically("==", 3))
+ Expect(len(messages)).To(BeNumerically("==", 5))
})
It("podman system prune - pod,container stopped", func() {
diff --git a/pkg/bindings/volumes/volumes.go b/pkg/bindings/volumes/volumes.go
index cef9246cb..ebe19794a 100644
--- a/pkg/bindings/volumes/volumes.go
+++ b/pkg/bindings/volumes/volumes.go
@@ -26,7 +26,7 @@ func Create(ctx context.Context, config entities.VolumeCreateOptions) (*entities
return nil, err
}
stringReader := strings.NewReader(createString)
- response, err := conn.DoRequest(stringReader, http.MethodPost, "/volumes/create", nil)
+ response, err := conn.DoRequest(stringReader, http.MethodPost, "/volumes/create", nil, nil)
if err != nil {
return nil, err
}
@@ -42,7 +42,7 @@ func Inspect(ctx context.Context, nameOrID string) (*entities.VolumeConfigRespon
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/volumes/%s/json", nil, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/volumes/%s/json", nil, nil, nameOrID)
if err != nil {
return &inspect, err
}
@@ -67,7 +67,7 @@ func List(ctx context.Context, filters map[string][]string) ([]*entities.VolumeL
}
params.Set("filters", strFilters)
}
- response, err := conn.DoRequest(nil, http.MethodGet, "/volumes/json", params)
+ response, err := conn.DoRequest(nil, http.MethodGet, "/volumes/json", params, nil)
if err != nil {
return vols, err
}
@@ -83,7 +83,7 @@ func Prune(ctx context.Context) ([]*entities.VolumePruneReport, error) {
if err != nil {
return nil, err
}
- response, err := conn.DoRequest(nil, http.MethodPost, "/volumes/prune", nil)
+ response, err := conn.DoRequest(nil, http.MethodPost, "/volumes/prune", nil, nil)
if err != nil {
return nil, err
}
@@ -101,7 +101,7 @@ func Remove(ctx context.Context, nameOrID string, force *bool) error {
if force != nil {
params.Set("force", strconv.FormatBool(*force))
}
- response, err := conn.DoRequest(nil, http.MethodDelete, "/volumes/%s", params, nameOrID)
+ response, err := conn.DoRequest(nil, http.MethodDelete, "/volumes/%s", params, nil, nameOrID)
if err != nil {
return err
}
diff --git a/pkg/domain/entities/engine.go b/pkg/domain/entities/engine.go
index 265c9f36f..db58befa5 100644
--- a/pkg/domain/entities/engine.go
+++ b/pkg/domain/entities/engine.go
@@ -37,19 +37,20 @@ type PodmanConfig struct {
*config.Config
*pflag.FlagSet
- CGroupUsage string // rootless code determines Usage message
- ConmonPath string // --conmon flag will set Engine.ConmonPath
- CpuProfile string // Hidden: Should CPU profile be taken
- EngineMode EngineMode // ABI or Tunneling mode
- Identities []string // ssh identities for connecting to server
- MaxWorks int // maximum number of parallel threads
- RuntimePath string // --runtime flag will set Engine.RuntimePath
- SpanCloser io.Closer // Close() for tracing object
- SpanCtx context.Context // context to use when tracing
- Span opentracing.Span // tracing object
- Syslog bool // write to StdOut and Syslog, not supported when tunneling
- Trace bool // Hidden: Trace execution
- Uri string // URI to API Service
+ CGroupUsage string // rootless code determines Usage message
+ ConmonPath string // --conmon flag will set Engine.ConmonPath
+ CpuProfile string // Hidden: Should CPU profile be taken
+ EngineMode EngineMode // ABI or Tunneling mode
+ Identities []string // ssh identities for connecting to server
+ MaxWorks int // maximum number of parallel threads
+ RegistriesConf string // allows for specifying a custom registries.conf
+ RuntimePath string // --runtime flag will set Engine.RuntimePath
+ SpanCloser io.Closer // Close() for tracing object
+ SpanCtx context.Context // context to use when tracing
+ Span opentracing.Span // tracing object
+ Syslog bool // write to StdOut and Syslog, not supported when tunneling
+ Trace bool // Hidden: Trace execution
+ Uri string // URI to API Service
Runroot string
StorageDriver string
diff --git a/pkg/domain/entities/generate.go b/pkg/domain/entities/generate.go
index edd217615..68a42d897 100644
--- a/pkg/domain/entities/generate.go
+++ b/pkg/domain/entities/generate.go
@@ -14,6 +14,12 @@ type GenerateSystemdOptions struct {
RestartPolicy string
// StopTimeout - time when stopping the container.
StopTimeout *uint
+ // ContainerPrefix - systemd unit name prefix for containers
+ ContainerPrefix string
+ // PodPrefix - systemd unit name prefix for pods
+ PodPrefix string
+	// Separator - systemd unit name separator between name/id and prefix
+ Separator string
}
// GenerateSystemdReport
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 0f909ab37..19a2c87f5 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -50,7 +50,7 @@ func (i *Image) Id() string {
}
type ImageSummary struct {
- ID string
+ ID string `json:"Id"`
ParentId string `json:",omitempty"`
RepoTags []string `json:",omitempty"`
Created time.Time `json:",omitempty"`
@@ -128,9 +128,10 @@ type ImagePullOptions struct {
// CertDir is the path to certificate directories. Ignored for remote
// calls.
CertDir string
- // Credentials for authenticating against the registry in the format
- // USERNAME:PASSWORD.
- Credentials string
+ // Username for authenticating against the registry.
+ Username string
+ // Password for authenticating against the registry.
+ Password string
// OverrideArch will overwrite the local architecture for image pulls.
OverrideArch string
// OverrideOS will overwrite the local operating system (OS) for image
@@ -162,9 +163,10 @@ type ImagePushOptions struct {
// transport. Default is same compression type as source. Ignored for remote
// calls.
Compress bool
- // Credentials for authenticating against the registry in the format
- // USERNAME:PASSWORD.
- Credentials string
+ // Username for authenticating against the registry.
+ Username string
+ // Password for authenticating against the registry.
+ Password string
// DigestFile, after copying the image, write the digest of the resulting
// image to the file. Ignored for remote calls.
DigestFile string
diff --git a/pkg/domain/entities/manifest.go b/pkg/domain/entities/manifest.go
index 273052bb9..853619b19 100644
--- a/pkg/domain/entities/manifest.go
+++ b/pkg/domain/entities/manifest.go
@@ -1,5 +1,9 @@
package entities
+import "github.com/containers/image/v5/types"
+
+// TODO: add comments to *all* types and fields.
+
type ManifestCreateOptions struct {
All bool `schema:"all"`
}
@@ -26,6 +30,9 @@ type ManifestAnnotateOptions struct {
}
type ManifestPushOptions struct {
- Purge, Quiet, All, TlsVerify, RemoveSignatures bool
- Authfile, CertDir, Creds, DigestFile, Format, SignBy string
+ Purge, Quiet, All, RemoveSignatures bool
+
+ Authfile, CertDir, Username, Password, DigestFile, Format, SignBy string
+
+ SkipTLSVerify types.OptionalBool
}
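
Note: replacing the TlsVerify boolean with types.OptionalBool distinguishes "caller said nothing" from an explicit true/false, which is what lets the bindings omit the query parameter entirely when it is undefined. The tri-state pattern, as already used by the push and search bindings above:

	if opts.SkipTLSVerify != types.OptionalBoolUndefined {
		// The service expects "verify", the inverse of "skip".
		params.Set("tlsVerify", strconv.FormatBool(opts.SkipTLSVerify != types.OptionalBoolTrue))
	}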
diff --git a/pkg/domain/entities/play.go b/pkg/domain/entities/play.go
index 93864c23b..4f485cbee 100644
--- a/pkg/domain/entities/play.go
+++ b/pkg/domain/entities/play.go
@@ -8,9 +8,10 @@ type PlayKubeOptions struct {
Authfile string
// CertDir - to a directory containing TLS certifications and keys.
CertDir string
- // Credentials - `username:password` for authentication against a
- // container registry.
- Credentials string
+ // Username for authenticating against the registry.
+ Username string
+ // Password for authenticating against the registry.
+ Password string
// Network - name of the CNI network to connect to.
Network string
// Quiet - suppress output when pulling images.
diff --git a/pkg/domain/entities/pods.go b/pkg/domain/entities/pods.go
index a4896ce4d..1a38a7aa4 100644
--- a/pkg/domain/entities/pods.go
+++ b/pkg/domain/entities/pods.go
@@ -184,6 +184,8 @@ type PodInspectOptions struct {
// Options for the API.
NameOrID string
+
+ Format string
}
type PodInspectReport struct {
diff --git a/pkg/domain/entities/system.go b/pkg/domain/entities/system.go
index 5e4760d12..79a90be48 100644
--- a/pkg/domain/entities/system.go
+++ b/pkg/domain/entities/system.go
@@ -97,3 +97,9 @@ type SystemVersionReport struct {
type ComponentVersion struct {
types.Version
}
+
+// ListRegistriesReport is the report when querying for a sorted list of
+// registries which may be contacted during certain operations.
+type ListRegistriesReport struct {
+ Registries []string
+}
diff --git a/pkg/domain/infra/abi/containers.go b/pkg/domain/infra/abi/containers.go
index b4e38ca23..e982c7c11 100644
--- a/pkg/domain/infra/abi/containers.go
+++ b/pkg/domain/infra/abi/containers.go
@@ -1087,6 +1087,7 @@ func (ic *ContainerEngine) Shutdown(_ context.Context) {
}
func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []string, options entities.ContainerStatsOptions) error {
+ defer close(options.StatChan)
containerFunc := ic.Libpod.GetRunningContainers
switch {
case len(namesOrIds) > 0:
diff --git a/pkg/domain/infra/abi/events.go b/pkg/domain/infra/abi/events.go
index 20773cdce..7ec9db369 100644
--- a/pkg/domain/infra/abi/events.go
+++ b/pkg/domain/infra/abi/events.go
@@ -5,12 +5,9 @@ import (
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/pkg/domain/entities"
- "github.com/sirupsen/logrus"
)
func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptions) error {
readOpts := events.ReadOptions{FromStart: opts.FromStart, Stream: opts.Stream, Filters: opts.Filter, EventChannel: opts.EventChan, Since: opts.Since, Until: opts.Until}
- err := ic.Libpod.Events(readOpts)
- logrus.Error(err)
- return err
+ return ic.Libpod.Events(readOpts)
}
diff --git a/pkg/domain/infra/abi/generate.go b/pkg/domain/infra/abi/generate.go
index be5d452bd..abb5e2911 100644
--- a/pkg/domain/infra/abi/generate.go
+++ b/pkg/domain/infra/abi/generate.go
@@ -159,14 +159,14 @@ func (ic *ContainerEngine) generateSystemdgenContainerInfo(nameOrID string, pod
func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entities.GenerateSystemdOptions) (string, string) {
var kind, name, ctrName string
if pod == nil {
- kind = "container"
+ kind = options.ContainerPrefix // defaults to "container"
name = ctr.ID()
if options.Name {
name = ctr.Name()
}
ctrName = name
} else {
- kind = "pod"
+ kind = options.PodPrefix // defaults to "pod"
name = pod.ID()
ctrName = ctr.ID()
if options.Name {
@@ -174,7 +174,7 @@ func generateServiceName(ctr *libpod.Container, pod *libpod.Pod, options entitie
ctrName = ctr.Name()
}
}
- return ctrName, fmt.Sprintf("%s-%s", kind, name)
+ return ctrName, fmt.Sprintf("%s%s%s", kind, options.Separator, name)
}
func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrID string, options entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) {
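
Note: with the new options the generated unit names stay backward compatible: the defaults ("container"/"pod" with separator "-") reproduce the old hard-coded form. For illustration:

	// Default options: "container" + "-" + id  =>  "container-abcdef"
	// Custom options:  ContainerPrefix="con", Separator="_"  =>  "con_abcdef"
	unit := fmt.Sprintf("%s%s%s", kind, options.Separator, name)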
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 6e774df8e..d8af4d339 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -121,12 +121,11 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti
}
var registryCreds *types.DockerAuthConfig
- if options.Credentials != "" {
- creds, err := util.ParseRegistryCreds(options.Credentials)
- if err != nil {
- return nil, err
+ if len(options.Username) > 0 && len(options.Password) > 0 {
+ registryCreds = &types.DockerAuthConfig{
+ Username: options.Username,
+ Password: options.Password,
}
- registryCreds = creds
}
dockerRegistryOptions := image.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
@@ -226,12 +225,11 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
}
var registryCreds *types.DockerAuthConfig
- if options.Credentials != "" {
- creds, err := util.ParseRegistryCreds(options.Credentials)
- if err != nil {
- return err
+ if len(options.Username) > 0 && len(options.Password) > 0 {
+ registryCreds = &types.DockerAuthConfig{
+ Username: options.Username,
+ Password: options.Password,
}
- registryCreds = creds
}
dockerRegistryOptions := image.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
diff --git a/pkg/domain/infra/abi/manifest.go b/pkg/domain/infra/abi/manifest.go
index fca34dda2..6e311dec7 100644
--- a/pkg/domain/infra/abi/manifest.go
+++ b/pkg/domain/infra/abi/manifest.go
@@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports/alltransports"
+ "github.com/containers/image/v5/types"
libpodImage "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/util"
@@ -179,12 +180,28 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, names []string, opts en
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
- return errors.Errorf("unknown format %q. Choose on of the supported formats: 'oci' or 'v2s2'", opts.Format)
+ return errors.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format)
}
}
+
+ // Set the system context.
+ sys := ir.Libpod.SystemContext()
+ if sys == nil {
+ sys = &types.SystemContext{}
+ }
+ sys.AuthFilePath = opts.Authfile
+ sys.DockerInsecureSkipTLSVerify = opts.SkipTLSVerify
+
+ if opts.Username != "" && opts.Password != "" {
+ sys.DockerAuthConfig = &types.DockerAuthConfig{
+ Username: opts.Username,
+ Password: opts.Password,
+ }
+ }
+
options := manifests.PushOptions{
Store: ir.Libpod.GetStore(),
- SystemContext: ir.Libpod.SystemContext(),
+ SystemContext: sys,
ImageListSelection: cp.CopySpecificImages,
Instances: nil,
RemoveSignatures: opts.RemoveSignatures,
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index cd7eec7e6..6d0919d2b 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -56,6 +56,9 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
return nil, errors.Wrapf(err, "unable to read %q as YAML", path)
}
+ // NOTE: pkg/bindings/play is also parsing the file.
+ // A pkg/kube would be nice to refactor and abstract
+ // parts of the K8s-related code.
if podYAML.Kind != "Pod" {
return nil, errors.Errorf("invalid YAML kind: %q. Pod is the only supported Kubernetes YAML kind", podYAML.Kind)
}
@@ -147,6 +150,13 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, path string, options en
writer = os.Stderr
}
+ if len(options.Username) > 0 && len(options.Password) > 0 {
+ registryCreds = &types.DockerAuthConfig{
+ Username: options.Username,
+ Password: options.Password,
+ }
+ }
+
dockerRegistryOptions := image.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
DockerCertPath: options.CertDir,
diff --git a/pkg/domain/infra/abi/system.go b/pkg/domain/infra/abi/system.go
index af2ec5f7b..52dfaba7d 100644
--- a/pkg/domain/infra/abi/system.go
+++ b/pkg/domain/infra/abi/system.go
@@ -16,56 +16,18 @@ import (
"github.com/containers/libpod/pkg/domain/entities"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
- iopodman "github.com/containers/libpod/pkg/varlink"
- iopodmanAPI "github.com/containers/libpod/pkg/varlinkapi"
"github.com/containers/libpod/utils"
- "github.com/containers/libpod/version"
"github.com/docker/distribution/reference"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
- "github.com/varlink/go/varlink"
)
func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
return ic.Libpod.Info()
}
-func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.ServiceOptions) error {
- var varlinkInterfaces = []*iopodman.VarlinkInterface{
- iopodmanAPI.New(opts.Command, ic.Libpod),
- }
-
- service, err := varlink.NewService(
- "Atomic",
- "podman",
- version.Version,
- "https://github.com/containers/libpod",
- )
- if err != nil {
- return errors.Wrapf(err, "unable to create new varlink service")
- }
-
- for _, i := range varlinkInterfaces {
- if err := service.RegisterInterface(i); err != nil {
- return errors.Errorf("unable to register varlink interface %v", i)
- }
- }
-
- // Run the varlink server at the given address
- if err = service.Listen(opts.URI, opts.Timeout); err != nil {
- switch err.(type) {
- case varlink.ServiceTimeoutError:
- logrus.Infof("varlink service expired (use --timeout to increase session time beyond %s ms, 0 means never timeout)", opts.Timeout.String())
- return nil
- default:
- return errors.Wrapf(err, "unable to start varlink service")
- }
- }
- return nil
-}
-
func (ic *ContainerEngine) SetupRootless(_ context.Context, cmd *cobra.Command) error {
// do it only after podman has already re-execed and running with uid==0.
if os.Geteuid() == 0 {
diff --git a/pkg/domain/infra/abi/system_novalink.go b/pkg/domain/infra/abi/system_novalink.go
new file mode 100644
index 000000000..a71b0170a
--- /dev/null
+++ b/pkg/domain/infra/abi/system_novalink.go
@@ -0,0 +1,14 @@
+// +build !varlink
+
+package abi
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ "github.com/pkg/errors"
+)
+
+func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.ServiceOptions) error {
+ return errors.Errorf("varlink is not supported")
+}
diff --git a/pkg/domain/infra/abi/system_varlink.go b/pkg/domain/infra/abi/system_varlink.go
new file mode 100644
index 000000000..4dc766f52
--- /dev/null
+++ b/pkg/domain/infra/abi/system_varlink.go
@@ -0,0 +1,49 @@
+// +build varlink
+
+package abi
+
+import (
+ "context"
+
+ "github.com/containers/libpod/pkg/domain/entities"
+ iopodman "github.com/containers/libpod/pkg/varlink"
+ iopodmanAPI "github.com/containers/libpod/pkg/varlinkapi"
+ "github.com/containers/libpod/version"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "github.com/varlink/go/varlink"
+)
+
+func (ic *ContainerEngine) VarlinkService(_ context.Context, opts entities.ServiceOptions) error {
+ var varlinkInterfaces = []*iopodman.VarlinkInterface{
+ iopodmanAPI.New(opts.Command, ic.Libpod),
+ }
+
+ service, err := varlink.NewService(
+ "Atomic",
+ "podman",
+ version.Version,
+ "https://github.com/containers/libpod",
+ )
+ if err != nil {
+ return errors.Wrapf(err, "unable to create new varlink service")
+ }
+
+ for _, i := range varlinkInterfaces {
+ if err := service.RegisterInterface(i); err != nil {
+ return errors.Errorf("unable to register varlink interface %v", i)
+ }
+ }
+
+ // Run the varlink server at the given address
+ if err = service.Listen(opts.URI, opts.Timeout); err != nil {
+ switch err.(type) {
+ case varlink.ServiceTimeoutError:
+ logrus.Infof("varlink service expired (use --time to increase session time beyond %s, 0 means never timeout)", opts.Timeout.String())
+ return nil
+ default:
+ return errors.Wrapf(err, "unable to start varlink service")
+ }
+ }
+ return nil
+}
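+
+// For illustration only: with a varlink-enabled build, the service can be
+// started with something like
+//
+//   podman varlink --time 0 unix:/run/podman/io.podman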
diff --git a/pkg/domain/infra/runtime_libpod.go b/pkg/domain/infra/runtime_libpod.go
index 7c9180d43..a57eadc63 100644
--- a/pkg/domain/infra/runtime_libpod.go
+++ b/pkg/domain/infra/runtime_libpod.go
@@ -213,6 +213,9 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo
if fs.Changed("hooks-dir") {
options = append(options, libpod.WithHooksDir(cfg.Engine.HooksDir...))
}
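+ // Propagate a user-specified registries.conf path (--registries-conf) to libpod.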
+ if fs.Changed("registries-conf") {
+ options = append(options, libpod.WithRegistriesConf(cfg.RegistriesConf))
+ }
// TODO flag to set CNI plugins dir?
diff --git a/pkg/domain/infra/tunnel/events.go b/pkg/domain/infra/tunnel/events.go
index 93da3aeb4..6a08a1f85 100644
--- a/pkg/domain/infra/tunnel/events.go
+++ b/pkg/domain/infra/tunnel/events.go
@@ -25,6 +25,7 @@ func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptio
for e := range binChan {
opts.EventChan <- entities.ConvertToLibpodEvent(e)
}
+ close(opts.EventChan)
}()
- return system.Events(ic.ClientCxt, binChan, nil, &opts.Since, &opts.Until, filters)
+ return system.Events(ic.ClientCxt, binChan, nil, &opts.Since, &opts.Until, filters, &opts.Stream)
}
diff --git a/pkg/network/network.go b/pkg/network/network.go
index 5e9062019..526ee92d8 100644
--- a/pkg/network/network.go
+++ b/pkg/network/network.go
@@ -13,8 +13,11 @@ import (
"github.com/sirupsen/logrus"
)
+// DefaultNetworkDriver is the network driver used by default
+var DefaultNetworkDriver string = "bridge"
+
// SupportedNetworkDrivers describes the list of supported drivers
-var SupportedNetworkDrivers = []string{"bridge"}
+var SupportedNetworkDrivers = []string{DefaultNetworkDriver}
// IsSupportedDriver checks if the user provided driver is supported
func IsSupportedDriver(driver string) error {
@@ -191,3 +194,16 @@ func InspectNetwork(config *config.Config, name string) (map[string]interface{},
err = json.Unmarshal(b, &rawList)
return rawList, err
}
+
+// Exists reports whether a given network exists. It is meant
+// specifically for RESTful responses, so 404s can be returned.
+func Exists(config *config.Config, name string) (bool, error) {
+ _, err := ReadRawCNIConfByName(config, name)
+ if err != nil {
+ if errors.Cause(err) == ErrNetworkNotFound {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
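+
+// Sketch of the intended REST usage (hypothetical handler code):
+//
+//   exists, err := network.Exists(config, name)
+//   if err == nil && !exists {
+//       // respond with http.StatusNotFound
+//   }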
diff --git a/pkg/ps/ps.go b/pkg/ps/ps.go
index 907063df9..ec96367cb 100644
--- a/pkg/ps/ps.go
+++ b/pkg/ps/ps.go
@@ -23,7 +23,7 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp
filterFuncs []libpod.ContainerFilter
pss []entities.ListContainer
)
- all := options.All
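+ // Last > 0 implies examining all containers, including non-running ones.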
+ all := options.All || options.Last > 0
if len(options.Filters) > 0 {
for k, v := range options.Filters {
for _, val := range v {
diff --git a/pkg/signal/signal_common.go b/pkg/signal/signal_common.go
new file mode 100644
index 000000000..8ff4b4dbf
--- /dev/null
+++ b/pkg/signal/signal_common.go
@@ -0,0 +1,41 @@
+package signal
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// ParseSignal translates a string to a valid syscall signal.
+// It returns an error if the signal map doesn't include the given signal.
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return -1, fmt.Errorf("invalid signal: %s", rawSignal)
+ }
+ return syscall.Signal(s), nil
+ }
+ sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+ if !ok {
+ return -1, fmt.Errorf("invalid signal: %s", rawSignal)
+ }
+ return sig, nil
+}
+
+// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input
+// can be a name or number representation i.e. "KILL" "9"
+func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
+ basename := strings.TrimPrefix(rawSignal, "-")
+ s, err := ParseSignal(basename)
+ if err == nil {
+ return s, nil
+ }
+ for k, v := range signalMap {
+ if k == strings.ToUpper(basename) {
+ return v, nil
+ }
+ }
+ return -1, fmt.Errorf("invalid signal: %s", basename)
+}
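+
+// For illustration, each of the following resolves to SIGKILL (9):
+//
+//   ParseSignal("9")
+//   ParseSignal("KILL")
+//   ParseSignal("SIGKILL")
+//   ParseSignalNameOrNumber("-KILL")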
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
index e6e0f1ca1..6eebf7e5a 100644
--- a/pkg/signal/signal_linux.go
+++ b/pkg/signal/signal_linux.go
@@ -8,11 +8,8 @@ package signal
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
- "fmt"
"os"
"os/signal"
- "strconv"
- "strings"
"syscall"
"golang.org/x/sys/unix"
@@ -94,23 +91,6 @@ var signalMap = map[string]syscall.Signal{
"RTMAX": sigrtmax,
}
-// ParseSignal translates a string to a valid syscall signal.
-// It returns an error if the signal map doesn't include the given signal.
-func ParseSignal(rawSignal string) (syscall.Signal, error) {
- s, err := strconv.Atoi(rawSignal)
- if err == nil {
- if s == 0 {
- return -1, fmt.Errorf("invalid signal: %s", rawSignal)
- }
- return syscall.Signal(s), nil
- }
- sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
- if !ok {
- return -1, fmt.Errorf("invalid signal: %s", rawSignal)
- }
- return sig, nil
-}
-
// CatchAll catches all signals and relays them to the specified channel.
func CatchAll(sigc chan os.Signal) {
var handledSigs []os.Signal
@@ -125,19 +105,3 @@ func StopCatch(sigc chan os.Signal) {
signal.Stop(sigc)
close(sigc)
}
-
-// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input
-// can be a name or number representation i.e. "KILL" "9"
-func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
- basename := strings.TrimPrefix(rawSignal, "-")
- s, err := ParseSignal(basename)
- if err == nil {
- return s, nil
- }
- for k, v := range signalMap {
- if k == strings.ToUpper(basename) {
- return v, nil
- }
- }
- return -1, fmt.Errorf("invalid signal: %s", basename)
-}
diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go
index f946d802d..9d1733c02 100644
--- a/pkg/signal/signal_unsupported.go
+++ b/pkg/signal/signal_unsupported.go
@@ -4,17 +4,88 @@
package signal
import (
- "fmt"
"os"
"syscall"
)
-const SIGWINCH = syscall.Signal(0xff)
+const (
+ sigrtmin = 34
+ sigrtmax = 64
-// ParseSignal translates a string to a valid syscall signal.
-// It returns an error if the signal map doesn't include the given signal.
-func ParseSignal(rawSignal string) (syscall.Signal, error) {
- return 0, fmt.Errorf("unsupported on non-linux platforms")
+ SIGWINCH = syscall.Signal(0xff)
+)
+
+// signalMap is a map of Linux signals.
+// These constants are sourced from the Linux version of golang.org/x/sys/unix
+// (I don't see much risk of this changing).
+// This should work as long as Podman only runs containers on Linux, which seems
+// a safe assumption for now.
+var signalMap = map[string]syscall.Signal{
+ "ABRT": syscall.Signal(0x6),
+ "ALRM": syscall.Signal(0xe),
+ "BUS": syscall.Signal(0x7),
+ "CHLD": syscall.Signal(0x11),
+ "CLD": syscall.Signal(0x11),
+ "CONT": syscall.Signal(0x12),
+ "FPE": syscall.Signal(0x8),
+ "HUP": syscall.Signal(0x1),
+ "ILL": syscall.Signal(0x4),
+ "INT": syscall.Signal(0x2),
+ "IO": syscall.Signal(0x1d),
+ "IOT": syscall.Signal(0x6),
+ "KILL": syscall.Signal(0x9),
+ "PIPE": syscall.Signal(0xd),
+ "POLL": syscall.Signal(0x1d),
+ "PROF": syscall.Signal(0x1b),
+ "PWR": syscall.Signal(0x1e),
+ "QUIT": syscall.Signal(0x3),
+ "SEGV": syscall.Signal(0xb),
+ "STKFLT": syscall.Signal(0x10),
+ "STOP": syscall.Signal(0x13),
+ "SYS": syscall.Signal(0x1f),
+ "TERM": syscall.Signal(0xf),
+ "TRAP": syscall.Signal(0x5),
+ "TSTP": syscall.Signal(0x14),
+ "TTIN": syscall.Signal(0x15),
+ "TTOU": syscall.Signal(0x16),
+ "URG": syscall.Signal(0x17),
+ "USR1": syscall.Signal(0xa),
+ "USR2": syscall.Signal(0xc),
+ "VTALRM": syscall.Signal(0x1a),
+ "WINCH": syscall.Signal(0x1c),
+ "XCPU": syscall.Signal(0x18),
+ "XFSZ": syscall.Signal(0x19),
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
}
// CatchAll catches all signals and relays them to the specified channel.
@@ -26,9 +97,3 @@ func CatchAll(sigc chan os.Signal) {
func StopCatch(sigc chan os.Signal) {
panic("Unsupported on non-linux platforms")
}
-
-// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input
-// can be a name or number representation i.e. "KILL" "9"
-func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
- return 0, fmt.Errorf("unsupported on non-linux platforms")
-}
diff --git a/pkg/specgen/generate/config_linux.go b/pkg/specgen/generate/config_linux.go
index 1b2a2ac32..f4cf0c704 100644
--- a/pkg/specgen/generate/config_linux.go
+++ b/pkg/specgen/generate/config_linux.go
@@ -72,13 +72,17 @@ func addPrivilegedDevices(g *generate.Generator) error {
newMounts = append(newMounts, devMnt)
}
g.Config.Mounts = append(newMounts, g.Config.Mounts...)
- g.Config.Linux.Resources.Devices = nil
+ if g.Config.Linux.Resources != nil {
+ g.Config.Linux.Resources.Devices = nil
+ }
} else {
for _, d := range hostDevices {
g.AddDevice(Device(d))
}
// Add resources device - need to clear the existing one first.
- g.Config.Linux.Resources.Devices = nil
+ if g.Config.Linux.Resources != nil {
+ g.Config.Linux.Resources.Devices = nil
+ }
g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm")
}
diff --git a/pkg/specgen/generate/config_linux_nocgo.go b/pkg/specgen/generate/config_linux_nocgo.go
index fc8ed206d..81d1c7011 100644
--- a/pkg/specgen/generate/config_linux_nocgo.go
+++ b/pkg/specgen/generate/config_linux_nocgo.go
@@ -5,10 +5,11 @@ package generate
import (
"errors"
+ "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
)
-func (s *specgen.SpecGenerator) getSeccompConfig(configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(s *specgen.SpecGenerator, configSpec *spec.Spec, img *image.Image) (*spec.LinuxSeccomp, error) {
return nil, errors.New("not implemented")
}
diff --git a/test/apiv2/10-images.at b/test/apiv2/10-images.at
index 1c8da0c2f..1c7ba8948 100644
--- a/test/apiv2/10-images.at
+++ b/test/apiv2/10-images.at
@@ -7,15 +7,15 @@
podman pull -q $IMAGE
t GET libpod/images/json 200 \
- .[0].ID~[0-9a-f]\\{64\\}
-iid=$(jq -r '.[0].ID' <<<"$output")
+ .[0].Id~[0-9a-f]\\{64\\}
+iid=$(jq -r '.[0].Id' <<<"$output")
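+# NOTE: the field is spelled "Id" (Docker-style JSON), not "ID".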
t GET libpod/images/$iid/exists 204
t GET libpod/images/$PODMAN_TEST_IMAGE_NAME/exists 204
# FIXME: compare to actual podman info
t GET libpod/images/json 200 \
- .[0].ID=${iid}
+ .[0].Id=${iid}
t GET libpod/images/$iid/json 200 \
.Id=$iid \
@@ -33,4 +33,27 @@ t GET images/$iid/json 200 \
#t POST images/create fromImage=alpine 201 foo
+# Display the image history
+t GET libpod/images/nonesuch/history 404
+
+for i in $iid ${iid:0:12} $PODMAN_TEST_IMAGE_NAME; do
+ t GET libpod/images/$i/history 200 \
+ .[0].Id=$iid \
+ .[0].Created~[0-9]\\{10\\} \
+ .[0].Tags=null \
+ .[0].Size=0 \
+ .[0].Comment=
+done
+
+# Export an image from local storage
+t GET libpod/images/nonesuch/get 404
+t GET libpod/images/$iid/get?format=foo 500
+t GET libpod/images/$PODMAN_TEST_IMAGE_NAME/get?compress=bar 400
+
+for i in $iid ${iid:0:12} $PODMAN_TEST_IMAGE_NAME; do
+ t GET "libpod/images/$i/get" 200 '[POSIX tar archive]'
+ t GET "libpod/images/$i/get?compress=true" 200 '[POSIX tar archive]'
+ t GET "libpod/images/$i/get?compress=false" 200 '[POSIX tar archive]'
+done
+
# vim: filetype=sh
diff --git a/test/apiv2/40-pods.at b/test/apiv2/40-pods.at
index 26877a102..2dea1918a 100644
--- a/test/apiv2/40-pods.at
+++ b/test/apiv2/40-pods.at
@@ -5,8 +5,8 @@
t GET "libpod/pods/json (clean slate at start)" 200 null
-t POST libpod/pods/create name=foo 201 .id~[0-9a-f]\\{64\\}
-pod_id=$(jq -r .id <<<"$output")
+t POST libpod/pods/create name=foo 201 .Id~[0-9a-f]\\{64\\}
+pod_id=$(jq -r .Id <<<"$output")
t GET libpod/pods/foo/exists 204
t GET libpod/pods/$pod_id/exists 204
t GET libpod/pods/notfoo/exists 404
diff --git a/test/apiv2/rest_api/__init__.py b/test/apiv2/rest_api/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/apiv2/rest_api/__init__.py
diff --git a/test/apiv2/rest_api/test_rest_v1_0_0.py b/test/apiv2/rest_api/test_rest_v1_0_0.py
new file mode 100644
index 000000000..7c53623cb
--- /dev/null
+++ b/test/apiv2/rest_api/test_rest_v1_0_0.py
@@ -0,0 +1,219 @@
+import json
+import os
+import shlex
+import signal
+import string
+import subprocess
+import sys
+import time
+import unittest
+from collections.abc import Iterable
+from multiprocessing import Process
+
+import requests
+from dateutil.parser import parse
+
+
+def _url(path):
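+ # e.g. _url("/info") -> "http://localhost:8080/v1.0.0/libpod/info"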
+ return "http://localhost:8080/v1.0.0/libpod" + path
+
+
+def podman():
+ binary = os.getenv("PODMAN_BINARY")
+ if binary is None:
+ binary = "bin/podman"
+ return binary
+
+
+def ctnr(path):
+ r = requests.get(_url("/containers/json?all=true"))
+ try:
+ ctnrs = json.loads(r.text)
+ except Exception as e:
+ sys.stderr.write("Bad container response: {}/{}".format(r.text, e))
+ raise e
+ return path.format(ctnrs[0]["Id"])
+
+
+class TestApi(unittest.TestCase):
+ podman = None
+
+ def setUp(self):
+ super().setUp()
+ if TestApi.podman.poll() is not None:
+ sys.stderr.write("podman service returned {}\n".format(
+ TestApi.podman.returncode))
+ sys.exit(2)
+ requests.get(
+ _url("/images/create?fromSrc=docker.io%2Falpine%3Alatest"))
+ # calling out to podman is easier than using the API to run a container
+ subprocess.run([podman(), "run", "alpine", "/bin/ls"],
+ check=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ TestApi.podman = subprocess.Popen(
+ [
+ podman(), "system", "service", "tcp:localhost:8080",
+ "--log-level=debug", "--time=0"
+ ],
+ shell=False,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
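+ # Give the service a moment to come up before running the tests.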
+ time.sleep(2)
+
+ @classmethod
+ def tearDownClass(cls):
+ TestApi.podman.terminate()
+ stdout, stderr = TestApi.podman.communicate(timeout=0.5)
+ if stdout:
+ print("\nService Stdout:\n" + stdout.decode('utf-8'))
+ if stderr:
+ print("\nService Stderr:\n" + stderr.decode('utf-8'))
+
+ if TestApi.podman.returncode > 0:
+ sys.stderr.write("podman exited with error code {}\n".format(
+ TestApi.podman.returncode))
+ sys.exit(2)
+
+ return super().tearDownClass()
+
+ def test_info(self):
+ r = requests.get(_url("/info"))
+ self.assertEqual(r.status_code, 200)
+ self.assertIsNotNone(r.content)
+ _ = json.loads(r.text)
+
+ def test_events(self):
+ r = requests.get(_url("/events?stream=false"))
+ self.assertEqual(r.status_code, 200, r.text)
+ self.assertIsNotNone(r.content)
+ for line in r.text.splitlines():
+ obj = json.loads(line)
+ # Actor.ID is uppercase for compatibility
+ _ = obj["Actor"]["ID"]
+
+ def test_containers(self):
+ r = requests.get(_url("/containers/json"), timeout=5)
+ self.assertEqual(r.status_code, 200, r.text)
+ obj = json.loads(r.text)
+ self.assertEqual(len(obj), 0)
+
+ def test_containers_all(self):
+ r = requests.get(_url("/containers/json?all=true"))
+ self.assertEqual(r.status_code, 200, r.text)
+ self.validateObjectFields(r.text)
+
+ def test_inspect_container(self):
+ r = requests.get(_url(ctnr("/containers/{}/json")))
+ self.assertEqual(r.status_code, 200, r.text)
+ obj = self.validateObjectFields(r.content)
+ _ = parse(obj["Created"])
+
+ def test_stats(self):
+ r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
+ self.assertIn(r.status_code, (200, 409), r.text)
+ if r.status_code == 200:
+ self.validateObjectFields(r.text)
+
+ def test_delete_containers(self):
+ r = requests.delete(_url(ctnr("/containers/{}")))
+ self.assertEqual(r.status_code, 204, r.text)
+
+ def test_stop_containers(self):
+ r = requests.post(_url(ctnr("/containers/{}/start")))
+ self.assertIn(r.status_code, (204, 304), r.text)
+
+ r = requests.post(_url(ctnr("/containers/{}/stop")))
+ self.assertIn(r.status_code, (204, 304), r.text)
+
+ def test_start_containers(self):
+ r = requests.post(_url(ctnr("/containers/{}/stop")))
+ self.assertIn(r.status_code, (204, 304), r.text)
+
+ r = requests.post(_url(ctnr("/containers/{}/start")))
+ self.assertIn(r.status_code, (204, 304), r.text)
+
+ def test_restart_containers(self):
+ r = requests.post(_url(ctnr("/containers/{}/start")))
+ self.assertIn(r.status_code, (204, 304), r.text)
+
+ r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
+ self.assertEqual(r.status_code, 204, r.text)
+
+ def test_resize(self):
+ r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
+ self.assertIn(r.status_code, (200, 409), r.text)
+ if r.status_code == 200:
+ self.assertFalse(r.text)  # requests gives "" (never None) for an empty body
+
+ def test_attach_containers(self):
+ r = requests.post(_url(ctnr("/containers/{}/attach")))
+ self.assertIn(r.status_code, (101, 409), r.text)
+
+ def test_logs_containers(self):
+ r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
+ self.assertEqual(r.status_code, 200, r.text)
+
+ def test_post_create(self):
+ self.skipTest("TODO: create request body")
+ r = requests.post(_url("/containers/create?args=True"))
+ self.assertEqual(r.status_code, 200, r.text)
+ json.loads(r.text)
+
+ def test_commit(self):
+ r = requests.post(_url(ctnr("/commit?container={}")))
+ self.assertEqual(r.status_code, 200, r.text)
+ self.validateObjectFields(r.text)
+
+ def test_images(self):
+ r = requests.get(_url("/images/json"))
+ self.assertEqual(r.status_code, 200, r.text)
+ self.validateObjectFields(r.content)
+
+ def test_inspect_image(self):
+ r = requests.get(_url("/images/alpine/json"))
+ self.assertEqual(r.status_code, 200, r.text)
+ obj = self.validateObjectFields(r.content)
+ _ = parse(obj["Created"])
+
+ def test_delete_image(self):
+ r = requests.delete(_url("/images/alpine?force=true"))
+ self.assertEqual(r.status_code, 200, r.text)
+ json.loads(r.text)
+
+ def test_pull(self):
+ r = requests.post(_url("/images/pull?reference=alpine"), timeout=5)
+ self.assertEqual(r.status_code, 200, r.text)
+ json.loads(r.text)
+
+ def test_search(self):
+ # Had issues with this test hanging when registries were unresponsive
+ def do_search():
+ r = requests.get(_url("/images/search?term=alpine"), timeout=5)
+ self.assertEqual(r.status_code, 200, r.text)
+ json.loads(r.text)
+
+ search = Process(target=do_search)
+ search.start()
+ search.join(timeout=10)
+ self.assertFalse(search.is_alive(), "/images/search took too long")
+
+ def validateObjectFields(self, buffer):
+ objs = json.loads(buffer)
+ if not isinstance(objs, dict):
+ for o in objs:
+ _ = o["Id"]
+ else:
+ _ = objs["Id"]
+ return objs
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/apiv2/test-apiv2 b/test/apiv2/test-apiv2
index 1af76b4be..7a3518df2 100755
--- a/test/apiv2/test-apiv2
+++ b/test/apiv2/test-apiv2
@@ -207,13 +207,21 @@ function t() {
fi
cat $WORKDIR/curl.headers.out >>$LOG 2>/dev/null || true
- output=$(< $WORKDIR/curl.result.out)
- # Log results. If JSON, filter through jq for readability
- if egrep -qi '^Content-Type: application/json' $WORKDIR/curl.headers.out; then
- jq . <<<"$output" >>$LOG
- else
+ # Log results, if text. If JSON, filter through jq for readability.
+ content_type=$(sed -ne 's/^Content-Type:[ ]\+//pi' <$WORKDIR/curl.headers.out)
+
+ if [[ $content_type =~ /octet ]]; then
+ output="[$(file --brief $WORKDIR/curl.result.out)]"
echo "$output" >>$LOG
+ else
+ output=$(< $WORKDIR/curl.result.out)
+
+ if [[ $content_type =~ application/json ]]; then
+ jq . <<<"$output" >>$LOG
+ else
+ echo "$output" >>$LOG
+ fi
fi
# Test return code
@@ -232,6 +240,7 @@ function t() {
return
fi
+ local i
for i; do
case "$i" in
# Exact match on json field
@@ -270,7 +279,7 @@ function start_service() {
die "Cannot start service on non-localhost ($HOST)"
fi
- $PODMAN_BIN --root $WORKDIR system service --timeout 15 tcp:127.0.0.1:$PORT \
+ $PODMAN_BIN --root $WORKDIR system service --time 15 tcp:127.0.0.1:$PORT \
&> $WORKDIR/server.log &
service_pid=$!
diff --git a/test/e2e/attach_test.go b/test/e2e/attach_test.go
index 7233d169c..e9050b53b 100644
--- a/test/e2e/attach_test.go
+++ b/test/e2e/attach_test.go
@@ -33,7 +33,6 @@ var _ = Describe("Podman attach", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
-
})
It("podman attach to bogus container", func() {
diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go
index 80ee83f44..e12edad49 100644
--- a/test/e2e/common_test.go
+++ b/test/e2e/common_test.go
@@ -21,6 +21,7 @@ import (
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/stringid"
jsoniter "github.com/json-iterator/go"
+ "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
@@ -573,3 +574,10 @@ func (p *PodmanTestIntegration) CreateSeccompJson(in []byte) (string, error) {
}
return jsonFile, nil
}
+
+func SkipIfNotFedora() {
+ info := GetHostDistributionInfo()
+ if info.Distribution != "fedora" {
+ ginkgo.Skip("Test can only run on Fedora")
+ }
+}
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index 460554b77..8c496872f 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"strings"
+ "time"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
@@ -24,23 +25,26 @@ var _ = Describe("Podman events", func() {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
podmanTest.SeedImages()
})
AfterEach(func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
- timedResult := fmt.Sprintf("Test: %s completed in %f seconds", f.TestText, f.Duration.Seconds())
- GinkgoWriter.Write([]byte(timedResult))
-
+ processTestResult(f)
})
// For most, all, of these tests we do not "live" test following a log because it may make a fragile test
// system more complex. Instead we run the "events" and then verify that the events are processed correctly.
// Perhaps a future version of this test would put events in a go func and send output back over a channel
// while events occur.
+
+ // These tests are only known to work on Fedora ATM. Other distributions
+ // will be skipped.
It("podman events", func() {
- Skip("need to verify images have correct packages for journald")
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
result := podmanTest.Podman([]string{"events", "--stream=false"})
@@ -49,7 +53,8 @@ var _ = Describe("Podman events", func() {
})
It("podman events with an event filter", func() {
- Skip("need to verify images have correct packages for journald")
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=start"})
@@ -59,11 +64,14 @@ var _ = Describe("Podman events", func() {
})
It("podman events with an event filter and container=cid", func() {
- Skip("need to verify images have correct packages for journald")
+ Skip("Does not work on v2")
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, cid := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
_, ec2, cid2 := podmanTest.RunLsContainer("")
Expect(ec2).To(Equal(0))
+ time.Sleep(5 * time.Second)
result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=start", "--filter", fmt.Sprintf("container=%s", cid)})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
@@ -72,7 +80,8 @@ var _ = Describe("Podman events", func() {
})
It("podman events with a type and filter container=id", func() {
- Skip("need to verify images have correct packages for journald")
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, cid := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "type=pod", "--filter", fmt.Sprintf("container=%s", cid)})
@@ -82,7 +91,8 @@ var _ = Describe("Podman events", func() {
})
It("podman events with a type", func() {
- Skip("need to verify images have correct packages for journald")
+ SkipIfRootless()
+ SkipIfNotFedora()
setup := podmanTest.Podman([]string{"run", "-dt", "--pod", "new:foobarpod", ALPINE, "top"})
setup.WaitWithDefaultTimeout()
stop := podmanTest.Podman([]string{"pod", "stop", "foobarpod"})
@@ -97,7 +107,8 @@ var _ = Describe("Podman events", func() {
})
It("podman events --since", func() {
- Skip("need to verify images have correct packages for journald")
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
result := podmanTest.Podman([]string{"events", "--stream=false", "--since", "1m"})
@@ -106,7 +117,8 @@ var _ = Describe("Podman events", func() {
})
It("podman events --until", func() {
- Skip("need to verify images have correct packages for journald")
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
test := podmanTest.Podman([]string{"events", "--help"})
@@ -118,37 +130,28 @@ var _ = Describe("Podman events", func() {
})
It("podman events format", func() {
- Skip(v2remotefail)
- info := GetHostDistributionInfo()
- if info.Distribution != "fedora" {
- Skip("need to verify images have correct packages for journald")
- }
+ SkipIfRootless()
+ SkipIfNotFedora()
_, ec, _ := podmanTest.RunLsContainer("")
Expect(ec).To(Equal(0))
test := podmanTest.Podman([]string{"events", "--stream=false", "--format", "json"})
test.WaitWithDefaultTimeout()
- fmt.Println(test.OutputToStringArray())
jsonArr := test.OutputToStringArray()
Expect(len(jsonArr)).To(Not(BeZero()))
eventsMap := make(map[string]string)
err := json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
- if err != nil {
- os.Exit(1)
- }
+ Expect(err).To(BeNil())
_, exist := eventsMap["Status"]
Expect(exist).To(BeTrue())
Expect(test.ExitCode()).To(BeZero())
test = podmanTest.Podman([]string{"events", "--stream=false", "--format", "{{json.}}"})
test.WaitWithDefaultTimeout()
- fmt.Println(test.OutputToStringArray())
jsonArr = test.OutputToStringArray()
Expect(len(jsonArr)).To(Not(BeZero()))
eventsMap = make(map[string]string)
err = json.Unmarshal([]byte(jsonArr[0]), &eventsMap)
- if err != nil {
- os.Exit(1)
- }
+ Expect(err).To(BeNil())
_, exist = eventsMap["Status"]
Expect(exist).To(BeTrue())
Expect(test.ExitCode()).To(BeZero())
diff --git a/test/e2e/generate_systemd_test.go b/test/e2e/generate_systemd_test.go
index abfca4db9..d5ae441e2 100644
--- a/test/e2e/generate_systemd_test.go
+++ b/test/e2e/generate_systemd_test.go
@@ -233,4 +233,96 @@ var _ = Describe("Podman generate systemd", func() {
Expect(session.ExitCode()).To(Equal(125))
})
+ It("podman generate systemd --container-prefix con", func() {
+ n := podmanTest.Podman([]string{"create", "--name", "foo", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--name", "--container-prefix", "con", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# con-foo.service")
+ Expect(found).To(BeTrue())
+ })
+
+ It("podman generate systemd --separator _", func() {
+ n := podmanTest.Podman([]string{"create", "--name", "foo", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--name", "--separator", "_", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# container_foo.service")
+ Expect(found).To(BeTrue())
+ })
+
+ It("podman generate systemd pod --pod-prefix p", func() {
+ n := podmanTest.Podman([]string{"pod", "create", "--name", "foo"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-1", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-2", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--pod-prefix", "p", "--name", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# p-foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("Requires=container-foo-1.service container-foo-2.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("# container-foo-1.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("BindsTo=p-foo.service")
+ Expect(found).To(BeTrue())
+ })
+
+ It("podman generate systemd pod --pod-prefix p --container-prefix con --separator _ change all prefixes/separator", func() {
+ n := podmanTest.Podman([]string{"pod", "create", "--name", "foo"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-1", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ n = podmanTest.Podman([]string{"create", "--pod", "foo", "--name", "foo-2", "alpine", "top"})
+ n.WaitWithDefaultTimeout()
+ Expect(n.ExitCode()).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"generate", "systemd", "--container-prefix", "con", "--pod-prefix", "p", "--separator", "_", "--name", "foo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ // Grepping the output (in addition to unit tests)
+ found, _ := session.GrepString("# p_foo.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("Requires=con_foo-1.service con_foo-2.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("# con_foo-1.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("# con_foo-2.service")
+ Expect(found).To(BeTrue())
+
+ found, _ = session.GrepString("BindsTo=p_foo.service")
+ Expect(found).To(BeTrue())
+ })
})
diff --git a/test/e2e/libpod_suite_remote_test.go b/test/e2e/libpod_suite_remote_test.go
index e8cdf91ee..dde853413 100644
--- a/test/e2e/libpod_suite_remote_test.go
+++ b/test/e2e/libpod_suite_remote_test.go
@@ -95,7 +95,7 @@ func (p *PodmanTestIntegration) StartRemoteService() {
args = append(args, "--log-level", "debug")
}
remoteSocket := p.RemoteSocket
- args = append(args, "system", "service", "--timeout", "0", remoteSocket)
+ args = append(args, "system", "service", "--time", "0", remoteSocket)
podmanOptions := getRemoteOptions(p, args)
command := exec.Command(p.PodmanBinary, podmanOptions...)
command.Stdout = os.Stdout
diff --git a/test/e2e/libpod_suite_varlink_test.go b/test/e2e/libpod_suite_varlink_test.go
index cbaed71cc..92c815b39 100644
--- a/test/e2e/libpod_suite_varlink_test.go
+++ b/test/e2e/libpod_suite_varlink_test.go
@@ -89,7 +89,7 @@ func (p *PodmanTestIntegration) StartVarlink() {
varlinkEndpoint := p.RemoteSocket
p.SetVarlinkAddress(p.RemoteSocket)
- args := []string{"varlink", "--timeout", "0", varlinkEndpoint}
+ args := []string{"varlink", "--time", "0", varlinkEndpoint}
podmanOptions := getVarlinkOptions(p, args)
command := exec.Command(p.PodmanBinary, podmanOptions...)
fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
diff --git a/test/e2e/ps_test.go b/test/e2e/ps_test.go
index 8965ce297..12ce4661f 100644
--- a/test/e2e/ps_test.go
+++ b/test/e2e/ps_test.go
@@ -114,6 +114,17 @@ var _ = Describe("Podman ps", func() {
It("podman ps last flag", func() {
Skip("--last flag nonfunctional and disabled")
+ // Make sure that non-running containers are being counted as
+ // well.
+ session := podmanTest.Podman([]string{"create", "alpine", "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ result := podmanTest.Podman([]string{"ps", "--last", "2"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(len(result.OutputToStringArray())).Should(Equal(2)) // 1 container
+
_, ec, _ := podmanTest.RunLsContainer("test1")
Expect(ec).To(Equal(0))
@@ -123,10 +134,20 @@ var _ = Describe("Podman ps", func() {
_, ec, _ = podmanTest.RunLsContainer("test3")
Expect(ec).To(Equal(0))
- result := podmanTest.Podman([]string{"ps", "--last", "2"})
+ result = podmanTest.Podman([]string{"ps", "--last", "2"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(len(result.OutputToStringArray())).Should(Equal(3)) // 2 containers
+
+ result = podmanTest.Podman([]string{"ps", "--last", "3"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(len(result.OutputToStringArray())).Should(Equal(4)) // 3 containers
+
+ result = podmanTest.Podman([]string{"ps", "--last", "100"})
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
- Expect(len(result.OutputToStringArray())).Should(Equal(3))
+ Expect(len(result.OutputToStringArray())).Should(Equal(5)) // 4 containers (3 running + 1 created)
})
It("podman ps no-trunc", func() {
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 58091ff68..7cd69f738 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -106,6 +106,11 @@ var _ = Describe("Podman run with volumes", func() {
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring(dest + " ro"))
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", mount + ",readonly", ALPINE, "grep", dest, "/proc/self/mountinfo"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring(dest + " ro"))
+
session = podmanTest.Podman([]string{"run", "--rm", "--mount", mount + ",shared", ALPINE, "grep", dest, "/proc/self/mountinfo"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
diff --git a/test/endpoint/endpoint.go b/test/endpoint/endpoint.go
index 284f0d79c..6f4ae6b1f 100644
--- a/test/endpoint/endpoint.go
+++ b/test/endpoint/endpoint.go
@@ -73,7 +73,7 @@ func (p *EndpointTestIntegration) startVarlink(useImageCache bool) {
varlinkEndpoint := p.VarlinkEndpoint
//p.SetVarlinkAddress(p.RemoteSocket)
- args := []string{"varlink", "--timeout", "0", varlinkEndpoint}
+ args := []string{"varlink", "--time", "0", varlinkEndpoint}
podmanOptions := getVarlinkOptions(p, args)
if useImageCache {
cacheOptions := []string{"--storage-opt", fmt.Sprintf("%s.imagestore=%s", p.ImageCacheFS, p.ImageCacheDir)}
diff --git a/test/system/010-images.bats b/test/system/010-images.bats
index 6957d4830..2b1845d72 100644
--- a/test/system/010-images.bats
+++ b/test/system/010-images.bats
@@ -28,7 +28,7 @@ load helpers
# 'created': podman includes fractional seconds, podman-remote does not
tests="
Names[0] | $PODMAN_TEST_IMAGE_FQN
-ID | [0-9a-f]\\\{64\\\}
+Id | [0-9a-f]\\\{64\\\}
Digest | sha256:[0-9a-f]\\\{64\\\}
CreatedAt | [0-9-]\\\+T[0-9:.]\\\+Z
Size | [0-9]\\\+
diff --git a/test/system/200-pod.bats b/test/system/200-pod.bats
index e3643a3bd..f34cd0707 100644
--- a/test/system/200-pod.bats
+++ b/test/system/200-pod.bats
@@ -170,4 +170,16 @@ function random_ip() {
is "$output" ".*options $dns_opt" "--dns-opt was added"
}
+@test "podman pod inspect - format" {
+ skip_if_remote "podman-pod does not work with podman-remote"
+
+ run_podman pod create --name podtest
+ podid=$output
+
+ run_podman pod inspect --format '-> {{.Name}}: {{.NumContainers}}' podtest
+ is "$output" "-> podtest: 1"
+
+ run_podman pod rm -f podtest
+}
+
# vim: filetype=sh
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 8453cdc38..cb16fa89c 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -27,11 +27,13 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
- # TODO: Setting up from base-images is very inefficient, use libpod's cache-images instead?
- FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1565360543"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1565360543"
- UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190724"
- PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1804-bionic-v20190722a"
+ # See https://github.com/containers/libpod/blob/master/contrib/cirrus/README.md#test_build_cache_images_task-task
+ _BUILT_IMAGE_SUFFIX: "libpod-6224667180531712"
+ FEDORA_CACHE_IMAGE_NAME: "fedora-32-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
+ UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"
+
####
#### Command variables to help avoid duplication
@@ -153,9 +155,6 @@ gce_instance:
- 'cirrus-ci/only_prs/gate'
- 'cirrus-ci/only_prs/vendor'
- container:
- image: registry.fedoraproject.org/fedora:30
-
env:
matrix:
CROSS_TARGET: darwin
@@ -179,10 +178,8 @@ gce_instance:
gce_instance: # Only need to specify differences from defaults (above)
matrix: # Duplicate this task for each matrix product.
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
- # TODO: Re-enable once prior image is F30 and above is F31
- # image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
- # TODO: Re-enable when package repositories functional
- #image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
+ image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+ image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
# Separate scripts for separate outputs, makes debugging easier.
@@ -248,7 +245,7 @@ gce_instance:
CIRRUS_CLONE_DEPTH: 1 # no code is being used by this task
container:
- image: "registry.fedoraproject.org/fedora-minimal:latest"
+ image: "quay.io/libpod/fedora-minimal:latest"
cpu: 1
memory: 1
diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml
index dde37ad79..888d89afa 100644
--- a/vendor/github.com/containers/buildah/.golangci.yml
+++ b/vendor/github.com/containers/buildah/.golangci.yml
@@ -4,8 +4,8 @@ run:
- apparmor
- seccomp
- selinux
- concurrency: 6
- deadline: 5m
+ # Don't exceed number of threads available when running under CI
+ concurrency: 4
linters:
disable-all: true
enable:
@@ -17,7 +17,8 @@ linters:
- gofmt
- goimports
- golint
- - gosimple
+ # Broken? Unpredictably dies w/o any error well before deadline/timeout expires
+ # - gosimple
- govet
- ineffassign
- interfacer
diff --git a/vendor/github.com/containers/buildah/SECURITY.md b/vendor/github.com/containers/buildah/SECURITY.md
new file mode 100644
index 000000000..0184bd22d
--- /dev/null
+++ b/vendor/github.com/containers/buildah/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the Buildah Project
+
+The Buildah Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 2ece11acd..556506e4a 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -413,6 +414,9 @@ type BuilderOptions struct {
MaxPullRetries int
// PullRetryDelay is how long to wait before retrying a pull attempt.
PullRetryDelay time.Duration
+ // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt
+ // an image if it is encrypted. If nil, no attempt is made to decrypt the image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
// ImportOptions are used to initialize a Builder from an existing container
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index d25ba110a..6c3febd5d 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -19,11 +19,11 @@ import (
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/stringid"
digest "github.com/opencontainers/go-digest"
- configv1 "github.com/openshift/api/config/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -88,6 +88,15 @@ type CommitOptions struct {
// RetryDelay is how long to wait before retrying a commit attempt to a
// registry.
RetryDelay time.Duration
+ // OciEncryptConfig, when non-nil, indicates that an image should be encrypted.
+ // The encryption options are derived from the construction of an EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, encrypt all layers.
+ // Integers in the slice are 0-indexed layer indices, with support for negative
+ // indexing, i.e. 0 is the first layer and -1 the last (top-most) layer.
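+ // For example, &[]int{} encrypts every layer, while &[]int{0, -1} encrypts only the first and last layers.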
+ OciEncryptLayers *[]int
}
// PushOptions can be used to alter how an image is copied somewhere.
@@ -132,6 +141,15 @@ type PushOptions struct {
MaxRetries int
// RetryDelay is how long to wait before retrying a push attempt.
RetryDelay time.Duration
+ // OciEncryptConfig, when non-nil, indicates that an image should be encrypted.
+ // The encryption options are derived from the construction of an EncryptConfig object.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt.
+ // If nil, don't encrypt any layers.
+ // If non-nil and len==0, encrypt all layers.
+ // Integers in the slice are 0-indexed layer indices, with support for negative
+ // indexing, i.e. 0 is the first layer and -1 the last (top-most) layer.
+ OciEncryptLayers *[]int
}
var (
@@ -162,7 +180,12 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error
}
if registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES"); ok && len(registrySources) > 0 {
- var sources configv1.RegistrySources
+ // Use local struct instead of github.com/openshift/api/config/v1 RegistrySources
+ var sources struct {
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ }
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
return errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
}
@@ -270,7 +293,9 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Check if the base image is already in the destination and it's some kind of local
// storage. If so, we can skip recompressing any layers that come from the base image.
exportBaseLayers := true
- if transport, destIsStorage := dest.Transport().(is.StoreTransport); destIsStorage && b.FromImageID != "" {
+ if transport, destIsStorage := dest.Transport().(is.StoreTransport); destIsStorage && options.OciEncryptConfig != nil {
+ return imgID, nil, "", errors.New("unable to use local storage with image encryption")
+ } else if destIsStorage && b.FromImageID != "" {
if baseref, err := transport.ParseReference(b.FromImageID); baseref != nil && err == nil {
if img, err := transport.GetImage(baseref); img != nil && err == nil {
logrus.Debugf("base image %q is already present in local storage, no need to copy its layers", b.FromImageID)
@@ -319,7 +344,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
}
// If we've got more names to attach, and we know how to do that for
@@ -451,7 +476,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
systemContext.DirForceCompress = true
}
var manifestBytes []byte
- if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy), options.MaxRetries, options.RetryDelay); err != nil {
+ if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
}
if options.ReportWriter != nil {
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 8fb3ebdb7..b43cfffc9 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -14,6 +14,7 @@ import (
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
"github.com/docker/distribution/registry/api/errcode"
@@ -30,7 +31,7 @@ const (
DOCKER = "docker"
)
-func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string) *cp.Options {
+func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemContext *types.SystemContext, destinationSystemContext *types.SystemContext, manifestType string, removeSignatures bool, addSigner string, ociEncryptLayers *[]int, ociEncryptConfig *encconfig.EncryptConfig, ociDecryptConfig *encconfig.DecryptConfig) *cp.Options {
sourceCtx := getSystemContext(store, nil, "")
if sourceSystemContext != nil {
*sourceCtx = *sourceSystemContext
@@ -47,6 +48,9 @@ func getCopyOptions(store storage.Store, reportWriter io.Writer, sourceSystemCon
ForceManifestMIMEType: manifestType,
RemoveSignatures: removeSignatures,
SignBy: addSigner,
+ OciEncryptConfig: ociEncryptConfig,
+ OciDecryptConfig: ociDecryptConfig,
+ OciEncryptLayers: ociEncryptLayers,
}
}
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 472603e52..2d50e1e48 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -4,9 +4,10 @@ go 1.12
require (
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
- github.com/containers/common v0.10.0
- github.com/containers/image/v5 v5.4.3
- github.com/containers/storage v1.19.0
+ github.com/containers/common v0.11.2
+ github.com/containers/image/v5 v5.4.4
+ github.com/containers/ocicrypt v1.0.2
+ github.com/containers/storage v1.19.2
github.com/cyphar/filepath-securejoin v0.2.2
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
@@ -17,27 +18,26 @@ require (
github.com/hashicorp/go-multierror v1.0.0
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/mattn/go-shellwords v1.0.10
- github.com/onsi/ginkgo v1.12.0
- github.com/onsi/gomega v1.9.0
- github.com/opencontainers/go-digest v1.0.0-rc1
+ github.com/onsi/ginkgo v1.12.1
+ github.com/onsi/gomega v1.10.0
+ github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc9
- github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.5.1
- github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316
github.com/openshift/imagebuilder v1.1.4
github.com/pkg/errors v0.9.1
- github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f
+ github.com/seccomp/containers-golang v0.4.1
github.com/seccomp/libseccomp-golang v0.9.1
- github.com/sirupsen/logrus v1.5.0
+ github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v0.0.7
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.5.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/vishvananda/netlink v1.1.0 // indirect
- golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
- golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
)
replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 5612c03db..c66b8256b 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -6,18 +6,14 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
@@ -31,6 +27,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver v3.1.0+incompatible h1:7hqmJYuaEK3qwVjWubYiht3j93YI0WQBuysxHIfUriU=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -41,6 +39,8 @@ github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtM
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M=
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
@@ -50,17 +50,20 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.10.0 h1:Km1foMJJBIxceA1/UCZcIuwf8sCF71sP5DwE6Oh1BEA=
-github.com/containers/common v0.10.0/go.mod h1:6A/moCuQITXLqBe5A0WKKTcCfCmEQRbknI05HcPzOL0=
-github.com/containers/image/v5 v5.4.3 h1:zn2HR7uu4hpvT5QQHgjqonOzKDuM1I1UHUEmzZT5sbs=
+github.com/containers/common v0.11.2 h1:e4477fCE3qSA+Z2vT+uUMUTn8s8CyIM++qNm3PCSl68=
+github.com/containers/common v0.11.2/go.mod h1:2w3QE6VUmhltGYW4wV00h4okq1Crs7hNI1ZD2I0QRUY=
github.com/containers/image/v5 v5.4.3/go.mod h1:pN0tvp3YbDd7BWavK2aE0mvJUqVd2HmhPjekyWSFm0U=
+github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
+github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/storage v1.18.2/go.mod h1:WTBMf+a9ZZ/LbmEVeLHH2TX4CikWbO1Bt+/m58ZHVPg=
-github.com/containers/storage v1.19.0 h1:bVIF5EglbT5PQnqcN7sE6VWqoQzlToqzjXdz+eNubQg=
-github.com/containers/storage v1.19.0/go.mod h1:9Xc4rrTubn5hmtBfL+PSJH1XlfTQwR4VAG1NDUIpCts=
+github.com/containers/storage v1.19.1 h1:YKIzOO12iaD5Ra0PKFS6emcygbHLmwmQOCQRU/19YAQ=
+github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ=
+github.com/containers/storage v1.19.2 h1:vhcUwEjDZiPJxaLPFsjvyavnEjFw6qQi9HAkVz1amfI=
+github.com/containers/storage v1.19.2/go.mod h1:gYCp3jzgXkvubO0rI14QAjz5Mxm/qKJgLmHFyqayDnw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -69,7 +72,6 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -92,53 +94,32 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehP
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw=
github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
@@ -148,11 +129,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -177,20 +155,18 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
-github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
+github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -198,14 +174,9 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
@@ -217,7 +188,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -225,23 +195,25 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.0 h1:Gwkk+PTu/nfOwNMtUB/mRUv0X7ewW5dO4AERT1ThVKo=
+github.com/onsi/gomega v1.10.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
@@ -252,14 +224,15 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7 h1:Dliu5QO+4JYWu/yMshaMU7G3JN2POGpwjJN7gjy10Go=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.4.0/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM6hL0Fjzx6Km/exZY6RbSPwMu3o=
-github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10=
github.com/openshift/imagebuilder v1.1.4 h1:LUg8aTjyXMtlDx6IbtvaqofFGZ6aYqe+VIeATE735LM=
github.com/openshift/imagebuilder v1.1.4/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
@@ -270,7 +243,6 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
@@ -290,6 +262,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -297,13 +270,14 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f h1:OtU/w6sBKmXYaw2KEODxjcYi3oPSyyslhgGFgIJVGAI=
-github.com/seccomp/containers-golang v0.0.0-20190312124753-8ca8945ccf5f/go.mod h1:f/98/SnvAzhAEFQJ3u836FePXvcbE8BS0YGMQNn4mhA=
+github.com/seccomp/containers-golang v0.4.1 h1:6hsmsP8Y9T6PWKJELqAkRWkc6Te60+zK64avkjInd44=
+github.com/seccomp/containers-golang v0.4.1/go.mod h1:5fP9lgyYyklJ8fg8Geq193G1QLe0ikf34z+hZKIjmnE=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -315,7 +289,6 @@ github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKv
github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU=
github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -323,8 +296,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -342,8 +313,9 @@ github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.0.3 h1:Ldt/azOkbThTk2loi6FrBd/3fhxGFQ24MxFAS88PoNY=
github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y=
+github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
+github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
@@ -368,21 +340,16 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -394,9 +361,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -408,31 +373,29 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190921190940-14da1ac737cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -441,32 +404,26 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqG
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -479,8 +436,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -496,23 +451,4 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
-k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
-k8s.io/apimachinery v0.17.0 h1:xRBnuie9rXcPxUkDizUsGvPf1cnlZCFu210op7J7LJo=
-k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 94e97d870..57d8ecb93 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -586,16 +586,10 @@ func (i *containerImageSource) Reference() types.ImageReference {
}
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, errors.Errorf("containerImageSource does not support manifest lists")
- }
return nil, nil
}
func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
- if instanceDigest != nil {
- return nil, "", errors.Errorf("containerImageSource does not support manifest lists")
- }
return i.manifest, i.manifestType, nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 8d30367c1..a9e0641c0 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -3,6 +3,7 @@ package imagebuildah
import (
"bytes"
"context"
+ "fmt"
"io"
"io/ioutil"
"net/http"
@@ -16,10 +17,12 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
+ "github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -171,6 +174,9 @@ type BuildOptions struct {
MaxPullPushRetries int
// PullPushRetryDelay is how long to wait before retrying a pull or push attempt.
PullPushRetryDelay time.Duration
+ // OciDecryptConfig contains the config used to decrypt an image when the image
+ // is encrypted, if non-nil. If nil, no attempt is made to decrypt the image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
@@ -249,6 +255,9 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
if err != nil {
return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
}
+
+ warnOnUnsetBuildArgs(mainNode, options.Args)
+
for _, d := range dockerfiles[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(d)
if err != nil {
@@ -280,6 +289,20 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
return exec.Build(ctx, stages)
}
+func warnOnUnsetBuildArgs(node *parser.Node, args map[string]string) {
+ for _, child := range node.Children {
+ switch strings.ToUpper(child.Value) {
+ case "ARG":
+ argName := child.Next.Value
+ if _, ok := args[argName]; !strings.Contains(argName, "=") && !ok {
+ logrus.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=<VALUE>", argName))
+ }
+ default:
+ continue
+ }
+ }
+}
+
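warnOnUnsetBuildArgs above walks the parsed Dockerfile and warns once per ARG instruction that has neither an inline default (no "=" in the token) nor a value supplied by the caller. A rough, self-contained sketch of the same check (the Dockerfile contents are illustrative):

    package main

    import (
        "fmt"
        "strings"

        "github.com/openshift/imagebuilder"
    )

    func main() {
        dockerfile := "ARG VERSION\nARG CHANNEL=stable\nFROM alpine\n"
        node, err := imagebuilder.ParseDockerfile(strings.NewReader(dockerfile))
        if err != nil {
            panic(err)
        }
        args := map[string]string{} // nothing passed via --build-arg
        for _, child := range node.Children {
            if strings.ToUpper(child.Value) != "ARG" || child.Next == nil {
                continue
            }
            name := child.Next.Value
            if _, ok := args[name]; !strings.Contains(name, "=") && !ok {
                fmt.Printf("missing %q build argument\n", name) // fires for VERSION only
            }
        }
    }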
// preprocessDockerfileContents runs CPP(1) in preprocess-only mode on the input
// dockerfile content and will use ctxDir as the base include path.
//
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index 02123c822..a156313df 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -20,6 +20,7 @@ import (
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -100,6 +101,7 @@ type Executor struct {
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
+ ociDecryptConfig *encconfig.DecryptConfig
}
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -188,6 +190,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
os: options.OS,
maxPullPushRetries: options.MaxPullPushRetries,
retryPullPushDelay: options.PullPushRetryDelay,
+ ociDecryptConfig: options.OciDecryptConfig,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -233,7 +236,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
-func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, from, output string) *StageExecutor {
+func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, output string) *StageExecutor {
if b.stages == nil {
b.stages = make(map[string]*StageExecutor)
}
@@ -248,7 +251,6 @@ func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, from, outpu
stage: stage,
}
b.stages[stage.Name] = stageExec
- b.stages[from] = stageExec
if idx := strconv.Itoa(stage.Position); idx != stage.Name {
b.stages[idx] = stageExec
}
@@ -421,7 +423,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
output = b.output
}
- stageExecutor := b.startStage(&stage, len(stages), base, output)
+ stageExecutor := b.startStage(&stage, len(stages), output)
// If this a single-layer build, or if it's a multi-layered
// build and b.forceRmIntermediateCtrs is set, make sure we
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 5ab70e54c..7ba5e2e96 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -295,7 +295,7 @@ func (s *StageExecutor) digestSpecifiedContent(node *parser.Node, argValues []st
// container. Update the ID mappings and
// all-content-comes-from-below-this-directory value.
from := strings.TrimPrefix(flag, "--from=")
- if other, ok := s.executor.stages[from]; ok {
+ if other, ok := s.executor.stages[from]; ok && other.index < s.index {
contextDir = other.mountPoint
idMappingOptions = &other.builder.IDMappingOptions
} else if builder, ok := s.executor.containerMap[from]; ok {
@@ -633,6 +633,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
Devices: s.executor.devices,
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
+ OciDecryptConfig: s.executor.ociDecryptConfig,
}
// Check and see if the image is a pseudonym for the end result of a
@@ -868,13 +869,10 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if len(arr) != 2 {
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
- otherStage, ok := s.executor.stages[arr[1]]
- if !ok {
- if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
- return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
- }
- } else {
+ if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
mountPoint = otherStage.mountPoint
+ } else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
+ return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
}
s.copyFrom = mountPoint
break
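The two --from hunks in this file make stage lookups order-aware: a name resolves to a build stage only when that stage was defined earlier in the Dockerfile, and otherwise falls back to image resolution. An illustrative Dockerfile for the collision this guards against (names are hypothetical):

    FROM alpine AS first
    # "second" is only defined below, so here it must resolve as an image
    # reference named "second", not as the later build stage:
    COPY --from=second /data /data

    FROM busybox AS second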
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 160c2157d..4f4b1564b 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -30,12 +30,13 @@ const (
func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
pullOptions := PullOptions{
- ReportWriter: options.ReportWriter,
- Store: store,
- SystemContext: options.SystemContext,
- BlobDirectory: options.BlobDirectory,
- MaxRetries: options.MaxPullRetries,
- RetryDelay: options.PullRetryDelay,
+ ReportWriter: options.ReportWriter,
+ Store: store,
+ SystemContext: options.SystemContext,
+ BlobDirectory: options.BlobDirectory,
+ MaxRetries: options.MaxPullRetries,
+ RetryDelay: options.PullRetryDelay,
+ OciDecryptConfig: options.OciDecryptConfig,
}
ref, err := pullImage(ctx, store, srcRef, pullOptions, sc)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 8e7f7dd4a..1a457f34c 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -57,6 +57,7 @@ type BudResults struct {
Creds string
DisableCompression bool
DisableContentTrust bool
+ DecryptionKeys []string
File []string
Format string
Iidfile string
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index cbb98cbcf..f8d4bdeb6 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -19,6 +19,7 @@ import (
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
@@ -56,6 +57,9 @@ type PullOptions struct {
MaxRetries int
// RetryDelay is how long to wait before retrying a pull attempt.
RetryDelay time.Duration
+ // OciDecryptConfig contains the config used to decrypt an image when the image
+ // is encrypted, if non-nil. If nil, no attempt is made to decrypt the image.
+ OciDecryptConfig *encconfig.DecryptConfig
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference) (string, error) {
@@ -164,6 +168,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
ReportWriter: options.ReportWriter,
MaxPullRetries: options.MaxRetries,
PullRetryDelay: options.RetryDelay,
+ OciDecryptConfig: options.OciDecryptConfig,
}
storageRef, transport, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
@@ -275,7 +280,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}()
logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
- if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, ""), options.MaxRetries, options.RetryDelay); err != nil {
+ if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err
}
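PullOptions now threads the decryption config through getCopyOptions down to the actual image copy. A minimal sketch of constructing one from a PEM private key; the helper name, file path, and the "privkeys" parameter key follow ocicrypt conventions and are assumptions here:

    package sketch

    import (
        "io/ioutil"

        encconfig "github.com/containers/ocicrypt/config"
    )

    // decryptConfigFromKey (hypothetical helper) wraps a PEM-encoded private
    // key in a DecryptConfig suitable for PullOptions.OciDecryptConfig.
    func decryptConfigFromKey(path string) (*encconfig.DecryptConfig, error) {
        pem, err := ioutil.ReadFile(path)
        if err != nil {
            return nil, err
        }
        return &encconfig.DecryptConfig{
            Parameters: map[string][][]byte{
                "privkeys": {pem}, // parameter name assumed per ocicrypt conventions
            },
        }, nil
    }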
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index a358b7c54..f41daa2cc 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -74,7 +74,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
return []string{strings.TrimPrefix(name, DefaultTransport)}, DefaultTransport, false, nil
}
split := strings.SplitN(name, ":", 2)
- if len(split) == 2 {
+ if StartsWithValidTransport(name) && len(split) == 2 {
if trans := transports.Get(split[0]); trans != nil {
return []string{split[1]}, trans.Name(), false, nil
}
@@ -148,6 +148,12 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
}
+// StartsWithValidTransport validates that the name starts with a Buildah-supported
+// transport, to avoid the corner case of an image name that matches a transport name
+func StartsWithValidTransport(name string) bool {
+ return strings.HasPrefix(name, "dir:") || strings.HasPrefix(name, "docker://") || strings.HasPrefix(name, "docker-archive:") || strings.HasPrefix(name, "docker-daemon:") || strings.HasPrefix(name, "oci:") || strings.HasPrefix(name, "oci-archive:")
+}
+
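Given the prefixes listed above, the practical effect (a quick sketch):

    package main

    import (
        "fmt"

        "github.com/containers/buildah/util"
    )

    func main() {
        fmt.Println(util.StartsWithValidTransport("docker://alpine:3.11")) // true: a genuine transport reference
        fmt.Println(util.StartsWithValidTransport("docker:latest"))        // false: an image that merely looks like one
    }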
// ExpandNames takes unqualified names, parses them as image names, and returns
// the fully expanded result, including a tag. Names which don't include a registry
// name will be marked for the most-preferred registry (i.e., the first one in our
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index ef75d9847..d0b56c7f6 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -7,7 +7,6 @@ import (
"path/filepath"
"strings"
"sync"
- "syscall"
"github.com/BurntSushi/toml"
"github.com/containers/common/pkg/capabilities"
@@ -263,6 +262,13 @@ type EngineConfig struct {
// PullPolicy determines whether to pull an image before creating or running a container.
// The default is "missing".
PullPolicy string `toml:"pull_policy"`
+
+ // Remote indicates whether the application should be running in remote mode
+ Remote bool `toml:"_"`
+
+ // RemoteURI contains the connection information used to connect to a remote system.
+ RemoteURI string `toml:"remote_uri,omitempty"`
+
// RuntimePath is the path to OCI runtime binary for launching containers.
// The first path pointing to a valid file will be used This is used only
// when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
@@ -540,17 +546,8 @@ func (c *Config) Validate() error {
// It returns an `error` on validation failure, otherwise
// `nil`.
func (c *EngineConfig) Validate() error {
- // Relative paths can cause nasty bugs, because core paths we use could
- // shift between runs (or even parts of the program - the OCI runtime
- // uses a different working directory than we do, for example.
- if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) {
- return fmt.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
- }
- if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) {
- return fmt.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
- }
- if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) {
- return fmt.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
+ if err := c.validatePaths(); err != nil {
+ return err
}
// Check if the pullPolicy from containers.conf is valid
@@ -566,22 +563,13 @@ func (c *EngineConfig) Validate() error {
// It returns an `error` on validation failure, otherwise
// `nil`.
func (c *ContainersConfig) Validate() error {
- for _, u := range c.DefaultUlimits {
- ul, err := units.ParseUlimit(u)
- if err != nil {
- return fmt.Errorf("unrecognized ulimit %s: %v", u, err)
- }
- _, err = ul.GetRlimit()
- if err != nil {
- return err
- }
+
+ if err := c.validateUlimits(); err != nil {
+ return err
}
- for _, d := range c.Devices {
- _, _, _, err := Device(d)
- if err != nil {
- return err
- }
+ if err := c.validateDevices(); err != nil {
+ return err
}
if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
@@ -600,8 +588,7 @@ func (c *ContainersConfig) Validate() error {
// execution checks. It returns an `error` on validation failure, otherwise
// `nil`.
func (c *NetworkConfig) Validate() error {
-
- if c.NetworkConfigDir != cniConfigDir {
+ if c.NetworkConfigDir != _cniConfigDir {
err := isDirectory(c.NetworkConfigDir)
if err != nil {
return errors.Wrapf(err, "invalid network_config_dir: %s", c.NetworkConfigDir)
@@ -803,31 +790,6 @@ func resolveHomeDir(path string) (string, error) {
return strings.Replace(path, "~", home, 1), nil
}
-// isDirectory tests whether the given path exists and is a directory. It
-// follows symlinks.
-func isDirectory(path string) error {
- path, err := resolveHomeDir(path)
- if err != nil {
- return err
- }
-
- info, err := os.Stat(path)
- if err != nil {
- return err
- }
-
- if !info.Mode().IsDir() {
- // Return a PathError to be consistent with os.Stat().
- return &os.PathError{
- Op: "stat",
- Path: path,
- Err: syscall.ENOTDIR,
- }
- }
-
- return nil
-}
-
func rootlessConfigPath() (string, error) {
if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
return filepath.Join(configHome, _configPath), nil
@@ -878,3 +840,16 @@ func Default() (*Config, error) {
})
return config, err
}
+
+// Path returns the location of the containers.conf file: the CONTAINERS_CONF
+// environment variable wins, then the rootless per-user path, then the
+// system-wide override path.
+func Path() string {
+ if path := os.Getenv("CONTAINERS_CONF"); path != "" {
+ return path
+ }
+ if unshare.IsRootless() {
+ if rpath, err := rootlessConfigPath(); err == nil {
+ return rpath
+ }
+ return "$HOME/" + UserOverrideContainersConfig
+ }
+ return OverrideContainersConfig
+}
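
Path() resolves the containers.conf location in a fixed precedence order: the CONTAINERS_CONF environment variable, then the rootless per-user path, then the system-wide override. A hedged usage sketch (the override path below is made up):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/config"
)

func main() {
	// CONTAINERS_CONF takes precedence over the rootless and system locations.
	os.Setenv("CONTAINERS_CONF", "/tmp/containers.conf") // illustrative path
	fmt.Println(config.Path())                           // prints /tmp/containers.conf
}
```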
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
new file mode 100644
index 000000000..8f4daa3d7
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -0,0 +1,81 @@
+// +build !remote
+
+package config
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ units "github.com/docker/go-units"
+)
+
+// isDirectory tests whether the given path exists and is a directory. It
+// follows symlinks.
+func isDirectory(path string) error {
+ path, err := resolveHomeDir(path)
+ if err != nil {
+ return err
+ }
+
+ info, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+
+ if !info.Mode().IsDir() {
+ // Return a PathError to be consistent with os.Stat().
+ return &os.PathError{
+ Op: "stat",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ return nil
+}
+
+func (c *EngineConfig) validatePaths() error {
+ // Relative paths can cause nasty bugs, because core paths we use could
+ // shift between runs, or even between parts of the program; the OCI
+ // runtime uses a different working directory than we do, for example.
+ if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) {
+ return fmt.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
+ }
+ if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) {
+ return fmt.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
+ }
+ if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) {
+ return fmt.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
+ }
+ return nil
+}
+
+func (c *ContainersConfig) validateDevices() error {
+ for _, d := range c.Devices {
+ _, _, _, err := Device(d)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *ContainersConfig) validateUlimits() error {
+ for _, u := range c.DefaultUlimits {
+ ul, err := units.ParseUlimit(u)
+ if err != nil {
+ return fmt.Errorf("unrecognized ulimit %s: %v", u, err)
+ }
+ _, err = ul.GetRlimit()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func isRemote() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go
new file mode 100644
index 000000000..d012dbd2f
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_remote.go
@@ -0,0 +1,25 @@
+// +build remote
+
+package config
+
+// isDirectory tests whether the given path exists and is a directory. It
+// follows symlinks.
+func isDirectory(path string) error {
+ return nil
+}
+
+func isRemote() bool {
+ return true
+}
+
+func (c *EngineConfig) validatePaths() error {
+ return nil
+}
+
+func (c *ContainersConfig) validateDevices() error {
+ return nil
+}
+
+func (c *ContainersConfig) validateUlimits() error {
+ return nil
+}
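
config_local.go and config_remote.go are alternatives selected by the `remote` build tag, so remote clients compile the no-op validators. A minimal sketch of the same pattern in a hypothetical program; a sibling file tagged `// +build remote` would define the other isRemote:

```go
// +build !remote

package main

import "fmt"

// isRemote would have a second definition, returning true, in a sibling file
// guarded by `// +build remote`; `go build -tags remote` swaps them.
func isRemote() bool { return false }

func main() { fmt.Println("remote build:", isRemote()) }
```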
diff --git a/vendor/github.com/containers/common/pkg/config/config_unix.go b/vendor/github.com/containers/common/pkg/config/config_unix.go
deleted file mode 100644
index f270f2e95..000000000
--- a/vendor/github.com/containers/common/pkg/config/config_unix.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !windows
-
-package config
-
-// Defaults for linux/unix if none are specified
-const (
- cniConfigDir = "/etc/cni/net.d/"
-)
-
-var cniBinDir = []string{
- "/usr/libexec/cni",
- "/usr/lib/cni",
- "/usr/local/lib/cni",
- "/opt/cni/bin",
-}
diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go
deleted file mode 100644
index f6a6512a1..000000000
--- a/vendor/github.com/containers/common/pkg/config/config_windows.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build windows
-
-package config
-
-// Defaults for linux/unix if none are specified
-const (
- cniConfigDir = "C:\\cni\\etc\\net.d\\"
-)
-
-var cniBinDir = []string{"C:\\cni\\bin\\"}
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 185ce8cee..fe523cbf5 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -53,9 +53,6 @@ var (
// DefaultDetachKeys is the default keys sequence for detaching a
// container
DefaultDetachKeys = "ctrl-p,ctrl-q"
-)
-
-var (
// ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH)
// is out of date for the current podman version
ErrConmonOutdated = errors.New("outdated conmon version")
@@ -80,15 +77,24 @@ var (
"CAP_SETUID",
"CAP_SYS_CHROOT",
}
+
+ cniBinDir = []string{
+ "/usr/libexec/cni",
+ "/usr/lib/cni",
+ "/usr/local/lib/cni",
+ "/opt/cni/bin",
+ }
)
const (
- // EtcDir is the sysconfdir where podman should look for system config files.
+ // _etcDir is the sysconfdir where podman should look for system config files.
// It can be overridden at build time.
_etcDir = "/etc"
// InstallPrefix is the prefix where podman will be installed.
// It can be overridden at build time.
_installPrefix = "/usr"
+ // _cniConfigDir is the directory where cni configuration files are found
+ _cniConfigDir = "/etc/cni/net.d/"
// CgroupfsCgroupsManager represents cgroupfs native cgroup manager
CgroupfsCgroupsManager = "cgroupfs"
// DefaultApparmorProfile specifies the default apparmor profile for the container.
@@ -191,7 +197,7 @@ func DefaultConfig() (*Config, error) {
},
Network: NetworkConfig{
DefaultNetwork: "podman",
- NetworkConfigDir: cniConfigDir,
+ NetworkConfigDir: _cniConfigDir,
CNIPluginDirs: cniBinDir,
},
Engine: *defaultEngineConfig,
@@ -233,6 +239,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.CgroupManager = defaultCgroupManager()
c.StopTimeout = uint(10)
+ c.Remote = isRemote()
c.OCIRuntimes = map[string][]string{
"runc": {
"/usr/bin/runc",
diff --git a/vendor/github.com/containers/common/pkg/config/libpodConfig.go b/vendor/github.com/containers/common/pkg/config/libpodConfig.go
index a8e4c9c93..ab507e864 100644
--- a/vendor/github.com/containers/common/pkg/config/libpodConfig.go
+++ b/vendor/github.com/containers/common/pkg/config/libpodConfig.go
@@ -226,7 +226,7 @@ func newLibpodConfig(c *Config) error {
// hard code EventsLogger to "file" to match older podman versions.
if config.EventsLogger != "file" {
- logrus.Debugf("Ignoring lipod.conf EventsLogger setting %q. Use containers.conf if you want to change this setting and remove libpod.conf files.", config.EventsLogger)
+ logrus.Debugf("Ignoring libpod.conf EventsLogger setting %q. Use %q if you want to change this setting and remove libpod.conf files.", Path(), config.EventsLogger)
config.EventsLogger = "file"
}
@@ -262,7 +262,7 @@ func systemLibpodConfigs() ([]string, error) {
}
// TODO: Raise to Warnf, when Podman is updated to
// remove libpod.conf by default
- logrus.Debugf("Found deprecated file %s, please remove. Use %s to override defaults.\n", path, containersConfPath)
+ logrus.Debugf("Found deprecated file %s, please remove. Use %s to override defaults.\n", Path(), containersConfPath)
return []string{path}, nil
}
return nil, err
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
index 269ea686a..fcb3cab72 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_linux.go
@@ -40,7 +40,7 @@ func New(quiet bool) *SysInfo {
sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet)
sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet)
sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet)
- sysInfo.cgroupPids = checkCgroupPids(quiet)
+ sysInfo.cgroupPids = checkCgroupPids(cgMounts, quiet)
}
_, ok := cgMounts["devices"]
@@ -227,16 +227,17 @@ func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetI
}
// checkCgroupPids reads the pids information from the pids cgroup mount point.
-func checkCgroupPids(quiet bool) cgroupPids {
+func checkCgroupPids(cgMounts map[string]string, quiet bool) cgroupPids {
cgroup2, err := cgroupv2.Enabled()
if err != nil {
logrus.Errorf("Failed to check cgroups version: %v", err)
+ return cgroupPids{}
}
if !cgroup2 {
- _, err := cgroups.FindCgroupMountpoint("", "pids")
- if err != nil {
+ _, ok := cgMounts["pids"]
+ if !ok {
if !quiet {
- logrus.Warn(err)
+ logrus.Warn("unable to find pids cgroup in mounts")
}
return cgroupPids{}
}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 10aff615e..55eb38824 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -190,6 +190,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
if err != nil {
return nil, "", err
}
+ logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 40c40dee8..292614593 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -172,7 +172,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
Architecture: v1.Architecture,
Os: v1.OS,
Layers: layerInfosToStrings(m.LayerInfos()),
- Env: d1.Config.Env,
+ Env: v1.Config.Env,
}
return i, nil
}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index dae3eb586..ce85af18a 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "runtime"
"strings"
"github.com/containers/image/v5/types"
@@ -37,7 +38,12 @@ var (
xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json")
dockerHomePath = filepath.FromSlash(".docker/config.json")
dockerLegacyHomePath = ".dockercfg"
+ nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json")
+ // Note that the keyring support has been disabled as it was causing
+ // regressions. Before enabling, please revisit the TODO(keyring) comments,
+ // which need to be addressed if the need to support the kernel keyring
+ // re-emerges.
enableKeyring = false
// ErrNotLoggedIn is returned for users not logged into a registry
@@ -73,6 +79,70 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st
})
}
+// GetAllCredentials returns the registry credentials for all registries stored
+// in either the auth.json file or the docker/config.json.
+func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
+ // Note: we need to read the auth files in the inverse order to prevent
+ // a priority inversion when writing to the map.
+ authConfigs := make(map[string]types.DockerAuthConfig)
+ paths := getAuthFilePaths(sys)
+ for i := len(paths) - 1; i >= 0; i-- {
+ path := paths[i]
+ // readJSONFile returns an empty map in case the path doesn't exist.
+ auths, err := readJSONFile(path.path, path.legacyFormat)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading JSON file %q", path.path)
+ }
+
+ for registry, data := range auths.AuthConfigs {
+ conf, err := decodeDockerAuth(data)
+ if err != nil {
+ return nil, err
+ }
+ authConfigs[normalizeRegistry(registry)] = conf
+ }
+
+ // Credential helpers may override credentials from the auth file.
+ for registry, credHelper := range auths.CredHelpers {
+ username, password, err := getAuthFromCredHelper(credHelper, registry)
+ if err != nil {
+ if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
+ continue
+ }
+ return nil, err
+ }
+
+ conf := types.DockerAuthConfig{Username: username, Password: password}
+ authConfigs[normalizeRegistry(registry)] = conf
+ }
+ }
+
+ // TODO(keyring): if we ever re-enable the keyring support, we will have to
+ // query all credentials from the keyring here.
+
+ return authConfigs, nil
+}
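
The reverse iteration is what makes the merge order work: paths are ordered most-preferred first, so the least-preferred file must be written first and the most-preferred last, overwriting on conflict. A self-contained sketch with made-up registry data:

```go
package main

import "fmt"

func main() {
	// Ordered most-preferred first, as getAuthFilePaths returns them.
	paths := []map[string]string{
		{"quay.io": "cred-from-auth.json"},     // most preferred
		{"quay.io": "cred-from-docker-config"}, // fallback
	}
	merged := map[string]string{}
	for i := len(paths) - 1; i >= 0; i-- { // inverse order, as above
		for registry, cred := range paths[i] {
			merged[registry] = cred
		}
	}
	fmt.Println(merged["quay.io"]) // cred-from-auth.json wins
}
```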
+
+// getAuthFilePaths returns a slice of authPaths based on the system context
+// in the order they should be searched. Note that some paths may not exist.
+func getAuthFilePaths(sys *types.SystemContext) []authPath {
+ paths := []authPath{}
+ pathToAuth, lf, err := getPathToAuth(sys)
+ if err == nil {
+ paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
+ } else {
+ // An error means that the path set for XDG_RUNTIME_DIR does not exist,
+ // but we don't want to completely fail in the case that the user is pulling a public image.
+ // Log the error as a warning instead and move on to pulling the image.
+ logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
+ }
+ paths = append(paths,
+ authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
+ authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true},
+ )
+ return paths
+}
+
// GetCredentials returns the registry credentials stored in either auth.json
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned.
@@ -93,21 +163,7 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
}
}
- paths := []authPath{}
- pathToAuth, lf, err := getPathToAuth(sys)
- if err == nil {
- paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
- } else {
- // Error means that the path set for XDG_RUNTIME_DIR does not exist
- // but we don't want to completely fail in the case that the user is pulling a public image
- // Logging the error as a warning instead and moving on to pulling the image
- logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
- }
- paths = append(paths,
- authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
- authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true})
-
- for _, path := range paths {
+ for _, path := range getAuthFilePaths(sys) {
authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
if err != nil {
logrus.Debugf("Credentials not found")
@@ -189,10 +245,8 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
})
}
-// getPath gets the path of the auth.json file
-// The path can be overriden by the user if the overwrite-path flag is set
-// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
-// Otherwise, the auth.json file is stored in /run/containers/UID
+// getPathToAuth gets the path of the auth.json file used for reading and writing credentials,
+// and returns the path along with a bool specifying whether the file is in the legacy format.
func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
if sys != nil {
if sys.AuthFilePath != "" {
@@ -205,6 +259,9 @@ func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
}
}
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
+ }
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir != "" {
@@ -248,6 +305,13 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path)
}
+ if auths.AuthConfigs == nil {
+ auths.AuthConfigs = map[string]dockerAuthConfig{}
+ }
+ if auths.CredHelpers == nil {
+ auths.CredHelpers = make(map[string]string)
+ }
+
return auths, nil
}
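
Initializing those two maps matters because decoding JSON such as `{}` leaves map fields nil, and Go panics on assignment to a nil map (reads are fine). A standalone illustration:

```go
package main

import "fmt"

func main() {
	var m map[string]string // nil map, as after decoding "{}"
	fmt.Println(m["key"])   // reading a nil map is fine: ""
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("panic:", r) // assignment to entry in nil map
		}
	}()
	m["key"] = "value" // panics without a prior make(...)
}
```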
@@ -257,17 +321,15 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
if err != nil {
return err
}
+ if legacyFormat {
+ return fmt.Errorf("writes to %s using legacy format are not supported", path)
+ }
dir := filepath.Dir(path)
- if _, err := os.Stat(dir); os.IsNotExist(err) {
- if err = os.MkdirAll(dir, 0700); err != nil {
- return errors.Wrapf(err, "error creating directory %q", dir)
- }
+ if err = os.MkdirAll(dir, 0700); err != nil {
+ return err
}
- if legacyFormat {
- return fmt.Errorf("writes to %s using legacy format are not supported", path)
- }
auths, err := readJSONFile(path, false)
if err != nil {
return errors.Wrapf(err, "error reading JSON file %q", path)
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index 3eee70bc2..72fcf618a 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -17,11 +17,13 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "os"
"path/filepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
"github.com/pkg/errors"
)
@@ -34,6 +36,9 @@ var systemDefaultPolicyPath = builtinDefaultPolicyPath
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+// userPolicyFile is the path to the per-user policy file.
+var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")
+
// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
type InvalidPolicyFormatError string
@@ -53,13 +58,15 @@ func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
// defaultPolicyPath returns a path to the default policy of the system.
func defaultPolicyPath(sys *types.SystemContext) string {
- if sys != nil {
- if sys.SignaturePolicyPath != "" {
- return sys.SignaturePolicyPath
- }
- if sys.RootForImplicitAbsolutePaths != "" {
- return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
- }
+ if sys != nil && sys.SignaturePolicyPath != "" {
+ return sys.SignaturePolicyPath
+ }
+ userPolicyFilePath := filepath.Join(homedir.Get(), userPolicyFile)
+ if _, err := os.Stat(userPolicyFilePath); err == nil {
+ return userPolicyFilePath
+ }
+ if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
}
return systemDefaultPolicyPath
}
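
The restructured lookup consults a per-user policy before falling back to the root-prefixed and system defaults. A hedged re-statement of that order as a small helper (a sketch, not the vendored function):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// policyPath mirrors the order above: explicit path, per-user file if it
// exists, root-prefixed system path, then the built-in default.
func policyPath(explicit, root, home string) string {
	if explicit != "" {
		return explicit
	}
	user := filepath.Join(home, ".config/containers/policy.json")
	if _, err := os.Stat(user); err == nil {
		return user
	}
	if root != "" {
		return filepath.Join(root, "/etc/containers/policy.json")
	}
	return "/etc/containers/policy.json"
}

func main() {
	fmt.Println(policyPath("", "", os.Getenv("HOME")))
}
```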
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 67f57e03e..7cf412723 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 4
+ VersionMinor = 5
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 4
+ VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = ""
+ VersionDev = "-dev"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index c2f855e75..19181caea 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -156,8 +156,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
- default:
+ case blockTypeRaw:
b.RLESize = 0
+ // We do not need a destination for raw blocks.
+ maxSize = -1
+ default:
+ panic("Invalid block type")
}
// Read block data.
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 234025505..324347623 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -461,6 +461,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
br := readerWrapper{r: stream.r}
decodeStream:
for {
+ frame.history.reset()
err := frame.reset(&br)
if debug && err != nil {
println("Frame decoder returned", err)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 15a45f7b5..39238e16a 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -64,6 +64,7 @@ type sequenceDecs struct {
hist []byte
literals []byte
out []byte
+ windowSize int
maxBits uint8
}
@@ -82,6 +83,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
s.hist = hist.b
s.prevOffset = hist.recentOffsets
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+ s.windowSize = hist.windowSize
s.out = out
return nil
}
@@ -131,6 +133,9 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if matchOff > len(s.out)+len(hist)+litLen {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", matchOff, len(s.out)+len(hist)+litLen)
}
+ if matchOff > s.windowSize {
+ return fmt.Errorf("match offset (%d) bigger than window size (%d)", matchOff, s.windowSize)
+ }
if matchOff == 0 && matchLen > 0 {
return fmt.Errorf("zero matchoff and matchlen > 0")
}
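
The added guard rejects a match offset that reaches further back than the frame's declared window size, even when more history happens to be buffered. A toy illustration with made-up numbers:

```go
package main

import "fmt"

func main() {
	windowSize := 1 << 10 // window declared in the frame header (1 KiB)
	historyLen := 4096    // bytes of history we happen to have buffered
	matchOff := 2048      // offset requested by a (corrupt) sequence

	switch {
	case matchOff > historyLen:
		fmt.Println("reject: offset beyond available history") // pre-existing check
	case matchOff > windowSize:
		fmt.Println("reject: offset beyond declared window size") // new check
	default:
		fmt.Println("ok")
	}
}
```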
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
new file mode 100644
index 000000000..604314dd4
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+sudo: false
+go:
+ - 1.13.x
+ - tip
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./go.test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 000000000..65dc692b6
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 000000000..38418353e
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/mattn/go-isatty"
+ "os"
+)
+
+func main() {
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Terminal")
+ } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Cygwin/MSYS2 Terminal")
+ } else {
+ fmt.Println("Is Not Terminal")
+ }
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+ https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty.
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod
new file mode 100644
index 000000000..605c4c221
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.mod
@@ -0,0 +1,5 @@
+module github.com/mattn/go-isatty
+
+go 1.12
+
+require golang.org/x/sys v0.0.0-20200116001909-b77594299b42
diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum
new file mode 100644
index 000000000..912e29cbc
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh
new file mode 100644
index 000000000..012162b07
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -race -coverprofile=profile.out -covermode=atomic "$d"
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 000000000..711f28808
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import "golang.org/x/sys/unix"
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
+ return err == nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 000000000..ff714a376
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,15 @@
+// +build appengine js nacl
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which is
+// always false on js and appengine classic, which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
new file mode 100644
index 000000000..c5b6e0c08
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
@@ -0,0 +1,22 @@
+// +build plan9
+
+package isatty
+
+import (
+ "syscall"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ path, err := syscall.Fd2path(int(fd))
+ if err != nil {
+ return false
+ }
+ return path == "/dev/cons" || path == "/mnt/term/dev/cons"
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 000000000..bdd5c79a0
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,22 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+ var termio unix.Termio
+ err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+ return err == nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
new file mode 100644
index 000000000..31a1ca973
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
@@ -0,0 +1,18 @@
+// +build linux aix
+// +build !appengine
+
+package isatty
+
+import "golang.org/x/sys/unix"
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
+ return err == nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 000000000..1fa869154
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,125 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+ "errors"
+ "strings"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ objectNameInfo uintptr = 1
+ fileNameInfo = 2
+ fileTypePipe = 3
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ ntdll = syscall.NewLazyDLL("ntdll.dll")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+ procGetFileType = kernel32.NewProc("GetFileType")
+ procNtQueryObject = ntdll.NewProc("NtQueryObject")
+)
+
+func init() {
+ // Check if GetFileInformationByHandleEx is available.
+ if procGetFileInformationByHandleEx.Find() != nil {
+ procGetFileInformationByHandleEx = nil
+ }
+}
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// isCygwinPipeName reports whether the pipe name is one used by a cygwin/msys2 pty.
+// A Cygwin/MSYS2 PTY has a name like:
+// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+ token := strings.Split(name, "-")
+ if len(token) < 5 {
+ return false
+ }
+
+ if token[0] != `\msys` &&
+ token[0] != `\cygwin` &&
+ token[0] != `\Device\NamedPipe\msys` &&
+ token[0] != `\Device\NamedPipe\cygwin` {
+ return false
+ }
+
+ if token[1] == "" {
+ return false
+ }
+
+ if !strings.HasPrefix(token[2], "pty") {
+ return false
+ }
+
+ if token[3] != `from` && token[3] != `to` {
+ return false
+ }
+
+ if token[4] != "master" {
+ return false
+ }
+
+ return true
+}
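
The five dash-separated tokens that isCygwinPipeName validates are easiest to see on a concrete (made-up) pty name:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := `\msys-dd50a72ab4668b33-pty0-to-master` // hypothetical example
	for i, tok := range strings.Split(name, "-") {
		fmt.Printf("token[%d] = %q\n", i, tok) // prefix, hex id, ptyN, from/to, master
	}
}
```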
+
+// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the
+// full file name from a file handle. GetFileInformationByHandleEx is not available
+// before Windows Vista, and since some people are still using Windows XP this is a
+// workaround for them; it also works on systems from Windows Vista through 10.
+// See https://stackoverflow.com/a/18792477 for details.
+func getFileNameByHandle(fd uintptr) (string, error) {
+ if procNtQueryObject == nil {
+ return "", errors.New("ntdll.dll: NtQueryObject not supported")
+ }
+
+ var buf [4 + syscall.MAX_PATH]uint16
+ var result int
+ r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
+ fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
+ if r != 0 {
+ return "", e
+ }
+ return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+ if procGetFileInformationByHandleEx == nil {
+ name, err := getFileNameByHandle(fd)
+ if err != nil {
+ return false
+ }
+ return isCygwinPipeName(name)
+ }
+
+ // Cygwin/msys's pty is a pipe.
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+ if ft != fileTypePipe || e != 0 {
+ return false
+ }
+
+ var buf [2 + syscall.MAX_PATH]uint16
+ r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+ 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+ uintptr(len(buf)*2), 0, 0)
+ if r == 0 || e != 0 {
+ return false
+ }
+
+ l := *(*uint32)(unsafe.Pointer(&buf))
+ return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+}
diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json
new file mode 100644
index 000000000..5ae9d96b7
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/renovate.json
@@ -0,0 +1,8 @@
+{
+ "extends": [
+ "config:base"
+ ],
+ "postUpdateOptions": [
+ "gomodTidy"
+ ]
+}
diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml
new file mode 100644
index 000000000..6a21813a3
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+sudo: false
+go:
+ - 1.13.x
+ - tip
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - go generate
+ - git diff --cached --exit-code
+ - ./go.test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE
new file mode 100644
index 000000000..91b5cef30
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md
new file mode 100644
index 000000000..aa56ab96c
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/README.md
@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/mattn/go-runewidth/go.mod b/vendor/github.com/mattn/go-runewidth/go.mod
new file mode 100644
index 000000000..fa7f4d864
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/go.mod
@@ -0,0 +1,3 @@
+module github.com/mattn/go-runewidth
+
+go 1.9
diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh
new file mode 100644
index 000000000..012162b07
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -race -coverprofile=profile.out -covermode=atomic "$d"
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 000000000..19f8e0449
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,257 @@
+package runewidth
+
+import (
+ "os"
+)
+
+//go:generate go run script/generate.go
+
+var (
+ // EastAsianWidth will be set true if the current locale is CJK
+ EastAsianWidth bool
+
+ // ZeroWidthJoiner is a flag to enable handling of UTR#51 zero-width joiners
+ ZeroWidthJoiner bool
+
+ // DefaultCondition is the condition for the current locale
+ DefaultCondition = &Condition{}
+)
+
+func init() {
+ handleEnv()
+}
+
+func handleEnv() {
+ env := os.Getenv("RUNEWIDTH_EASTASIAN")
+ if env == "" {
+ EastAsianWidth = IsEastAsian()
+ } else {
+ EastAsianWidth = env == "1"
+ }
+ // update DefaultCondition
+ DefaultCondition.EastAsianWidth = EastAsianWidth
+ DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner
+}
+
+type interval struct {
+ first rune
+ last rune
+}
+
+type table []interval
+
+func inTables(r rune, ts ...table) bool {
+ for _, t := range ts {
+ if inTable(r, t) {
+ return true
+ }
+ }
+ return false
+}
+
+func inTable(r rune, t table) bool {
+ if r < t[0].first {
+ return false
+ }
+
+ bot := 0
+ top := len(t) - 1
+ for top >= bot {
+ mid := (bot + top) >> 1
+
+ switch {
+ case t[mid].last < r:
+ bot = mid + 1
+ case t[mid].first > r:
+ top = mid - 1
+ default:
+ return true
+ }
+ }
+
+ return false
+}
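
inTable is an interval binary search over sorted, non-overlapping rune ranges. A self-contained copy run against a two-entry toy table (the real tables are generated; see runewidth_table.go):

```go
package main

import "fmt"

type interval struct{ first, last rune }
type table []interval

// inTable reproduces the search above: narrow [bot, top] until the rune
// falls inside an interval or the range is exhausted.
func inTable(r rune, t table) bool {
	if r < t[0].first {
		return false
	}
	bot, top := 0, len(t)-1
	for top >= bot {
		mid := (bot + top) >> 1
		switch {
		case t[mid].last < r:
			bot = mid + 1
		case t[mid].first > r:
			top = mid - 1
		default:
			return true
		}
	}
	return false
}

func main() {
	toy := table{{0x3041, 0x3096}, {0x4E00, 0x9FFF}} // hiragana + a CJK slice
	fmt.Println(inTable('あ', toy), inTable('A', toy)) // true false
}
```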
+
+var private = table{
+ {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD},
+}
+
+var nonprint = table{
+ {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
+ {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
+ {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
+ {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
+}
+
+// Condition holds the EastAsianWidth flag, indicating whether the current locale is CJK.
+type Condition struct {
+ EastAsianWidth bool
+ ZeroWidthJoiner bool
+}
+
+// NewCondition returns a new Condition instance for the current locale.
+func NewCondition() *Condition {
+ return &Condition{
+ EastAsianWidth: EastAsianWidth,
+ ZeroWidthJoiner: ZeroWidthJoiner,
+ }
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+ switch {
+ case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned):
+ return 0
+ case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth):
+ return 2
+ default:
+ return 1
+ }
+}
+
+func (c *Condition) stringWidth(s string) (width int) {
+ for _, r := range []rune(s) {
+ width += c.RuneWidth(r)
+ }
+ return width
+}
+
+func (c *Condition) stringWidthZeroJoiner(s string) (width int) {
+ r1, r2 := rune(0), rune(0)
+ for _, r := range []rune(s) {
+ if r == 0xFE0E || r == 0xFE0F {
+ continue
+ }
+ w := c.RuneWidth(r)
+ if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) {
+ if width < w {
+ width = w
+ }
+ } else {
+ width += w
+ }
+ r1, r2 = r2, r
+ }
+ return width
+}
+
+// StringWidth returns the display width of the string.
+func (c *Condition) StringWidth(s string) (width int) {
+ if c.ZeroWidthJoiner {
+ return c.stringWidthZeroJoiner(s)
+ }
+ return c.stringWidth(s)
+}
+
+// Truncate returns the string truncated to w cells, with tail appended.
+func (c *Condition) Truncate(s string, w int, tail string) string {
+ if c.StringWidth(s) <= w {
+ return s
+ }
+ r := []rune(s)
+ tw := c.StringWidth(tail)
+ w -= tw
+ width := 0
+ i := 0
+ for ; i < len(r); i++ {
+ cw := c.RuneWidth(r[i])
+ if width+cw > w {
+ break
+ }
+ width += cw
+ }
+ return string(r[0:i]) + tail
+}
+
+// Wrap returns the string wrapped so that no line exceeds w cells.
+func (c *Condition) Wrap(s string, w int) string {
+ width := 0
+ out := ""
+ for _, r := range []rune(s) {
+ cw := c.RuneWidth(r) // use the receiver's condition, not the package default
+ if r == '\n' {
+ out += string(r)
+ width = 0
+ continue
+ } else if width+cw > w {
+ out += "\n"
+ width = 0
+ out += string(r)
+ width += cw
+ continue
+ }
+ out += string(r)
+ width += cw
+ }
+ return out
+}
+
+// FillLeft returns the string left-padded with spaces to w cells.
+func (c *Condition) FillLeft(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b) + s
+ }
+ return s
+}
+
+// FillRight returns the string right-padded with spaces to w cells.
+func (c *Condition) FillRight(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return s + string(b)
+ }
+ return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+ return DefaultCondition.RuneWidth(r)
+}
+
+// IsAmbiguousWidth returns whether r is of ambiguous width.
+func IsAmbiguousWidth(r rune) bool {
+ return inTables(r, private, ambiguous)
+}
+
+// IsNeutralWidth returns whether r is of neutral width.
+func IsNeutralWidth(r rune) bool {
+ return inTable(r, neutral)
+}
+
+// StringWidth returns the display width of the string.
+func StringWidth(s string) (width int) {
+ return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns the string truncated to w cells, with tail appended.
+func Truncate(s string, w int, tail string) string {
+ return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap returns the string wrapped so that no line exceeds w cells.
+func Wrap(s string, w int) string {
+ return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft returns the string left-padded with spaces to w cells.
+func FillLeft(s string, w int) string {
+ return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight returns the string right-padded with spaces to w cells.
+func FillRight(s string, w int) string {
+ return DefaultCondition.FillRight(s, w)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
new file mode 100644
index 000000000..7d99f6e52
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 000000000..c5fdf40ba
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,9 @@
+// +build js
+// +build !appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ // TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
+ return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 000000000..480ad7485
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,82 @@
+// +build !windows
+// +build !js
+// +build !appengine
+
+package runewidth
+
+import (
+ "os"
+ "regexp"
+ "strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+ "utf-8": 6,
+ "utf8": 6,
+ "jis": 8,
+ "eucjp": 3,
+ "euckr": 2,
+ "euccn": 2,
+ "sjis": 2,
+ "cp932": 2,
+ "cp51932": 2,
+ "cp936": 2,
+ "cp949": 2,
+ "cp950": 2,
+ "big5": 2,
+ "gbk": 2,
+ "gb2312": 2,
+}
+
+func isEastAsian(locale string) bool {
+ charset := strings.ToLower(locale)
+ r := reLoc.FindStringSubmatch(locale)
+ if len(r) == 2 {
+ charset = strings.ToLower(r[1])
+ }
+
+ if strings.HasSuffix(charset, "@cjk_narrow") {
+ return false
+ }
+
+ for pos, b := range []byte(charset) {
+ if b == '@' {
+ charset = charset[:pos]
+ break
+ }
+ }
+ max := 1
+ if m, ok := mblenTable[charset]; ok {
+ max = m
+ }
+ if max > 1 && (charset[0] != 'u' ||
+ strings.HasPrefix(locale, "ja") ||
+ strings.HasPrefix(locale, "ko") ||
+ strings.HasPrefix(locale, "zh")) {
+ return true
+ }
+ return false
+}
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ locale := os.Getenv("LC_ALL")
+ if locale == "" {
+ locale = os.Getenv("LC_CTYPE")
+ }
+ if locale == "" {
+ locale = os.Getenv("LANG")
+ }
+
+ // ignore C locale
+ if locale == "POSIX" || locale == "C" {
+ return false
+ }
+ if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+ return false
+ }
+
+ return isEastAsian(locale)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go
new file mode 100644
index 000000000..b27d77d89
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go
@@ -0,0 +1,437 @@
+// Code generated by script/generate.go. DO NOT EDIT.
+
+package runewidth
+
+var combining = table{
+ {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3},
+ {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01},
+ {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0},
+ {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF},
+ {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF},
+ {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D},
+ {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1},
+ {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A},
+ {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301},
+ {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374},
+ {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
+ {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018},
+ {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
+ {0x1E8D0, 0x1E8D6},
+}
+
+var doublewidth = table{
+ {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A},
+ {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3},
+ {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653},
+ {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1},
+ {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
+ {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA},
+ {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA},
+ {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B},
+ {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E},
+ {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
+ {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
+ {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
+ {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
+ {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
+ {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3},
+ {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF},
+ {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C},
+ {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19},
+ {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B},
+ {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4},
+ {0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5},
+ {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152},
+ {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004},
+ {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
+ {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
+ {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320},
+ {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393},
+ {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0},
+ {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440},
+ {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E},
+ {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596},
+ {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5},
+ {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7},
+ {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB},
+ {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978},
+ {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74},
+ {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8},
+ {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6},
+ {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
+}
+
+var ambiguous = table{
+ {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8},
+ {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4},
+ {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6},
+ {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1},
+ {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED},
+ {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA},
+ {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101},
+ {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B},
+ {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133},
+ {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144},
+ {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153},
+ {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE},
+ {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4},
+ {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA},
+ {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261},
+ {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB},
+ {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB},
+ {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F},
+ {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1},
+ {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F},
+ {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016},
+ {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022},
+ {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033},
+ {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E},
+ {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084},
+ {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105},
+ {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116},
+ {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B},
+ {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B},
+ {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199},
+ {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4},
+ {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203},
+ {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F},
+ {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A},
+ {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225},
+ {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237},
+ {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C},
+ {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267},
+ {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283},
+ {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299},
+ {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312},
+ {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573},
+ {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1},
+ {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7},
+ {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8},
+ {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5},
+ {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609},
+ {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E},
+ {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661},
+ {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D},
+ {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF},
+ {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1},
+ {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1},
+ {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC},
+ {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F},
+ {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF},
+ {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A},
+ {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D},
+ {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF},
+ {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD},
+}
+var notassigned = table{
+ {0x27E6, 0x27ED}, {0x2985, 0x2986},
+}
+
+var neutral = table{
+ {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9},
+ {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB},
+ {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6},
+ {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7},
+ {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1},
+ {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD},
+ {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112},
+ {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A},
+ {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E},
+ {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C},
+ {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A},
+ {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1},
+ {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7},
+ {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250},
+ {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6},
+ {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF},
+ {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE},
+ {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F},
+ {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390},
+ {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400},
+ {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
+ {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F},
+ {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4},
+ {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A},
+ {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D},
+ {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E},
+ {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7},
+ {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990},
+ {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2},
+ {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8},
+ {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD},
+ {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03},
+ {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28},
+ {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36},
+ {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
+ {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
+ {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76},
+ {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91},
+ {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3},
+ {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9},
+ {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3},
+ {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03},
+ {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28},
+ {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39},
+ {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D},
+ {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63},
+ {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A},
+ {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A},
+ {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4},
+ {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2},
+ {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0},
+ {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C},
+ {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39},
+ {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
+ {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63},
+ {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90},
+ {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
+ {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
+ {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
+ {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C},
+ {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48},
+ {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F},
+ {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
+ {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6},
+ {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6},
+ {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4},
+ {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82},
+ {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3},
+ {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4},
+ {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
+ {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
+ {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
+ {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
+ {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248},
+ {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258},
+ {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D},
+ {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE},
+ {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6},
+ {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
+ {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
+ {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
+ {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
+ {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
+ {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
+ {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
+ {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
+ {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
+ {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
+ {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
+ {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
+ {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
+ {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
+ {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
+ {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7},
+ {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15},
+ {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
+ {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
+ {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
+ {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB},
+ {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE},
+ {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017},
+ {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023},
+ {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034},
+ {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
+ {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
+ {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
+ {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0},
+ {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
+ {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
+ {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
+ {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F},
+ {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7},
+ {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6},
+ {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206},
+ {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210},
+ {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C},
+ {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226},
+ {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B},
+ {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251},
+ {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269},
+ {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285},
+ {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4},
+ {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319},
+ {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF},
+ {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A},
+ {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F},
+ {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2},
+ {0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB},
+ {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA},
+ {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE},
+ {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608},
+ {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B},
+ {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641},
+ {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662},
+ {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E},
+ {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D},
+ {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC},
+ {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7},
+ {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727},
+ {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D},
+ {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775},
+ {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE},
+ {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A},
+ {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73},
+ {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E},
+ {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27},
+ {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70},
+ {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE},
+ {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6},
+ {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE},
+ {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF},
+ {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF},
+ {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839},
+ {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9},
+ {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
+ {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
+ {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2},
+ {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E},
+ {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E},
+ {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9},
+ {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
+ {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
+ {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
+ {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
+ {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
+ {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
+ {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
+ {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
+ {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA},
+ {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E},
+ {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD},
+ {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB},
+ {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A},
+ {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
+ {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
+ {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
+ {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
+ {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
+ {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
+ {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
+ {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
+ {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
+ {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
+ {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35},
+ {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58},
+ {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
+ {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
+ {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
+ {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
+ {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E},
+ {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1},
+ {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB},
+ {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F},
+ {0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8},
+ {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147},
+ {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4},
+ {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286},
+ {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D},
+ {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9},
+ {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310},
+ {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333},
+ {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348},
+ {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357},
+ {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
+ {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7},
+ {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD},
+ {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C},
+ {0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A},
+ {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B},
+ {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909},
+ {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935},
+ {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959},
+ {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4},
+ {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8},
+ {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45},
+ {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7},
+ {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
+ {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D},
+ {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65},
+ {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91},
+ {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8},
+ {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399},
+ {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
+ {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646},
+ {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69},
+ {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5},
+ {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61},
+ {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A},
+ {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F},
+ {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88},
+ {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5},
+ {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245},
+ {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378},
+ {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
+ {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
+ {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
+ {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
+ {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
+ {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
+ {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
+ {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
+ {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
+ {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
+ {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9},
+ {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
+ {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
+ {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03},
+ {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24},
+ {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37},
+ {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42},
+ {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B},
+ {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54},
+ {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B},
+ {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62},
+ {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72},
+ {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E},
+ {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3},
+ {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1},
+ {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093},
+ {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE},
+ {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F},
+ {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF},
+ {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D},
+ {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF},
+ {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F},
+ {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A},
+ {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
+ {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
+ {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4},
+ {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773},
+ {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
+ {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD},
+ {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B},
+ {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D},
+ {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9},
+ {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+}
+
+var emoji = table{
+ {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122},
+ {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA},
+ {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388},
+ {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA},
+ {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6},
+ {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605},
+ {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705},
+ {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716},
+ {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728},
+ {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747},
+ {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755},
+ {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797},
+ {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF},
+ {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C},
+ {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030},
+ {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299},
+ {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F},
+ {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E},
+ {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F},
+ {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A},
+ {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D},
+ {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F},
+ {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F},
+ {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF},
+ {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF},
+ {0x1FC00, 0x1FFFD},
+}
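
These width tables are sorted, non-overlapping lists of inclusive rune ranges, so classifying a rune comes down to a binary search over the pairs. The sketch below illustrates that lookup; the interval/inTable names and layout are assumptions for exposition, not necessarily this package's internals.

package main

import "fmt"

// interval mirrors one {first, last} pair from the tables above.
type interval struct{ first, last rune }

// table is a sorted list of non-overlapping inclusive ranges.
type table []interval

// inTable reports whether r falls inside one of t's ranges. Binary
// search keeps lookups O(log n) even for the large neutral table.
func inTable(r rune, t table) bool {
	lo, hi := 0, len(t)-1
	for lo <= hi {
		mid := (lo + hi) / 2
		switch {
		case r < t[mid].first:
			hi = mid - 1
		case r > t[mid].last:
			lo = mid + 1
		default:
			return true
		}
	}
	return false
}

func main() {
	emoji := table{{0x203C, 0x203C}, {0x1F000, 0x1F0FF}} // subset of the emoji table above
	fmt.Println(inTable(0x203C, emoji)) // true
	fmt.Println(inTable('A', emoji))    // false
}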
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 000000000..d6a61777d
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+// +build !appengine
+
+package runewidth
+
+import (
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32")
+ procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+// IsEastAsian returns true if the current locale is CJK
+func IsEastAsian() bool {
+ r1, _, _ := procGetConsoleOutputCP.Call()
+ if r1 == 0 {
+ return false
+ }
+
+ switch int(r1) {
+ case 932, 51932, 936, 949, 950:
+ return true
+ }
+
+ return false
+}
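
On Windows the library cannot consult LC_CTYPE, so IsEastAsian above keys off the console output code page instead: 932 (Shift-JIS), 51932 (EUC-JP), 936 (GBK), 949 (EUC-KR), and 950 (Big5) are the CJK code pages. Callers typically use the result to widen East Asian Ambiguous runes; a small usage sketch against go-runewidth's exported API:

package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	// U+203B '※' is East Asian Ambiguous: one cell by default.
	fmt.Println(runewidth.RuneWidth('※')) // 1

	// Two cells once the East Asian context is in effect, which is what
	// callers enable when IsEastAsian() reports a CJK locale.
	cond := runewidth.Condition{EastAsianWidth: true}
	fmt.Println(cond.RuneWidth('※')) // 2
}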
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
index 6e38d3d32..fea096c18 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
@@ -1,6 +1,8 @@
package label
import (
+ "fmt"
+
"github.com/opencontainers/selinux/go-selinux"
)
@@ -46,7 +48,7 @@ var PidLabel = selinux.PidLabel
// Init initialises the labeling system
func Init() {
- selinux.GetEnabled()
+ _ = selinux.GetEnabled()
}
// ClearLabels will clear all reserved labels
@@ -75,3 +77,21 @@ func ReleaseLabel(label string) error {
// can be used to set duplicate labels on future container processes
// Deprecated: use selinux.DupSecOpt
var DupSecOpt = selinux.DupSecOpt
+
+// FormatMountLabel returns a string to be used by the mount command.
+// The format of this string will be used to alter the labeling of the mountpoint.
+// The string returned is suitable to be used as the options field of the mount command.
+// If you need to have additional mount point options, you can pass them in as
+// the first parameter. Second parameter is the label that you wish to apply
+// to all content in the mount point.
+func FormatMountLabel(src, mountLabel string) string {
+ if mountLabel != "" {
+ switch src {
+ case "":
+ src = fmt.Sprintf("context=%q", mountLabel)
+ default:
+ src = fmt.Sprintf("%s,context=%q", src, mountLabel)
+ }
+ }
+ return src
+}
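
FormatMountLabel, now hoisted into the build-independent label.go, is pure string formatting, so its behavior pins down nicely with examples (the expected output in the comments follows directly from the switch above):

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// Existing mount options: the context option is appended.
	fmt.Println(label.FormatMountLabel("ro,noatime", "system_u:object_r:container_file_t:s0:c1,c2"))
	// ro,noatime,context="system_u:object_r:container_file_t:s0:c1,c2"

	// No prior options: only the context option is emitted.
	fmt.Println(label.FormatMountLabel("", "system_u:object_r:container_file_t:s0:c1,c2"))
	// context="system_u:object_r:container_file_t:s0:c1,c2"

	// Empty mount label: src passes through unchanged.
	fmt.Println(label.FormatMountLabel("defaults", "")) // defaults
}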
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index 903829958..779e2e3a8 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -3,7 +3,6 @@
package label
import (
- "fmt"
"os"
"os/user"
"strings"
@@ -43,7 +42,7 @@ func InitLabels(options []string) (plabel string, mlabel string, Err error) {
if err != nil {
return "", "", err
}
-
+ mcsLevel := pcon["level"]
mcon, err := selinux.NewContext(mountLabel)
if err != nil {
return "", "", err
@@ -62,16 +61,21 @@ func InitLabels(options []string) (plabel string, mlabel string, Err error) {
}
if con[0] == "filetype" {
mcon["type"] = con[1]
+ continue
}
pcon[con[0]] = con[1]
if con[0] == "level" || con[0] == "user" {
mcon[con[0]] = con[1]
}
}
- selinux.ReleaseLabel(processLabel)
- processLabel = pcon.Get()
- mountLabel = mcon.Get()
- selinux.ReserveLabel(processLabel)
+ if pcon.Get() != processLabel {
+ if pcon["level"] != mcsLevel {
+ selinux.ReleaseLabel(processLabel)
+ }
+ processLabel = pcon.Get()
+ mountLabel = mcon.Get()
+ selinux.ReserveLabel(processLabel)
+ }
}
return processLabel, mountLabel, nil
}
@@ -82,24 +86,6 @@ func GenLabels(options string) (string, string, error) {
return InitLabels(strings.Fields(options))
}
-// FormatMountLabel returns a string to be used by the mount command.
-// The format of this string will be used to alter the labeling of the mountpoint.
-// The string returned is suitable to be used as the options field of the mount command.
-// If you need to have additional mount point options, you can pass them in as
-// the first parameter. Second parameter is the label that you wish to apply
-// to all content in the mount point.
-func FormatMountLabel(src, mountLabel string) string {
- if mountLabel != "" {
- switch src {
- case "":
- src = fmt.Sprintf("context=%q", mountLabel)
- default:
- src = fmt.Sprintf("%s,context=%q", src, mountLabel)
- }
- }
- return src
-}
-
// SetFileLabel modifies the "path" label to the specified file label
func SetFileLabel(path string, fileLabel string) error {
if !selinux.GetEnabled() || fileLabel == "" {
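
The InitLabels hunk above changes two things: a caller-supplied filetype option no longer leaks into the process label (the added continue), and the originally reserved label is released only when the requested MCS level actually differs from the one first allocated. A hedged usage sketch (the level value is illustrative; on hosts without SELinux enabled both labels come back empty):

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	// Pin the MCS level rather than accepting a randomly allocated one.
	processLabel, mountLabel, err := label.InitLabels([]string{"level:s0:c100,c200"})
	if err != nil {
		fmt.Println("init labels:", err)
		return
	}
	fmt.Println(processLabel) // e.g. system_u:system_r:container_t:s0:c100,c200
	fmt.Println(mountLabel)   // e.g. system_u:object_r:container_file_t:s0:c100,c200
}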
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
index cda59d671..c2bdd35d7 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
@@ -15,10 +15,6 @@ func GenLabels(options string) (string, string, error) {
return "", "", nil
}
-func FormatMountLabel(src string, mountLabel string) string {
- return src
-}
-
func SetFileLabel(path string, fileLabel string) error {
return nil
}
diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE
deleted file mode 100644
index 8dada3eda..000000000
--- a/vendor/github.com/openshift/api/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
deleted file mode 100644
index 114db5aec..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-kind: CustomResourceDefinition
-apiVersion: apiextensions.k8s.io/v1beta1
-metadata:
- name: clusteroperators.config.openshift.io
-spec:
- additionalPrinterColumns:
- - JSONPath: .status.versions[?(@.name=="operator")].version
- description: The version the operator is at.
- name: Version
- type: string
- - JSONPath: .status.conditions[?(@.type=="Available")].status
- description: Whether the operator is running and stable.
- name: Available
- type: string
- - JSONPath: .status.conditions[?(@.type=="Progressing")].status
- description: Whether the operator is processing changes.
- name: Progressing
- type: string
- - JSONPath: .status.conditions[?(@.type=="Degraded")].status
- description: Whether the operator is degraded.
- name: Degraded
- type: string
- - JSONPath: .status.conditions[?(@.type=="Available")].lastTransitionTime
- description: The time the operator's Available status last changed.
- name: Since
- type: date
- group: config.openshift.io
- names:
- kind: ClusterOperator
- listKind: ClusterOperatorList
- plural: clusteroperators
- singular: clusteroperator
- shortNames:
- - co
- preserveUnknownFields: false
- scope: Cluster
- subresources:
- status: {}
- version: v1
- versions:
- - name: v1
- served: true
- storage: true
- validation:
- openAPIV3Schema:
- description: ClusterOperator is the Custom Resource object which holds the current
- state of an operator. This object is used by operators to convey their state
- to the rest of the cluster.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds configuration that could apply to any operator.
- type: object
- status:
- description: status holds the information about the state of an operator. It
- is consistent with status information across the Kubernetes ecosystem.
- type: object
- properties:
- conditions:
- description: conditions describes the state of the operator's managed
- and monitored components.
- type: array
- items:
- description: ClusterOperatorStatusCondition represents the state of
- the operator's managed and monitored components.
- type: object
- required:
- - lastTransitionTime
- - status
- - type
- properties:
- lastTransitionTime:
- description: lastTransitionTime is the time of the last update
- to the current status property.
- type: string
- format: date-time
- message:
- description: message provides additional information about the
- current condition. This is only to be consumed by humans.
- type: string
- reason:
- description: reason is the CamelCase reason for the condition's
- current status.
- type: string
- status:
- description: status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: type specifies the aspect reported by this condition.
- type: string
- extension:
- description: extension contains any additional status information specific
- to the operator which owns this status object.
- type: object
- nullable: true
- x-kubernetes-preserve-unknown-fields: true
- relatedObjects:
- description: 'relatedObjects is a list of objects that are "interesting"
- or related to this operator. Common uses are: 1. the detailed resource
- driving the operator 2. operator namespaces 3. operand namespaces'
- type: array
- items:
- description: ObjectReference contains enough information to let you
- inspect or modify the referred object.
- type: object
- required:
- - group
- - name
- - resource
- properties:
- group:
- description: group of the referent.
- type: string
- name:
- description: name of the referent.
- type: string
- namespace:
- description: namespace of the referent.
- type: string
- resource:
- description: resource of the referent.
- type: string
- versions:
- description: versions is a slice of operator and operand version tuples. Operators
- which manage multiple operands will have multiple operand entries
- in the array. Available operators must report the version of the
- operator itself with the name "operator". An operator reports a new
- "operator" version when it has rolled out the new version to all of
- its operands.
- type: array
- items:
- type: object
- required:
- - name
- - version
- properties:
- name:
- description: name is the name of the particular operand this version
- is for. It usually matches container images, not operators.
- type: string
- version:
- description: version indicates which version of a particular operand
- is currently being managed. It must always match the Available
- operand. If 1.0.0 is Available, then this must indicate 1.0.0
- even if the operator is trying to roll out 1.1.0
- type: string
- versions:
- - name: v1
- served: true
- storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
deleted file mode 100644
index ccde0db23..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml
+++ /dev/null
@@ -1,328 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: clusterversions.config.openshift.io
-spec:
- group: config.openshift.io
- versions:
- - name: v1
- served: true
- storage: true
- scope: Cluster
- subresources:
- status: {}
- names:
- plural: clusterversions
- singular: clusterversion
- kind: ClusterVersion
- preserveUnknownFields: false
- additionalPrinterColumns:
- - name: Version
- type: string
- JSONPath: .status.history[?(@.state=="Completed")].version
- - name: Available
- type: string
- JSONPath: .status.conditions[?(@.type=="Available")].status
- - name: Progressing
- type: string
- JSONPath: .status.conditions[?(@.type=="Progressing")].status
- - name: Since
- type: date
- JSONPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime
- - name: Status
- type: string
- JSONPath: .status.conditions[?(@.type=="Progressing")].message
- validation:
- openAPIV3Schema:
- description: ClusterVersion is the configuration for the ClusterVersionOperator.
- This is where parameters related to automatic updates can be set.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec is the desired state of the cluster version - the operator
- will work to ensure that the desired version is applied to the cluster.
- type: object
- required:
- - clusterID
- properties:
- channel:
- description: channel is an identifier for explicitly requesting that
- a non-default set of updates be applied to this cluster. The default
- channel will contain stable updates that are appropriate for production
- clusters.
- type: string
- clusterID:
- description: clusterID uniquely identifies this cluster. This is expected
- to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- in hexadecimal values). This is a required field.
- type: string
- desiredUpdate:
- description: "desiredUpdate is an optional field that indicates the
- desired value of the cluster version. Setting this value will trigger
- an upgrade (if the current version does not match the desired version).
- The set of recommended update values is listed as part of available
- updates in status, and setting values outside that range may cause
- the upgrade to fail. You may specify the version field without setting
- image if an update exists with that version in the availableUpdates
- or history. \n If an upgrade fails the operator will halt and report
- status about the failing component. Setting the desired update value
- back to the previous version will cause a rollback to be attempted.
- Not all rollbacks will succeed."
- type: object
- properties:
- force:
- description: "force allows an administrator to update to an image
- that has failed verification, does not appear in the availableUpdates
- list, or otherwise would be blocked by normal protections on update.
- This option should only be used when the authenticity of the provided
- image has been verified out of band because the provided image
- will run with full administrative access to the cluster. Do not
- use this flag with images that come from unknown or potentially
- malicious sources. \n This flag does not override other forms
- of consistency checking that are required before a new update
- is deployed."
- type: boolean
- image:
- description: image is a container image location that contains the
- update. When this field is part of spec, image is optional if
- version is specified and the availableUpdates field contains a
- matching version.
- type: string
- version:
- description: version is a semantic versioning identifying the update
- version. When this field is part of spec, version is optional
- if image is specified.
- type: string
- overrides:
- description: overrides is a list of overrides for components that are managed
- by the cluster version operator. Marking a component unmanaged will prevent
- the operator from creating or updating the object.
- type: array
- items:
- description: ComponentOverride allows overriding cluster version operator's
- behavior for a component.
- type: object
- required:
- - group
- - kind
- - name
- - namespace
- - unmanaged
- properties:
- group:
- description: group identifies the API group that the kind is in.
- type: string
- kind:
- description: kind identifies which object to override.
- type: string
- name:
- description: name is the component's name.
- type: string
- namespace:
- description: namespace is the component's namespace. If the resource
- is cluster scoped, the namespace should be empty.
- type: string
- unmanaged:
- description: 'unmanaged controls if the cluster version operator should
- stop managing the resources in this cluster. Default: false'
- type: boolean
- upstream:
- description: upstream may be used to specify the preferred update server.
- By default it will use the appropriate update server for the cluster
- and region.
- type: string
- status:
- description: status contains information about the available updates and
- any in-progress updates.
- type: object
- required:
- - availableUpdates
- - desired
- - observedGeneration
- - versionHash
- properties:
- availableUpdates:
- description: availableUpdates contains the list of updates that are
- appropriate for this cluster. This list may be empty if no updates
- are recommended, if the update service is unavailable, or if an invalid
- channel has been specified.
- type: array
- items:
- description: Update represents a release of the ClusterVersionOperator,
- referenced by the Image member.
- type: object
- properties:
- force:
- description: "force allows an administrator to update to an image
- that has failed verification, does not appear in the availableUpdates
- list, or otherwise would be blocked by normal protections on
- update. This option should only be used when the authenticity
- of the provided image has been verified out of band because
- the provided image will run with full administrative access
- to the cluster. Do not use this flag with images that come
- from unknown or potentially malicious sources. \n This flag
- does not override other forms of consistency checking that are
- required before a new update is deployed."
- type: boolean
- image:
- description: image is a container image location that contains
- the update. When this field is part of spec, image is optional
- if version is specified and the availableUpdates field contains
- a matching version.
- type: string
- version:
- description: version is a semantic versioning identifying the
- update version. When this field is part of spec, version is
- optional if image is specified.
- type: string
- nullable: true
- conditions:
- description: conditions provides information about the cluster version.
- The condition "Available" is set to true if the desiredUpdate has
- been reached. The condition "Progressing" is set to true if an update
- is being applied. The condition "Degraded" is set to true if an update
- is currently blocked by a temporary or permanent error. Conditions
- are only valid for the current desiredUpdate when metadata.generation
- is equal to status.generation.
- type: array
- items:
- description: ClusterOperatorStatusCondition represents the state of
- the operator's managed and monitored components.
- type: object
- required:
- - lastTransitionTime
- - status
- - type
- properties:
- lastTransitionTime:
- description: lastTransitionTime is the time of the last update
- to the current status property.
- type: string
- format: date-time
- message:
- description: message provides additional information about the
- current condition. This is only to be consumed by humans.
- type: string
- reason:
- description: reason is the CamelCase reason for the condition's
- current status.
- type: string
- status:
- description: status of the condition, one of True, False, Unknown.
- type: string
- type:
- description: type specifies the aspect reported by this condition.
- type: string
- desired:
- description: desired is the version that the cluster is reconciling
- towards. If the cluster is not yet fully initialized desired will
- be set with the information available, which may be an image or a
- tag.
- type: object
- properties:
- force:
- description: "force allows an administrator to update to an image
- that has failed verification, does not appear in the availableUpdates
- list, or otherwise would be blocked by normal protections on update.
- This option should only be used when the authenticity of the provided
- image has been verified out of band because the provided image
- will run with full administrative access to the cluster. Do not
- use this flag with images that come from unknown or potentially
- malicious sources. \n This flag does not override other forms
- of consistency checking that are required before a new update
- is deployed."
- type: boolean
- image:
- description: image is a container image location that contains the
- update. When this field is part of spec, image is optional if
- version is specified and the availableUpdates field contains a
- matching version.
- type: string
- version:
- description: version is a semantic versioning identifying the update
- version. When this field is part of spec, version is optional
- if image is specified.
- type: string
- history:
- description: history contains a list of the most recent versions applied
- to the cluster. This value may be empty during cluster startup, and
- then will be updated when a new update is being applied. The newest
- update is first in the list and it is ordered by recency. Updates
- in the history have state Completed if the rollout completed - if
- an update was failing or halfway applied the state will be Partial.
- Only a limited amount of update history is preserved.
- type: array
- items:
- description: UpdateHistory is a single attempted update to the cluster.
- type: object
- required:
- - completionTime
- - image
- - startedTime
- - state
- - verified
- properties:
- completionTime:
- description: completionTime, if set, is when the update was fully
- applied. The update that is currently being applied will have
- a null completion time. Completion time will always be set for
- entries that are not the current update (usually to the started
- time of the next update).
- type: string
- format: date-time
- nullable: true
- image:
- description: image is a container image location that contains
- the update. This value is always populated.
- type: string
- startedTime:
- description: startedTime is the time at which the update was started.
- type: string
- format: date-time
- state:
- description: state reflects whether the update was fully applied.
- The Partial state indicates the update is not fully applied,
- while the Completed state indicates the update was successfully
- rolled out at least once (all parts of the update successfully
- applied).
- type: string
- verified:
- description: verified indicates whether the provided update was
- properly verified before it was installed. If this is false
- the cluster may not be trusted.
- type: boolean
- version:
- description: version is a semantic versioning identifying the
- update version. If the requested image does not define a version,
- or if a failure occurs retrieving the image, this value may
- be empty.
- type: string
- observedGeneration:
- description: observedGeneration reports which version of the spec is
- being synced. If this value is not equal to metadata.generation, then
- the desired and conditions fields may represent a previous version.
- type: integer
- format: int64
- versionHash:
- description: versionHash is a fingerprint of the content that the cluster
- will be updated with. It is used by the operator to avoid unnecessary
- work and is for internal use only.
- type: string
- versions:
- - name: v1
- served: true
- storage: true
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
deleted file mode 100644
index 8c857d45a..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: operatorhubs.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: OperatorHub
- listKind: OperatorHubList
- plural: operatorhubs
- singular: operatorhub
- scope: Cluster
- preserveUnknownFields: false
- subresources:
- status: {}
- version: v1
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: OperatorHub is the Schema for the operatorhubs API. It can be used
- to change the state of the default hub sources for OperatorHub on the cluster
- from enabled to disabled and vice versa.
- type: object
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: OperatorHubSpec defines the desired state of OperatorHub
- type: object
- properties:
- disableAllDefaultSources:
- description: disableAllDefaultSources allows you to disable all the
- default hub sources. If this is true, a specific entry in sources
- can be used to enable a default source. If this is false, a specific
- entry in sources can be used to disable or enable a default source.
- type: boolean
- sources:
- description: sources is the list of default hub sources and their configuration.
- If the list is empty, it implies that the default hub sources are
- enabled on the cluster unless disableAllDefaultSources is true. If
- disableAllDefaultSources is true and sources is not empty, the configuration
- present in sources will take precedence. The list of default hub sources
- and their current state will always be reflected in the status block.
- type: array
- items:
- description: HubSource is used to specify the hub source and its configuration
- type: object
- properties:
- disabled:
- description: disabled is used to disable a default hub source
- on cluster
- type: boolean
- name:
- description: name is the name of one of the default hub sources
- type: string
- maxLength: 253
- minLength: 1
- status:
- description: OperatorHubStatus defines the observed state of OperatorHub.
- The current state of the default hub sources will always be reflected
- here.
- type: object
- properties:
- sources:
- description: sources encapsulates the result of applying the configuration
- for each hub source
- type: array
- items:
- description: HubSourceStatus is used to reflect the current state
- of applying the configuration to a default source
- type: object
- properties:
- disabled:
- description: disabled is used to disable a default hub source
- on cluster
- type: boolean
- message:
- description: message provides more information regarding failures
- type: string
- name:
- description: name is the name of one of the default hub sources
- type: string
- maxLength: 253
- minLength: 1
- status:
- description: status indicates success or failure in applying the
- configuration
- type: string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
deleted file mode 100644
index afd076747..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: proxies.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- names:
- kind: Proxy
- listKind: ProxyList
- plural: proxies
- singular: proxy
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Proxy holds cluster-wide information on how to configure default
- proxies for the cluster. The canonical name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec holds user-settable values for the proxy configuration
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests. Empty
- means unset and will not result in an env var.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
- means unset and will not result in an env var.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames and/or CIDRs
- for which the proxy should not be used. Empty means unset and will
- not result in an env var.
- type: string
- readinessEndpoints:
- description: readinessEndpoints is a list of endpoints used to verify
- readiness of the proxy.
- type: array
- items:
- type: string
- trustedCA:
- description: "trustedCA is a reference to a ConfigMap containing a CA
- certificate bundle used for client egress HTTPS connections. The certificate
- bundle must be from the CA that signed the proxy's certificate and
- be signed for everything. The trustedCA field should only be consumed
- by a proxy validator. The validator is responsible for reading the
- certificate bundle from required key \"ca-bundle.crt\" and copying
- it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
- namespace. The namespace for the ConfigMap referenced by trustedCA
- is \"openshift-config\". Here is an example ConfigMap (in yaml): \n
- apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace:
- openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE-----
- \ Custom CA certificate bundle. -----END CERTIFICATE-----"
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames and/or CIDRs
- for which the proxy should not be used.
- type: string
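
For context, a Proxy resource conforming to the schema removed above would have looked like the following sketch; the name `cluster` is the canonical name given in the description, the `user-ca-bundle` ConfigMap name comes from the example embedded in the trustedCA description, and the proxy URLs and noProxy entries are purely illustrative:

    apiVersion: config.openshift.io/v1
    kind: Proxy
    metadata:
      name: cluster
    spec:
      httpProxy: http://proxy.example.com:3128
      httpsProxy: https://proxy.example.com:3128
      noProxy: .cluster.local,.example.com,10.0.0.0/16
      readinessEndpoints:
      - https://www.example.com
      trustedCA:
        name: user-ca-bundle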
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
deleted file mode 100644
index 4e1fdac37..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml
+++ /dev/null
@@ -1,219 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: apiservers.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: APIServer
- singular: apiserver
- plural: apiservers
- listKind: APIServerList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: APIServer holds configuration (like serving certificates, client
- CA and CORS domains) shared by all API servers in the system, among them especially
- kube-apiserver and openshift-apiserver. The canonical name of an instance
- is 'cluster'.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- type: object
- properties:
- additionalCORSAllowedOrigins:
- description: additionalCORSAllowedOrigins lists additional, user-defined
- regular expressions describing hosts for which the API server allows
- access using the CORS headers. This may be needed to access the API
- and the integrated OAuth server from JavaScript applications. The
- values are regular expressions that correspond to the Golang regular
- expression language.
- type: array
- items:
- type: string
- clientCA:
- description: 'clientCA references a ConfigMap containing a certificate
- bundle for the signers that will be recognized for incoming client
- certificates in addition to the operator managed signers. If this
- is empty, then only operator managed signers are valid. You usually
- only have to set this if you have your own PKI you wish to honor client
- certificates from. The ConfigMap must exist in the openshift-config
- namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"]
- - CA bundle.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- encryption:
- description: encryption allows the configuration of encryption of resources
- at the datastore layer.
- type: object
- properties:
- type:
- description: "type defines what encryption type should be used to
- encrypt resources at the datastore layer. When this field is unset
- (i.e. when it is set to the empty string), identity is implied.
- The behavior of unset can and will change over time. Even if
- encryption is enabled by default, the meaning of unset may change
- to a different encryption type based on changes in best practices.
- \n When encryption is enabled, all sensitive resources shipped
- with the platform are encrypted. This list of sensitive resources
- can and will change over time. The current authoritative list
- is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io
- \ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io"
- type: string
- enum:
- - ""
- - identity
- - aescbc
- servingCerts:
- description: servingCert is the TLS cert info for serving secure traffic.
- If not specified, operator managed certificates will be used for serving
- secure traffic.
- type: object
- properties:
- namedCertificates:
- description: namedCertificates references secrets containing the
- TLS cert info for serving secure traffic to specific hostnames.
- If no named certificates are provided, or no named certificates
- match the server name as understood by a client, the defaultServingCertificate
- will be used.
- type: array
- items:
- description: APIServerNamedServingCert maps a server DNS name,
- as understood by a client, to a certificate.
- type: object
- properties:
- names:
-                            description: names is an optional list of explicit DNS names
- (leading wildcards allowed) that should use this certificate
- to serve secure traffic. If no names are provided, the implicit
- names will be extracted from the certificates. Exact names
-                              take precedence over wildcard names. Explicit names defined
-                              here take precedence over extracted implicit names.
- type: array
- items:
- type: string
- servingCertificate:
- description: 'servingCertificate references a kubernetes.io/tls
- type secret containing the TLS cert info for serving secure
- traffic. The secret must exist in the openshift-config namespace
- and contain the following required fields: - Secret.Data["tls.key"]
- - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- tlsSecurityProfile:
- description: "tlsSecurityProfile specifies settings for TLS connections
- for externally exposed servers. \n If unset, a default (which may
- change between releases) is chosen. Note that only Old and Intermediate
- profiles are currently supported, and the maximum available MinTLSVersions
- is VersionTLS12."
- type: object
- properties:
- custom:
- description: "custom is a user-defined TLS security profile. Be
- extremely careful using a custom profile as invalid configurations
- can be catastrophic. An example custom profile looks like this:
- \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
- \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256
- \ minTLSVersion: TLSv1.1"
- type: object
- properties:
- ciphers:
- description: "ciphers is used to specify the cipher algorithms
- that are negotiated during the TLS handshake. Operators may
- remove entries their operands do not support. For example,
- to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA"
- type: array
- items:
- type: string
- minTLSVersion:
- description: "minTLSVersion is used to specify the minimal version
- of the TLS protocol that is negotiated during the TLS handshake.
- For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):
- \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest
- minTLSVersion allowed is VersionTLS12"
- type: string
- nullable: true
- intermediate:
- description: "intermediate is a TLS security profile based on: \n
- https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
- \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
- \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
- \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
- \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
- \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
- \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
- \ minTLSVersion: TLSv1.2"
- type: object
- nullable: true
- modern:
- description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
- \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
- \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported."
- type: object
- nullable: true
- old:
- description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
- \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256
- \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256
- \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256
- \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384
- \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305
- \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384
- \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256
- \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA -
- ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384
- \ - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA -
- DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256
- \ - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256
- \ - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion:
- TLSv1.0"
- type: object
- nullable: true
- type:
- description: "type is one of Old, Intermediate, Modern or Custom.
- Custom provides the ability to specify individual TLS security
- profile parameters. Old, Intermediate and Modern are TLS security
- profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
- \n The profiles are intent based, so they may change over time
- as new ciphers are developed and existing ciphers are found to
- be insecure. Depending on precisely which ciphers are available
- to a process, the list may be reduced. \n Note that the Modern
- profile is currently not supported because it is not yet well
- adopted by common software libraries."
- type: string
- status:
- type: object
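
As a sketch of the APIServer schema removed above, the custom tlsSecurityProfile below is taken directly from the example in the description of the `custom` field; the CORS entry is an illustrative value:

    apiVersion: config.openshift.io/v1
    kind: APIServer
    metadata:
      name: cluster
    spec:
      additionalCORSAllowedOrigins:
      - '(?i)//console\.example\.com(:|\z)'   # illustrative Golang regular expression
      tlsSecurityProfile:
        type: Custom
        custom:
          ciphers:
          - ECDHE-ECDSA-CHACHA20-POLY1305
          - ECDHE-RSA-CHACHA20-POLY1305
          - ECDHE-RSA-AES128-GCM-SHA256
          - ECDHE-ECDSA-AES128-GCM-SHA256
          minTLSVersion: TLSv1.1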
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
deleted file mode 100644
index f21ac7ea8..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: authentications.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Authentication
- listKind: AuthenticationList
- plural: authentications
- singular: authentication
- scope: Cluster
- preserveUnknownFields: false
- subresources:
- status: {}
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: Authentication specifies cluster-wide settings for authentication
- (like OAuth and webhook token authenticators). The canonical name of an instance
- is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- oauthMetadata:
- description: 'oauthMetadata contains the discovery endpoint data for
- OAuth 2.0 Authorization Server Metadata for an external OAuth server.
- This discovery document can be viewed from its served location: oc
- get --raw ''/.well-known/oauth-authorization-server'' For further
- details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- If oauthMetadata.name is non-empty, this value has precedence over
- any metadata reference stored in status. The key "oauthMetadata" is
- used to locate the data. If specified and the config map or expected
- key is not found, no metadata is served. If the specified metadata
- is not valid, no metadata is served. The namespace for this config
- map is openshift-config.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- type:
- description: type identifies the cluster managed, user facing authentication
- mode in use. Specifically, it manages the component that responds
- to login attempts. The default is IntegratedOAuth.
- type: string
- webhookTokenAuthenticators:
- description: webhookTokenAuthenticators configures remote token reviewers.
- These remote authentication webhooks can be used to verify bearer
- tokens via the tokenreviews.authentication.k8s.io REST API. This
- is required to honor bearer tokens that are provisioned by an external
- authentication service. The namespace for these secrets is openshift-config.
- type: array
- items:
- description: webhookTokenAuthenticator holds the necessary configuration
- options for a remote token authenticator
- type: object
- properties:
- kubeConfig:
- description: 'kubeConfig contains kube config file data which
- describes how to access the remote webhook service. For further
- details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
- The key "kubeConfig" is used to locate the data. If the secret
- or expected key is not found, the webhook is not honored. If
- the specified kube config data is not valid, the webhook is
- not honored. The namespace for this secret is determined by
- the point of use.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- integratedOAuthMetadata:
- description: 'integratedOAuthMetadata contains the discovery endpoint
- data for OAuth 2.0 Authorization Server Metadata for the in-cluster
- integrated OAuth server. This discovery document can be viewed from
- its served location: oc get --raw ''/.well-known/oauth-authorization-server''
- For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- This contains the observed value based on cluster state. An explicitly
- set value in spec.oauthMetadata has precedence over this field. This
- field has no meaning if authentication spec.type is not set to IntegratedOAuth.
- The key "oauthMetadata" is used to locate the data. If the config
- map or expected key is not found, no metadata is served. If the specified
- metadata is not valid, no metadata is served. The namespace for this
- config map is openshift-config-managed.'
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
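
A minimal Authentication resource under the schema removed above might have looked like this sketch; the ConfigMap and Secret names are illustrative, while the namespaces and the "oauthMetadata"/"kubeConfig" keys are those required by the descriptions:

    apiVersion: config.openshift.io/v1
    kind: Authentication
    metadata:
      name: cluster
    spec:
      type: IntegratedOAuth
      oauthMetadata:
        name: my-oauth-metadata          # ConfigMap in openshift-config holding an "oauthMetadata" key
      webhookTokenAuthenticators:
      - kubeConfig:
          name: my-webhook-kubeconfig    # Secret holding a "kubeConfig" key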
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
deleted file mode 100644
index 8f7583971..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml
+++ /dev/null
@@ -1,366 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: builds.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: Build
- singular: build
- plural: builds
- listKind: BuildList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: "Build configures the behavior of OpenShift builds for the entire
- cluster. This includes default settings that can be overridden in BuildConfig
- objects, and overrides which are applied to all builds. \n The canonical name
- is \"cluster\""
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: Spec holds user-settable values for the build controller configuration
- type: object
- properties:
- additionalTrustedCA:
- description: "AdditionalTrustedCA is a reference to a ConfigMap containing
- additional CAs that should be trusted for image pushes and pulls during
- builds. The namespace for this config map is openshift-config. \n
- DEPRECATED: Additional CAs for image pull and push should be set on
- image.config.openshift.io/cluster instead."
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- buildDefaults:
- description: BuildDefaults controls the default information for Builds
- type: object
- properties:
- defaultProxy:
- description: "DefaultProxy contains the default proxy settings for
- all build operations, including image pull/push and source download.
-                    \n Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`,
- and `NO_PROXY` environment variables in the build config's strategy."
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests. Empty
- means unset and will not result in an env var.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
- means unset and will not result in an env var.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames
- and/or CIDRs for which the proxy should not be used. Empty
- means unset and will not result in an env var.
- type: string
- readinessEndpoints:
- description: readinessEndpoints is a list of endpoints used
- to verify readiness of the proxy.
- type: array
- items:
- type: string
- trustedCA:
- description: "trustedCA is a reference to a ConfigMap containing
- a CA certificate bundle used for client egress HTTPS connections.
- The certificate bundle must be from the CA that signed the
- proxy's certificate and be signed for everything. The trustedCA
- field should only be consumed by a proxy validator. The validator
- is responsible for reading the certificate bundle from required
- key \"ca-bundle.crt\" and copying it to a ConfigMap named
- \"trusted-ca-bundle\" in the \"openshift-config-managed\"
- namespace. The namespace for the ConfigMap referenced by trustedCA
- is \"openshift-config\". Here is an example ConfigMap (in
- yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
- user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
- | -----BEGIN CERTIFICATE----- Custom CA certificate
- bundle. -----END CERTIFICATE-----"
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- env:
- description: Env is a set of default environment variables that
- will be applied to the build if the specified variables do not
- exist on the build
- type: array
- items:
- description: EnvVar represents an environment variable present
- in a Container.
- type: object
- required:
- - name
- properties:
- name:
- description: Name of the environment variable. Must be a C_IDENTIFIER.
- type: string
- value:
- description: 'Variable references $(VAR_NAME) are expanded
- using the previous defined environment variables in the
- container and any service environment variables. If a variable
- cannot be resolved, the reference in the input string will
- be unchanged. The $(VAR_NAME) syntax can be escaped with
- a double $$, ie: $$(VAR_NAME). Escaped references will never
- be expanded, regardless of whether the variable exists or
- not. Defaults to "".'
- type: string
- valueFrom:
- description: Source for the environment variable's value.
- Cannot be used if value is not empty.
- type: object
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- type: object
- required:
- - key
- properties:
- key:
- description: The key to select.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the ConfigMap or its
- key must be defined
- type: boolean
- fieldRef:
- description: 'Selects a field of the pod: supports metadata.name,
- metadata.namespace, metadata.labels, metadata.annotations,
- spec.nodeName, spec.serviceAccountName, status.hostIP,
- status.podIP, status.podIPs.'
- type: object
- required:
- - fieldPath
- properties:
- apiVersion:
- description: Version of the schema the FieldPath is
- written in terms of, defaults to "v1".
- type: string
- fieldPath:
- description: Path of the field to select in the specified
- API version.
- type: string
- resourceFieldRef:
- description: 'Selects a resource of the container: only
- resources limits and requests (limits.cpu, limits.memory,
- limits.ephemeral-storage, requests.cpu, requests.memory
- and requests.ephemeral-storage) are currently supported.'
- type: object
- required:
- - resource
- properties:
- containerName:
- description: 'Container name: required for volumes,
- optional for env vars'
- type: string
- divisor:
- description: Specifies the output format of the exposed
- resources, defaults to "1"
- type: string
- resource:
- description: 'Required: resource to select'
- type: string
- secretKeyRef:
- description: Selects a key of a secret in the pod's namespace
- type: object
- required:
- - key
- properties:
- key:
- description: The key of the secret to select from. Must
- be a valid secret key.
- type: string
- name:
- description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- TODO: Add other useful fields. apiVersion, kind,
- uid?'
- type: string
- optional:
- description: Specify whether the Secret or its key
- must be defined
- type: boolean
- gitProxy:
- description: "GitProxy contains the proxy settings for git operations
- only. If set, this will override any Proxy settings for all git
- commands, such as git clone. \n Values that are not set here will
- be inherited from DefaultProxy."
- type: object
- properties:
- httpProxy:
- description: httpProxy is the URL of the proxy for HTTP requests. Empty
- means unset and will not result in an env var.
- type: string
- httpsProxy:
- description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
- means unset and will not result in an env var.
- type: string
- noProxy:
- description: noProxy is a comma-separated list of hostnames
- and/or CIDRs for which the proxy should not be used. Empty
- means unset and will not result in an env var.
- type: string
- readinessEndpoints:
- description: readinessEndpoints is a list of endpoints used
- to verify readiness of the proxy.
- type: array
- items:
- type: string
- trustedCA:
- description: "trustedCA is a reference to a ConfigMap containing
- a CA certificate bundle used for client egress HTTPS connections.
- The certificate bundle must be from the CA that signed the
- proxy's certificate and be signed for everything. The trustedCA
- field should only be consumed by a proxy validator. The validator
- is responsible for reading the certificate bundle from required
- key \"ca-bundle.crt\" and copying it to a ConfigMap named
- \"trusted-ca-bundle\" in the \"openshift-config-managed\"
- namespace. The namespace for the ConfigMap referenced by trustedCA
- is \"openshift-config\". Here is an example ConfigMap (in
- yaml): \n apiVersion: v1 kind: ConfigMap metadata: name:
- user-ca-bundle namespace: openshift-config data: ca-bundle.crt:
- | -----BEGIN CERTIFICATE----- Custom CA certificate
- bundle. -----END CERTIFICATE-----"
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- imageLabels:
- description: ImageLabels is a list of docker labels that are applied
- to the resulting image. User can override a default label by providing
- a label with the same name in their Build/BuildConfig.
- type: array
- items:
- type: object
- properties:
- name:
- description: Name defines the name of the label. It must have
- non-zero length.
- type: string
- value:
- description: Value defines the literal value of the label.
- type: string
- resources:
- description: Resources defines resource requirements to execute
- the build.
- type: object
- properties:
- limits:
- description: 'Limits describes the maximum amount of compute
- resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- additionalProperties:
- type: string
- requests:
- description: 'Requests describes the minimum amount of compute
- resources required. If Requests is omitted for a container,
- it defaults to Limits if that is explicitly specified, otherwise
- to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- additionalProperties:
- type: string
- buildOverrides:
- description: BuildOverrides controls override settings for builds
- type: object
- properties:
- imageLabels:
- description: ImageLabels is a list of docker labels that are applied
- to the resulting image. If user provided a label in their Build/BuildConfig
- with the same name as one in this list, the user's label will
- be overwritten.
- type: array
- items:
- type: object
- properties:
- name:
- description: Name defines the name of the label. It must have
- non-zero length.
- type: string
- value:
- description: Value defines the literal value of the label.
- type: string
- nodeSelector:
- description: NodeSelector is a selector which must be true for the
- build pod to fit on a node
- type: object
- additionalProperties:
- type: string
- tolerations:
- description: Tolerations is a list of Tolerations that will override
- any existing tolerations set on a build pod.
- type: array
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple <key,value,effect> using the
- matching operator <operator>.
- type: object
- properties:
- effect:
- description: Effect indicates the taint effect to match. Empty
- means match all taint effects. When specified, allowed values
- are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to the
- value. Valid operators are Exists and Equal. Defaults to
- Equal. Exists is equivalent to wildcard for value, so that
- a pod can tolerate all taints of a particular category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of time
- the toleration (which must be of effect NoExecute, otherwise
- this field is ignored) tolerates the taint. By default,
- it is not set, which means tolerate the taint forever (do
- not evict). Zero and negative values will be treated as
- 0 (evict immediately) by the system.
- type: integer
- format: int64
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
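
Tying the buildDefaults and buildOverrides sections together, a Build resource under the schema removed above might have looked like this sketch (all concrete values are illustrative):

    apiVersion: config.openshift.io/v1
    kind: Build
    metadata:
      name: cluster
    spec:
      buildDefaults:
        defaultProxy:
          httpProxy: http://proxy.example.com:3128
          noProxy: .cluster.local
        env:
        - name: GIT_SSL_NO_VERIFY        # must be a C_IDENTIFIER, per the EnvVar schema
          value: "true"
        resources:
          requests:
            cpu: 100m
            memory: 256Mi
      buildOverrides:
        imageLabels:
        - name: io.openshift.example     # overrides any user-provided label of the same name
          value: enforced
        nodeSelector:
          node-role.kubernetes.io/worker: ""
        tolerations:
        - key: builds.example.com/dedicated
          operator: Exists
          effect: NoSchedule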
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
deleted file mode 100644
index b527f7aa3..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: consoles.config.openshift.io
-spec:
- scope: Cluster
- preserveUnknownFields: false
- group: config.openshift.io
- names:
- kind: Console
- listKind: ConsoleList
- plural: consoles
- singular: console
- subresources:
- status: {}
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: Console holds cluster-wide configuration for the web console, including
- the logout URL, and reports the public URL of the console. The canonical name
- is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- authentication:
- description: ConsoleAuthentication defines a list of optional configuration
- for console authentication.
- type: object
- properties:
- logoutRedirect:
- description: 'An optional, absolute URL to redirect web browsers
- to after logging out of the console. If not specified, it will
- redirect to the default login page. This is required when using
- an identity provider that supports single sign-on (SSO) such as:
- - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML)
- - OAuth (GitHub, GitLab, Google) Logging out of the console will
- destroy the user''s token. The logoutRedirect provides the user
- the option to perform single logout (SLO) through the identity
- provider to destroy their single sign-on session.'
- type: string
- pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- consoleURL:
- description: The URL for the console. This will be derived from the
- host for the route that is created for the console.
- type: string
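
A sketch of a Console resource under the schema removed above; the logout URL is illustrative but matches the https-only pattern attached to logoutRedirect:

    apiVersion: config.openshift.io/v1
    kind: Console
    metadata:
      name: cluster
    spec:
      authentication:
        logoutRedirect: https://idp.example.com/logout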
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
deleted file mode 100644
index c883ee0f0..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: dnses.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: DNS
- listKind: DNSList
- plural: dnses
- singular: dns
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: DNS holds cluster-wide information about DNS. The canonical name
- is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- baseDomain:
- description: "baseDomain is the base domain of the cluster. All managed
- DNS records will be sub-domains of this base. \n For example, given
- the base domain `openshift.example.com`, an API server DNS record
- may be created for `cluster-api.openshift.example.com`. \n Once set,
- this field cannot be changed."
- type: string
- privateZone:
- description: "privateZone is the location where all the DNS records
- that are only available internally to the cluster exist. \n If this
- field is nil, no private records should be created. \n Once set, this
- field cannot be changed."
- type: object
- properties:
- id:
- description: "id is the identifier that can be used to find the
-                DNS hosted zone. \n On AWS the zone can be fetched using `ID` as id
-                in [1], on Azure the zone can be fetched using `ID` as a pre-determined
-                name in [2], and on GCP the zone can be fetched using `ID` as a pre-determined
- name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
- [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
- [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
- type: string
- tags:
- description: "tags can be used to query the DNS hosted zone. \n
-                On AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
-                using `Tags` as tag-filters. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
- type: object
- additionalProperties:
- type: string
- publicZone:
- description: "publicZone is the location where all the DNS records that
- are publicly accessible to the internet exist. \n If this field is
- nil, no public records should be created. \n Once set, this field
- cannot be changed."
- type: object
- properties:
- id:
- description: "id is the identifier that can be used to find the
-                  DNS hosted zone. \n On AWS the zone can be fetched using `ID` as id
-                  in [1], on Azure the zone can be fetched using `ID` as a pre-determined
-                  name in [2], and on GCP the zone can be fetched using `ID` as a pre-determined
- name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
- [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
- [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get"
- type: string
- tags:
- description: "tags can be used to query the DNS hosted zone. \n
-                  On AWS, resourcegroupstaggingapi [1] can be used to fetch a zone
-                  using `Tags` as tag-filters. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options"
- type: object
- additionalProperties:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
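
A sketch of a DNS resource under the schema removed above, reusing the `openshift.example.com` base domain from the baseDomain description; the zone id and tag values are illustrative:

    apiVersion: config.openshift.io/v1
    kind: DNS
    metadata:
      name: cluster
    spec:
      baseDomain: openshift.example.com
      publicZone:
        id: Z21EXAMPLEZONE               # e.g. an AWS Route53 hosted-zone ID
      privateZone:
        tags:
          Name: mycluster-int            # matched as a tag-filter on AWS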
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
deleted file mode 100644
index 89084a33f..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: featuregates.config.openshift.io
-spec:
- group: config.openshift.io
- version: v1
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: FeatureGate
- singular: featuregate
- plural: featuregates
- listKind: FeatureGateList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Feature holds cluster-wide information about feature gates. The
- canonical name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- customNoUpgrade:
- description: customNoUpgrade allows the enabling or disabling of any
- feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE,
- and PREVENTS UPGRADES. Because of its nature, this setting cannot
- be validated. If you have any typos or accidentally apply invalid
-              combinations, your cluster may fail in an unrecoverable way. featureSet
-              must equal "CustomNoUpgrade" to use this field.
- type: object
- properties:
- disabled:
- description: disabled is a list of all feature gates that you want
- to force off
- type: array
- items:
- type: string
- enabled:
- description: enabled is a list of all feature gates that you want
- to force on
- type: array
- items:
- type: string
- nullable: true
- featureSet:
- description: featureSet changes the list of features in the cluster. The
- default is empty. Be very careful adjusting this setting. Turning
- on or off features may cause irreversible changes in your cluster
- which cannot be undone.
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
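
A sketch of a FeatureGate resource using customNoUpgrade; as the description above stresses, this cannot be validated or undone and prevents upgrades, and the gate names here are placeholders:

    apiVersion: config.openshift.io/v1
    kind: FeatureGate
    metadata:
      name: cluster
    spec:
      featureSet: CustomNoUpgrade        # required for customNoUpgrade to take effect
      customNoUpgrade:
        enabled:
        - SomeAlphaFeature               # placeholder gate names
        disabled:
        - SomeUnwantedFeature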
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
deleted file mode 100644
index a0fd48709..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml
+++ /dev/null
@@ -1,144 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: images.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: Image
- singular: image
- plural: images
- listKind: ImageList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Image governs policies related to imagestream imports and runtime
- configuration for external registries. It allows cluster admins to configure
- which registries OpenShift is allowed to import images from, extra CA trust
- bundles for external registries, and policies to blacklist/whitelist registry
- hostnames. When exposing OpenShift's image registry to the public, this also
- lets cluster admins specify the external hostname.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- additionalTrustedCA:
- description: additionalTrustedCA is a reference to a ConfigMap containing
- additional CAs that should be trusted during imagestream import, pod
- image pull, build image pull, and imageregistry pullthrough. The namespace
- for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- allowedRegistriesForImport:
- description: allowedRegistriesForImport limits the container image registries
- that normal users may import images from. Set this list to the registries
- that you trust to contain valid Docker images and that you want applications
- to be able to import from. Users with permission to create Images
- or ImageStreamMappings via the API are not affected by this policy
- - typically only administrators or system integrations will have those
- permissions.
- type: array
- items:
- description: RegistryLocation contains a location of the registry
- specified by the registry domain name. The domain name might include
- wildcards, like '*' or '??'.
- type: object
- properties:
- domainName:
-                description: domainName specifies a domain name for the registry.
-                  If the registry uses a non-standard (i.e. not 80 or 443) port,
-                  the port should be included in the domain name as well.
- type: string
- insecure:
- description: insecure indicates whether the registry is secure
-                  (https) or insecure (http). By default (if not specified), the
-                  registry is assumed to be secure.
- type: boolean
- externalRegistryHostnames:
- description: externalRegistryHostnames provides the hostnames for the
- default external image registry. The external hostname should be set
- only when the image registry is exposed externally. The first value
- is used in 'publicDockerImageRepository' field in ImageStreams. The
- value must be in "hostname[:port]" format.
- type: array
- items:
- type: string
- registrySources:
- description: registrySources contains configuration that determines
- how the container runtime should treat individual registries when
- accessing images for builds+pods. (e.g. whether or not to allow insecure
- access). It does not contain configuration for the internal cluster
- registry.
- type: object
- properties:
- allowedRegistries:
- description: "allowedRegistries are whitelisted for image pull/push.
- All other registries are blocked. \n Only one of BlockedRegistries
- or AllowedRegistries may be set."
- type: array
- items:
- type: string
- blockedRegistries:
- description: "blockedRegistries are blacklisted from image pull/push.
- All other registries are allowed. \n Only one of BlockedRegistries
- or AllowedRegistries may be set."
- type: array
- items:
- type: string
- insecureRegistries:
- description: insecureRegistries are registries which do not have
-              a valid TLS certificate or only support HTTP connections.
- type: array
- items:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- externalRegistryHostnames:
- description: externalRegistryHostnames provides the hostnames for the
- default external image registry. The external hostname should be set
- only when the image registry is exposed externally. The first value
- is used in 'publicDockerImageRepository' field in ImageStreams. The
- value must be in "hostname[:port]" format.
- type: array
- items:
- type: string
- internalRegistryHostname:
- description: internalRegistryHostname sets the hostname for the default
- internal image registry. The value must be in "hostname[:port]" format.
- This value is set by the image registry operator which controls the
- internal registry hostname. For backward compatibility, users can
- still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this
- setting overrides the environment variable.
- type: string
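
A sketch of an Image resource under the schema removed above; registry hostnames and the ConfigMap name are illustrative, and only insecureRegistries is set under registrySources since allowedRegistries and blockedRegistries are mutually exclusive:

    apiVersion: config.openshift.io/v1
    kind: Image
    metadata:
      name: cluster
    spec:
      additionalTrustedCA:
        name: registry-ca                        # ConfigMap in openshift-config
      allowedRegistriesForImport:
      - domainName: registry.example.com:5000    # non-standard port included, per the schema
        insecure: false
      externalRegistryHostnames:
      - registry.apps.example.com                # "hostname[:port]" format
      registrySources:
        insecureRegistries:
        - insecure.example.com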
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
deleted file mode 100644
index 2aba542da..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml
+++ /dev/null
@@ -1,221 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: infrastructures.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Infrastructure
- listKind: InfrastructureList
- plural: infrastructures
- singular: infrastructure
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: Infrastructure holds cluster-wide information about Infrastructure. The
- canonical name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- cloudConfig:
- description: cloudConfig is a reference to a ConfigMap containing the
- cloud provider configuration file. This configuration file is used
- to configure the Kubernetes cloud provider integration when using
- the built-in cloud provider integration or the external cloud controller
- manager. The namespace for this config map is openshift-config.
- type: object
- properties:
- key:
- description: Key allows pointing to a specific key/value inside
- of the configmap. This is useful for logical file references.
- type: string
- name:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- apiServerInternalURI:
-          description: apiServerInternalURI is a valid URI with scheme (http/https),
-            address and port. apiServerInternalURI can be used by components
-            like kubelets to contact the Kubernetes API server using the infrastructure
- provider rather than Kubernetes networking.
- type: string
- apiServerURL:
-          description: apiServerURL is a valid URI with scheme (http/https), address
- and port. apiServerURL can be used by components like the web console
- to tell users where to find the Kubernetes API.
- type: string
- etcdDiscoveryDomain:
- description: 'etcdDiscoveryDomain is the domain used to fetch the SRV
- records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery'
- type: string
- infrastructureName:
- description: infrastructureName uniquely identifies a cluster with a
- human friendly name. Once set it should not be changed. Must be of
- max length 27 and must have only alphanumeric or hyphen characters.
- type: string
- platform:
- description: "platform is the underlying infrastructure provider for
- the cluster. \n Deprecated: Use platformStatus.type instead."
- type: string
- platformStatus:
- description: platformStatus holds status information specific to the
- underlying infrastructure provider.
- type: object
- properties:
- aws:
- description: AWS contains settings specific to the Amazon Web Services
- infrastructure provider.
- type: object
- properties:
- region:
- description: region holds the default AWS region for new AWS
- resources created by the cluster.
- type: string
- azure:
- description: Azure contains settings specific to the Azure infrastructure
- provider.
- type: object
- properties:
- networkResourceGroupName:
- description: networkResourceGroupName is the Resource Group
- for network resources like the Virtual Network and Subnets
-                  used by the cluster. If empty, the value is the same as ResourceGroupName.
- type: string
- resourceGroupName:
- description: resourceGroupName is the Resource Group for new
- Azure resources created for the cluster.
- type: string
- baremetal:
- description: BareMetal contains settings specific to the BareMetal
- platform.
- type: object
- properties:
- apiServerInternalIP:
- description: apiServerInternalIP is an IP address to contact
- the Kubernetes API server that can be used by components inside
- the cluster, like kubelets using the infrastructure rather
- than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- points to. It is the IP for a self-hosted load balancer in
- front of the API servers.
- type: string
- ingressIP:
- description: ingressIP is an external IP which routes to the
- default ingress controller. The IP is a suitable target of
- a wildcard DNS record used to resolve default route host names.
- type: string
- nodeDNSIP:
- description: nodeDNSIP is the IP address for the internal DNS
- used by the nodes. Unlike the one managed by the DNS operator,
- `NodeDNSIP` provides name resolution for the nodes themselves.
- There is no DNS-as-a-service for BareMetal deployments. In
- order to minimize necessary changes to the datacenter DNS,
- a DNS service is hosted as a static pod to serve those hostnames
- to the nodes in the cluster.
- type: string
- gcp:
- description: GCP contains settings specific to the Google Cloud
- Platform infrastructure provider.
- type: object
- properties:
- projectID:
-              description: projectID is the Project ID for new GCP
- resources created for the cluster.
- type: string
- region:
- description: region holds the region for new GCP resources created
- for the cluster.
- type: string
- openstack:
- description: OpenStack contains settings specific to the OpenStack
- infrastructure provider.
- type: object
- properties:
- apiServerInternalIP:
- description: apiServerInternalIP is an IP address to contact
- the Kubernetes API server that can be used by components inside
- the cluster, like kubelets using the infrastructure rather
- than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- points to. It is the IP for a self-hosted load balancer in
- front of the API servers.
- type: string
- cloudName:
- description: cloudName is the name of the desired OpenStack
- cloud in the client configuration file (`clouds.yaml`).
- type: string
- ingressIP:
- description: ingressIP is an external IP which routes to the
- default ingress controller. The IP is a suitable target of
- a wildcard DNS record used to resolve default route host names.
- type: string
- nodeDNSIP:
- description: nodeDNSIP is the IP address for the internal DNS
- used by the nodes. Unlike the one managed by the DNS operator,
- `NodeDNSIP` provides name resolution for the nodes themselves.
- There is no DNS-as-a-service for OpenStack deployments. In
- order to minimize necessary changes to the datacenter DNS,
- a DNS service is hosted as a static pod to serve those hostnames
- to the nodes in the cluster.
- type: string
- ovirt:
- description: Ovirt contains settings specific to the oVirt infrastructure
- provider.
- type: object
- properties:
- apiServerInternalIP:
- description: apiServerInternalIP is an IP address to contact
- the Kubernetes API server that can be used by components inside
- the cluster, like kubelets using the infrastructure rather
- than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- points to. It is the IP for a self-hosted load balancer in
- front of the API servers.
- type: string
- ingressIP:
- description: ingressIP is an external IP which routes to the
- default ingress controller. The IP is a suitable target of
- a wildcard DNS record used to resolve default route host names.
- type: string
- nodeDNSIP:
- description: nodeDNSIP is the IP address for the internal DNS
- used by the nodes. Unlike the one managed by the DNS operator,
- `NodeDNSIP` provides name resolution for the nodes themselves.
- There is no DNS-as-a-service for oVirt deployments. In order
- to minimize necessary changes to the datacenter DNS, a DNS
- service is hosted as a static pod to serve those hostnames
- to the nodes in the cluster.
- type: string
- type:
- description: type is the underlying infrastructure provider for
- the cluster. This value controls whether infrastructure automation
- such as service load balancers, dynamic volume provisioning, machine
- creation and deletion, and other integrations are enabled. If
- None, no infrastructure automation is enabled. Allowed values
- are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack",
- "VSphere", "oVirt", and "None". Individual components may not
- support all platforms, and must handle unrecognized platforms
- as None if they do not support that platform.
- type: string
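
A sketch of an Infrastructure resource under the schema removed above; only spec.cloudConfig is user-settable, while the status stanza shown here is the kind of data the cluster itself would report (all values illustrative):

    apiVersion: config.openshift.io/v1
    kind: Infrastructure
    metadata:
      name: cluster
    spec:
      cloudConfig:
        name: cloud-provider-config          # ConfigMap in openshift-config
        key: config
    status:
      infrastructureName: mycluster-x7k9p    # max 27 chars, alphanumeric or hyphens only
      apiServerURL: https://api.openshift.example.com:6443
      apiServerInternalURI: https://api-int.openshift.example.com:6443
      platform: AWS                          # deprecated in favor of platformStatus.type
      platformStatus:
        type: AWS
        aws:
          region: us-east-1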
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
deleted file mode 100644
index ada440425..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: ingresses.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Ingress
- listKind: IngressList
- plural: ingresses
- singular: ingress
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Ingress holds cluster-wide information about ingress, including
- the default ingress domain used for routes. The canonical name is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- domain:
- description: "domain is used to generate a default host name for a route
- when the route's host name is empty. The generated host name will
- follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
- \n It is also used as the default wildcard domain suffix for ingress.
- The default ingresscontroller domain will follow this pattern: \"*.<domain>\".
- \n Once set, changing domain is not currently supported."
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
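
The domain-driven route host pattern documented above is simple enough to sketch. A minimal illustration in Go, assuming nothing beyond the "<route-name>.<route-namespace>.<domain>" pattern stated in the description; the helper name is hypothetical, not part of the OpenShift API:

package main

import "fmt"

// defaultRouteHost is a hypothetical helper showing the documented
// "<route-name>.<route-namespace>.<domain>" pattern used when a
// route's host name is empty.
func defaultRouteHost(routeName, routeNamespace, domain string) string {
	return fmt.Sprintf("%s.%s.%s", routeName, routeNamespace, domain)
}

func main() {
	fmt.Println(defaultRouteHost("frontend", "shop", "apps.example.com"))
	// Output: frontend.shop.apps.example.com
}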
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
deleted file mode 100644
index bc3b62a87..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml
+++ /dev/null
@@ -1,141 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: networks.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: Network
- listKind: NetworkList
- plural: networks
- singular: network
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: 'Network holds cluster-wide information about Network. The canonical
- name is `cluster`. It is used to configure the desired network configuration,
- such as: IP address pools for services/pod IPs, network plugin, etc. Please
- view network.spec for an explanation on what applies when configuring this
- resource.'
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration. As a general
- rule, this SHOULD NOT be read directly. Instead, you should consume the
- NetworkStatus, as it indicates the currently deployed configuration. Currently,
- most spec fields are immutable after installation. Please view the individual
- ones for further details on each.
- type: object
- properties:
- clusterNetwork:
- description: IP address pool to use for pod IPs. This field is immutable
- after installation.
- type: array
- items:
- description: ClusterNetworkEntry is a contiguous block of IP addresses
- from which pod IPs are allocated.
- type: object
- properties:
- cidr:
- description: The complete block for pod IPs.
- type: string
- hostPrefix:
- description: The size (prefix) of block to allocate to each node.
- type: integer
- format: int32
- minimum: 0
- externalIP:
- description: externalIP defines configuration for controllers that affect
- Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.
- type: object
- properties:
- autoAssignCIDRs:
- description: autoAssignCIDRs is a list of CIDRs from which to automatically
- assign Service.ExternalIP. These are assigned when the service
- is of type LoadBalancer. In general, this is only useful for bare-metal
- clusters. In OpenShift 3.x, this was misleadingly called "IngressIPs".
- Automatically assigned External IPs are not affected by any ExternalIPPolicy
- rules. Currently, only one entry may be provided.
- type: array
- items:
- type: string
- policy:
- description: policy is a set of restrictions applied to the ExternalIP
- field. If nil or empty, then ExternalIP is not allowed to be set.
- type: object
- properties:
- allowedCIDRs:
- description: allowedCIDRs is the list of allowed CIDRs.
- type: array
- items:
- type: string
- rejectedCIDRs:
- description: rejectedCIDRs is the list of disallowed CIDRs.
- These take precedence over allowedCIDRs.
- type: array
- items:
- type: string
- networkType:
- description: 'NetworkType is the plugin that is to be deployed (e.g.
- OpenShiftSDN). This should match a value that the cluster-network-operator
- understands, or else no networking will be installed. Currently supported
- values are: - OpenShiftSDN. This field is immutable after installation.'
- type: string
- serviceNetwork:
- description: IP address pool for services. Currently, we only support
- a single entry here. This field is immutable after installation.
- type: array
- items:
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
- properties:
- clusterNetwork:
- description: IP address pool to use for pod IPs.
- type: array
- items:
- description: ClusterNetworkEntry is a contiguous block of IP addresses
- from which pod IPs are allocated.
- type: object
- properties:
- cidr:
- description: The complete block for pod IPs.
- type: string
- hostPrefix:
- description: The size (prefix) of block to allocate to each node.
- type: integer
- format: int32
- minimum: 0
- clusterNetworkMTU:
- description: ClusterNetworkMTU is the MTU for inter-pod networking.
- type: integer
- networkType:
- description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
- type: string
- serviceNetwork:
- description: IP address pool for services. Currently, we only support
- a single entry here.
- type: array
- items:
- type: string
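
To make the clusterNetwork cidr/hostPrefix relationship above concrete, here is a hedged sketch of the arithmetic, assuming IPv4 and a hostPrefix at least as large as the cluster prefix; the helper is illustrative, not operator code:

package main

import "fmt"

// nodeBlocks computes how many per-node blocks of size /hostPrefix fit
// into a cluster CIDR of size /clusterPrefix, and how many pod IPs each
// block holds. Illustrative only; assumes IPv4 and hostPrefix >= clusterPrefix.
func nodeBlocks(clusterPrefix, hostPrefix int) (nodes, podIPsPerNode int) {
	return 1 << uint(hostPrefix-clusterPrefix), 1 << uint(32-hostPrefix)
}

func main() {
	// e.g. cidr: 10.128.0.0/14 with hostPrefix: 23
	nodes, ips := nodeBlocks(14, 23)
	fmt.Printf("%d node blocks, %d pod IPs each\n", nodes, ips)
	// Output: 512 node blocks, 512 pod IPs each
}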
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
deleted file mode 100644
index fd763d047..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml
+++ /dev/null
@@ -1,661 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: oauths.config.openshift.io
-spec:
- group: config.openshift.io
- names:
- kind: OAuth
- listKind: OAuthList
- plural: oauths
- singular: oauth
- scope: Cluster
- preserveUnknownFields: false
- subresources:
- status: {}
- versions:
- - name: v1
- served: true
- storage: true
- "validation":
- "openAPIV3Schema":
- description: OAuth holds cluster-wide information about OAuth. The canonical
- name is `cluster`. It is used to configure the integrated OAuth server. This
- configuration is only honored when the top level Authentication config has
- type set to IntegratedOAuth.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: OAuthSpec contains desired cluster auth configuration
- type: object
- properties:
- identityProviders:
- description: identityProviders is an ordered list of ways for a user
- to identify themselves. When this list is empty, no identities are
- provisioned for users.
- type: array
- items:
- description: IdentityProvider provides identities for users authenticating
- using credentials
- type: object
- properties:
- basicAuth:
- description: basicAuth contains configuration options for the
- BasicAuth IdP
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- tlsClientCert:
- description: tlsClientCert is an optional reference to a secret
- by name that contains the PEM-encoded TLS client certificate
- to present when connecting to the server. The key "tls.crt"
- is used to locate the data. If specified and the secret
- or expected key is not found, the identity provider is not
- honored. If the specified certificate data is not valid,
- the identity provider is not honored. The namespace for
- this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- tlsClientKey:
- description: tlsClientKey is an optional reference to a secret
- by name that contains the PEM-encoded TLS private key for
- the client certificate referenced in tlsClientCert. The
- key "tls.key" is used to locate the data. If specified and
- the secret or expected key is not found, the identity provider
- is not honored. If the specified certificate data is not
- valid, the identity provider is not honored. The namespace
- for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- url:
- description: url is the remote URL to connect to
- type: string
- github:
- description: github enables user authentication using GitHub credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- This can only be configured when hostname is set to a non-empty
- value. The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- hostname:
- description: hostname is the optional domain (e.g. "mycompany.com")
- for use with a hosted instance of GitHub Enterprise. It
- must match the GitHub Enterprise settings value configured
- at /setup/settings#hostname.
- type: string
- organizations:
- description: organizations optionally restricts which organizations
- are allowed to log in
- type: array
- items:
- type: string
- teams:
- description: teams optionally restricts which teams are allowed
- to log in. Format is <org>/<team>.
- type: array
- items:
- type: string
- gitlab:
- description: gitlab enables user authentication using GitLab credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- url:
- description: url is the oauth server base URL
- type: string
- google:
- description: google enables user authentication using Google credentials
- type: object
- properties:
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- hostedDomain:
- description: hostedDomain is the optional Google App domain
- (e.g. "mycompany.com") to restrict logins to
- type: string
- htpasswd:
- description: htpasswd enables user authentication using an HTPasswd
- file to validate credentials
- type: object
- properties:
- fileData:
- description: fileData is a required reference to a secret
- by name containing the data to use as the htpasswd file.
- The key "htpasswd" is used to locate the data. If the secret
- or expected key is not found, the identity provider is not
- honored. If the specified htpasswd data is not valid, the
- identity provider is not honored. The namespace for this
- secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- keystone:
- description: keystone enables user authentication using keystone
- password credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- domainName:
- description: domainName is required for keystone v3
- type: string
- tlsClientCert:
- description: tlsClientCert is an optional reference to a secret
- by name that contains the PEM-encoded TLS client certificate
- to present when connecting to the server. The key "tls.crt"
- is used to locate the data. If specified and the secret
- or expected key is not found, the identity provider is not
- honored. If the specified certificate data is not valid,
- the identity provider is not honored. The namespace for
- this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- tlsClientKey:
- description: tlsClientKey is an optional reference to a secret
- by name that contains the PEM-encoded TLS private key for
- the client certificate referenced in tlsClientCert. The
- key "tls.key" is used to locate the data. If specified and
- the secret or expected key is not found, the identity provider
- is not honored. If the specified certificate data is not
- valid, the identity provider is not honored. The namespace
- for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- url:
- description: url is the remote URL to connect to
- type: string
- ldap:
- description: ldap enables user authentication using LDAP credentials
- type: object
- properties:
- attributes:
- description: attributes maps LDAP attributes to identities
- type: object
- properties:
- email:
- description: email is the list of attributes whose values
- should be used as the email address. Optional. If unspecified,
- no email is set for the identity
- type: array
- items:
- type: string
- id:
- description: id is the list of attributes whose values
- should be used as the user ID. Required. First non-empty
- attribute is used. At least one attribute is required.
- If none of the listed attributes has a value, authentication
- fails. The LDAP standard identity attribute is "dn".
- type: array
- items:
- type: string
- name:
- description: name is the list of attributes whose values
- should be used as the display name. Optional. If unspecified,
- no display name is set for the identity. The LDAP standard
- display name attribute is "cn".
- type: array
- items:
- type: string
- preferredUsername:
- description: preferredUsername is the list of attributes
- whose values should be used as the preferred username.
- The LDAP standard login attribute is "uid".
- type: array
- items:
- type: string
- bindDN:
- description: bindDN is an optional DN to bind with during
- the search phase.
- type: string
- bindPassword:
- description: bindPassword is an optional reference to a secret
- by name containing a password to bind with during the search
- phase. The key "bindPassword" is used to locate the data.
- If specified and the secret or expected key is not found,
- the identity provider is not honored. The namespace for
- this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- insecure:
- description: 'insecure, if true, indicates the connection
- should not use TLS. WARNING: it should not be set to `true`
- with the URL scheme "ldaps://", as "ldaps://" URLs always attempt
- to connect using TLS, even when `insecure` is set to `true`.
- When `true`, "ldap://" URLs connect insecurely. When `false`,
- "ldap://" URLs are upgraded to a TLS connection using StartTLS
- as specified in https://tools.ietf.org/html/rfc2830.'
- type: boolean
- url:
- description: 'url is an RFC 2255 URL which specifies the LDAP
- search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter'
- type: string
- mappingMethod:
- description: mappingMethod determines how identities from this
- provider are mapped to users. Defaults to "claim".
- type: string
- name:
- description: 'name is used to qualify the identities returned
- by this provider. - It MUST be unique and not shared by any
- other identity provider used - It MUST be a valid path segment:
- name cannot equal "." or ".." or contain "/" or "%" or ":" Ref:
- https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName'
- type: string
- openID:
- description: openID enables user authentication using OpenID credentials
- type: object
- properties:
- ca:
- description: ca is an optional reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. The key "ca.crt" is used to locate
- the data. If specified and the config map or expected key
- is not found, the identity provider is not honored. If the
- specified ca data is not valid, the identity provider is
- not honored. If empty, the default system roots are used.
- The namespace for this config map is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- claims:
- description: claims mappings
- type: object
- properties:
- email:
- description: email is the list of claims whose values
- should be used as the email address. Optional. If unspecified,
- no email is set for the identity
- type: array
- items:
- type: string
- name:
- description: name is the list of claims whose values should
- be used as the display name. Optional. If unspecified,
- no display name is set for the identity
- type: array
- items:
- type: string
- preferredUsername:
- description: preferredUsername is the list of claims whose
- values should be used as the preferred username. If
- unspecified, the preferred username is determined from
- the value of the sub claim
- type: array
- items:
- type: string
- clientID:
- description: clientID is the oauth client ID
- type: string
- clientSecret:
- description: clientSecret is a required reference to the secret
- by name containing the oauth client secret. The key "clientSecret"
- is used to locate the data. If the secret or expected key
- is not found, the identity provider is not honored. The
- namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- secret
- type: string
- extraAuthorizeParameters:
- description: extraAuthorizeParameters are any custom parameters
- to add to the authorize request.
- type: object
- additionalProperties:
- type: string
- extraScopes:
- description: extraScopes are any scopes to request in addition
- to the standard "openid" scope.
- type: array
- items:
- type: string
- issuer:
- description: issuer is the URL that the OpenID Provider asserts
- as its Issuer Identifier. It must use the https scheme with
- no query or fragment component.
- type: string
- requestHeader:
- description: requestHeader enables user authentication using request
- header credentials
- type: object
- properties:
- ca:
- description: ca is a required reference to a config map by
- name containing the PEM-encoded CA bundle. It is used as
- a trust anchor to validate the TLS certificate presented
- by the remote server. Specifically, it allows verification
- of incoming requests to prevent header spoofing. The key
- "ca.crt" is used to locate the data. If the config map or
- expected key is not found, the identity provider is not
- honored. If the specified ca data is not valid, the identity
- provider is not honored. The namespace for this config map
- is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced
- config map
- type: string
- challengeURL:
- description: challengeURL is a URL to redirect unauthenticated
- /authorize requests to. Unauthenticated requests from OAuth
- clients which expect WWW-Authenticate challenges will be
- redirected here. ${url} is replaced with the current URL,
- escaped to be safe in a query parameter, e.g. https://www.example.com/sso-login?then=${url};
- ${query} is replaced with the current query string, e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}.
- Required when challenge is set to true.
- type: string
- clientCommonNames:
- description: clientCommonNames is an optional list of common
- names to require a match from. If empty, any client certificate
- validated against the clientCA bundle is considered authoritative.
- type: array
- items:
- type: string
- emailHeaders:
- description: emailHeaders is the set of headers to check for
- the email address
- type: array
- items:
- type: string
- headers:
- description: headers is the set of headers to check for identity
- information
- type: array
- items:
- type: string
- loginURL:
- description: loginURL is a URL to redirect unauthenticated
- /authorize requests to. Unauthenticated requests from OAuth
- clients which expect interactive logins will be redirected
- here. ${url} is replaced with the current URL, escaped to
- be safe in a query parameter, e.g. https://www.example.com/sso-login?then=${url};
- ${query} is replaced with the current query string, e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}.
- Required when login is set to true.
- type: string
- nameHeaders:
- description: nameHeaders is the set of headers to check for
- the display name
- type: array
- items:
- type: string
- preferredUsernameHeaders:
- description: preferredUsernameHeaders is the set of headers
- to check for the preferred username
- type: array
- items:
- type: string
- type:
- description: type identifies the identity provider type for this
- entry.
- type: string
- templates:
- description: templates allow you to customize pages like the login page.
- type: object
- properties:
- error:
- description: error is the name of a secret that specifies a go template
- to use to render error pages during the authentication or grant
- flow. The key "errors.html" is used to locate the template data.
- If specified and the secret or expected key is not found, the
- default error page is used. If the specified template is not valid,
- the default error page is used. If unspecified, the default error
- page is used. The namespace for this secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- login:
- description: login is the name of a secret that specifies a go template
- to use to render the login page. The key "login.html" is used
- to locate the template data. If specified and the secret or expected
- key is not found, the default login page is used. If the specified
- template is not valid, the default login page is used. If unspecified,
- the default login page is used. The namespace for this secret
- is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- providerSelection:
- description: providerSelection is the name of a secret that specifies
- a go template to use to render the provider selection page. The
- key "providers.html" is used to locate the template data. If specified
- and the secret or expected key is not found, the default provider
- selection page is used. If the specified template is not valid,
- the default provider selection page is used. If unspecified, the
- default provider selection page is used. The namespace for this
- secret is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced secret
- type: string
- tokenConfig:
- description: tokenConfig contains options for authorization and access
- tokens
- type: object
- properties:
- accessTokenInactivityTimeoutSeconds:
- description: 'accessTokenInactivityTimeoutSeconds defines the default
- token inactivity timeout for tokens granted by any client. The
- value represents the maximum amount of time that can occur between
- consecutive uses of the token. Tokens become invalid if they are
- not used within this temporal window. The user will need to acquire
- a new token to regain access once a token times out. Valid integer
- values are: x < 0, timeouts are enabled but tokens never time out
- unless configured per client (e.g. `-1`); x = 0, timeouts are disabled
- (the default); x > 0, tokens time out after x seconds of inactivity.
- The current minimum allowed value for x is 300 (5 minutes).'
- type: integer
- format: int32
- accessTokenMaxAgeSeconds:
- description: accessTokenMaxAgeSeconds defines the maximum age of
- access tokens
- type: integer
- format: int32
- status:
- description: OAuthStatus shows the current known state of the OAuth server
- in the cluster.
- type: object
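
The accessTokenInactivityTimeoutSeconds rules above reduce to a small validation routine. A hedged sketch of those rules; the function is hypothetical, not OpenShift code:

package main

import (
	"errors"
	"fmt"
)

// validateInactivityTimeout mirrors the documented rules: x < 0 enables
// timeouts without a cluster-wide default, x = 0 disables timeouts (the
// default), and positive values must be at least 300 seconds.
func validateInactivityTimeout(x int32) error {
	switch {
	case x < 0:
		return nil // timeouts enabled, but only per-client configuration applies
	case x == 0:
		return nil // timeouts disabled (the default)
	case x < 300:
		return errors.New("minimum allowed value is 300 (5 minutes)")
	default:
		return nil // tokens time out after x seconds of inactivity
	}
}

func main() {
	fmt.Println(validateInactivityTimeout(120)) // minimum allowed value is 300 (5 minutes)
	fmt.Println(validateInactivityTimeout(600)) // <nil>
}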
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
deleted file mode 100644
index a625aa617..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: projects.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- versions:
- - name: v1
- served: true
- storage: true
- names:
- kind: Project
- listKind: ProjectList
- plural: projects
- singular: project
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Project holds cluster-wide information about Project. The canonical
- name is `cluster`
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- projectRequestMessage:
- description: projectRequestMessage is the string presented to a user
- if they are unable to request a project via the projectrequest API
- endpoint.
- type: string
- projectRequestTemplate:
- description: projectRequestTemplate is the template to use for creating
- projects in response to projectrequest. This must point to a template
- in 'openshift-config' namespace. It is optional. If it is not specified,
- a default template is used.
- type: object
- properties:
- name:
- description: name is the metadata.name of the referenced project
- request template
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
deleted file mode 100644
index 6f5336c8f..000000000
--- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- name: schedulers.config.openshift.io
-spec:
- group: config.openshift.io
- scope: Cluster
- preserveUnknownFields: false
- names:
- kind: Scheduler
- singular: scheduler
- plural: schedulers
- listKind: SchedulerList
- versions:
- - name: v1
- served: true
- storage: true
- subresources:
- status: {}
- "validation":
- "openAPIV3Schema":
- description: Scheduler holds cluster-wide config information to run the Kubernetes
- Scheduler and influence its placement decisions. The canonical name for this
- config is `cluster`.
- type: object
- required:
- - spec
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: spec holds user settable values for configuration
- type: object
- properties:
- defaultNodeSelector:
- description: 'defaultNodeSelector helps set the cluster-wide default
- node selector to restrict pod placement to specific nodes. This is
- applied to the pods created in all namespaces without a specified
- nodeSelector value. For example, defaultNodeSelector: "type=user-node,region=east"
- would set the nodeSelector field in the pod spec to "type=user-node,region=east"
- for all pods created in all namespaces. Namespaces having project-wide
- node selectors won''t be impacted even if this field is set. This
- adds an annotation section to the namespace. For example, if a new
- namespace is created with node-selector=''type=user-node,region=east'',
- the annotation openshift.io/node-selector: type=user-node,region=east
- gets added to the project. When the openshift.io/node-selector annotation
- is set on the project, the value is used in preference to the value
- we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector:
- "type=user-node,region=west" means that the default of "type=user-node,region=east"
- set in defaultNodeSelector would not be applied.'
- type: string
- mastersSchedulable:
- description: 'MastersSchedulable allows master nodes to be schedulable.
- When this flag is turned on, all the master nodes in the cluster will
- be made schedulable, so that workload pods can run on them. The default
- value for this field is false, meaning none of the master nodes are
- schedulable. Important Note: Once the workload pods start running
- on the master nodes, extreme care must be taken to ensure that cluster-critical
- control plane components are not impacted. Please turn on this field
- after doing due diligence.'
- type: boolean
- policy:
- description: policy is a reference to a ConfigMap containing scheduler
- policy which has user-specified predicates and priorities. If this
- ConfigMap is not available, the scheduler defaults to the DefaultAlgorithmProvider.
- The namespace for this configmap is openshift-config.
- type: object
- required:
- - name
- properties:
- name:
- description: name is the metadata.name of the referenced config
- map
- type: string
- status:
- description: status holds observed values from the cluster. They may not
- be overridden.
- type: object
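
defaultNodeSelector strings such as "type=user-node,region=east" are comma-separated key=value pairs. A minimal sketch of how such a string maps onto labels; the parser is an illustration, not the scheduler's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// parseSelector splits a selector string like "type=user-node,region=east"
// into a label map. Hypothetical helper for illustration only.
func parseSelector(s string) map[string]string {
	out := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		if k, v, ok := strings.Cut(pair, "="); ok {
			out[k] = v
		}
	}
	return out
}

func main() {
	fmt.Println(parseSelector("type=user-node,region=east"))
	// map[region:east type:user-node]
}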
diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go
deleted file mode 100644
index 4ff5208f2..000000000
--- a/vendor/github.com/openshift/api/config/v1/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +k8s:deepcopy-gen=package,register
-// +k8s:defaulter-gen=TypeMeta
-// +k8s:openapi-gen=true
-
-// +kubebuilder:validation:Optional
-// +groupName=config.openshift.io
-// Package v1 is the v1 version of the API.
-package v1
diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go
deleted file mode 100644
index 35eace370..000000000
--- a/vendor/github.com/openshift/api/config/v1/register.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-var (
- GroupName = "config.openshift.io"
- GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
- schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- // Install is a function which adds this version to a scheme
- Install = schemeBuilder.AddToScheme
-
- // SchemeGroupVersion generated code relies on this name
- // Deprecated
- SchemeGroupVersion = GroupVersion
- // AddToScheme exists solely to keep the old generators creating valid code
- // DEPRECATED
- AddToScheme = schemeBuilder.AddToScheme
-)
-
-// Resource generated code relies on this being here, but it logically belongs to the group
-// DEPRECATED
-func Resource(resource string) schema.GroupResource {
- return schema.GroupResource{Group: GroupName, Resource: resource}
-}
-
-// Adds the list of known types to api.Scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(GroupVersion,
- &APIServer{},
- &APIServerList{},
- &Authentication{},
- &AuthenticationList{},
- &Build{},
- &BuildList{},
- &ClusterOperator{},
- &ClusterOperatorList{},
- &ClusterVersion{},
- &ClusterVersionList{},
- &Console{},
- &ConsoleList{},
- &DNS{},
- &DNSList{},
- &FeatureGate{},
- &FeatureGateList{},
- &Image{},
- &ImageList{},
- &Infrastructure{},
- &InfrastructureList{},
- &Ingress{},
- &IngressList{},
- &Network{},
- &NetworkList{},
- &OAuth{},
- &OAuthList{},
- &OperatorHub{},
- &OperatorHubList{},
- &Project{},
- &ProjectList{},
- &Proxy{},
- &ProxyList{},
- &Scheduler{},
- &SchedulerList{},
- )
- metav1.AddToGroupVersion(scheme, GroupVersion)
- return nil
-}
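
For context on how this now-removed registration code was consumed, a minimal sketch of installing the group's types into a runtime.Scheme, assuming the vendored import path shown in the diff header:

package main

import (
	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install adds all config.openshift.io/v1 types listed in addKnownTypes.
	if err := configv1.Install(scheme); err != nil {
		panic(err)
	}
	// After Install, the scheme recognizes kinds such as Ingress.
	_ = scheme.Recognizes(configv1.GroupVersion.WithKind("Ingress"))
}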
diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go
deleted file mode 100644
index 6a5718c1d..000000000
--- a/vendor/github.com/openshift/api/config/v1/stringsource.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package v1
-
-import "encoding/json"
-
- // UnmarshalJSON implements the json.Unmarshaler interface.
- // If the value is a string, it sets the Value field of the StringSource.
- // Otherwise, it is unmarshaled into the StringSourceSpec struct.
-func (s *StringSource) UnmarshalJSON(value []byte) error {
- // If we can unmarshal to a simple string, just set the value
- var simpleValue string
- if err := json.Unmarshal(value, &simpleValue); err == nil {
- s.Value = simpleValue
- return nil
- }
-
- // Otherwise do the full struct unmarshal
- return json.Unmarshal(value, &s.StringSourceSpec)
-}
-
- // MarshalJSON implements the json.Marshaler interface.
-// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string.
-// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
-func (s *StringSource) MarshalJSON() ([]byte, error) {
- // If we have only a cleartext value set, do a simple string marshal
- if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
- return json.Marshal(s.Value)
- }
-
- // Otherwise do the full struct marshal of the externalized bits
- return json.Marshal(s.StringSourceSpec)
-}
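
The marshaling contract of the deleted StringSource type is easy to demonstrate. A self-contained sketch that duplicates the two types locally so it runs standalone:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the vendored types, so the example is runnable on its own.
type StringSourceSpec struct {
	Value   string `json:"value"`
	Env     string `json:"env"`
	File    string `json:"file"`
	KeyFile string `json:"keyFile"`
}

type StringSource struct {
	StringSourceSpec `json:",inline"`
}

// UnmarshalJSON accepts either a bare JSON string (stored in Value) or a
// full StringSourceSpec object, matching the code above.
func (s *StringSource) UnmarshalJSON(value []byte) error {
	var simple string
	if err := json.Unmarshal(value, &simple); err == nil {
		s.Value = simple
		return nil
	}
	return json.Unmarshal(value, &s.StringSourceSpec)
}

// MarshalJSON emits a bare string when only Value is set, otherwise the
// full spec object.
func (s *StringSource) MarshalJSON() ([]byte, error) {
	if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
		return json.Marshal(s.Value)
	}
	return json.Marshal(s.StringSourceSpec)
}

func main() {
	var a StringSource
	_ = json.Unmarshal([]byte(`"hunter2"`), &a) // bare string -> Value
	out, _ := json.Marshal(&a)
	fmt.Println(string(out)) // "hunter2"

	var b StringSource
	_ = json.Unmarshal([]byte(`{"file":"/etc/pw","keyFile":"/etc/key"}`), &b)
	out, _ = json.Marshal(&b)
	fmt.Println(string(out)) // {"value":"","env":"","file":"/etc/pw","keyFile":"/etc/key"}
}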
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
deleted file mode 100644
index 142748423..000000000
--- a/vendor/github.com/openshift/api/config/v1/types.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// ConfigMapFileReference references a config map in a specific namespace.
-// The namespace must be specified at the point of use.
-type ConfigMapFileReference struct {
- Name string `json:"name"`
- // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
- Key string `json:"key,omitempty"`
-}
-
-// ConfigMapNameReference references a config map in a specific namespace.
-// The namespace must be specified at the point of use.
-type ConfigMapNameReference struct {
- // name is the metadata.name of the referenced config map
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-}
-
-// SecretNameReference references a secret in a specific namespace.
-// The namespace must be specified at the point of use.
-type SecretNameReference struct {
- // name is the metadata.name of the referenced secret
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-}
-
-// HTTPServingInfo holds configuration for serving HTTP
-type HTTPServingInfo struct {
- // ServingInfo is the HTTP serving information
- ServingInfo `json:",inline"`
- // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
- MaxRequestsInFlight int64 `json:"maxRequestsInFlight"`
- // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if
- // -1, there is no limit on requests.
- RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"`
-}
-
-// ServingInfo holds information about serving web pages
-type ServingInfo struct {
- // BindAddress is the ip:port to serve on
- BindAddress string `json:"bindAddress"`
- // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
- // "tcp4", and "tcp6"
- BindNetwork string `json:"bindNetwork"`
- // CertInfo is the TLS cert info for serving secure traffic.
- // this is anonymous so that we can inline it for serialization
- CertInfo `json:",inline"`
- // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
- // +optional
- ClientCA string `json:"clientCA,omitempty"`
- // NamedCertificates is a list of certificates to use to secure requests to specific hostnames
- NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"`
- // MinTLSVersion is the minimum TLS version supported.
- // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
- MinTLSVersion string `json:"minTLSVersion,omitempty"`
- // CipherSuites contains an overridden list of ciphers for the server to support.
- // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants
- CipherSuites []string `json:"cipherSuites,omitempty"`
-}
-
-// CertInfo relates a certificate with a private key
-type CertInfo struct {
- // CertFile is a file containing a PEM-encoded certificate
- CertFile string `json:"certFile"`
- // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
- KeyFile string `json:"keyFile"`
-}
-
-// NamedCertificate specifies a certificate/key, and the names it should be served for
-type NamedCertificate struct {
- // Names is a list of DNS names this certificate should be used to secure
- // A name can be a normal DNS name, or can contain leading wildcard segments.
- Names []string `json:"names,omitempty"`
- // CertInfo is the TLS cert info for serving secure traffic
- CertInfo `json:",inline"`
-}
-
-// LeaderElection provides information to elect a leader
-type LeaderElection struct {
- // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case.
- Disable bool `json:"disable,omitempty"`
- // namespace indicates which namespace the resource is in
- Namespace string `json:"namespace,omitempty"`
- // name indicates what name to use for the resource
- Name string `json:"name,omitempty"`
-
- // leaseDuration is the duration that non-leader candidates will wait
- // after observing a leadership renewal until attempting to acquire
- // leadership of a led but unrenewed leader slot. This is effectively the
- // maximum duration that a leader can be stopped before it is replaced
- // by another candidate. This is only applicable if leader election is
- // enabled.
- // +nullable
- LeaseDuration metav1.Duration `json:"leaseDuration"`
- // renewDeadline is the interval between attempts by the acting master to
- // renew a leadership slot before it stops leading. This must be less
- // than or equal to the lease duration. This is only applicable if leader
- // election is enabled.
- // +nullable
- RenewDeadline metav1.Duration `json:"renewDeadline"`
- // retryPeriod is the duration the clients should wait between attempting
- // acquisition and renewal of a leadership. This is only applicable if
- // leader election is enabled.
- // +nullable
- RetryPeriod metav1.Duration `json:"retryPeriod"`
-}
-
-// StringSource allows specifying a string inline, or externally via env var or file.
-// When it contains only a string value, it marshals to a simple JSON string.
-type StringSource struct {
- // StringSourceSpec specifies the string value, or external location
- StringSourceSpec `json:",inline"`
-}
-
-// StringSourceSpec specifies a string value, or external location
-type StringSourceSpec struct {
- // Value specifies the cleartext value, or an encrypted value if keyFile is specified.
- Value string `json:"value"`
-
- // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
- Env string `json:"env"`
-
- // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
- File string `json:"file"`
-
- // KeyFile references a file containing the key to use to decrypt the value.
- KeyFile string `json:"keyFile"`
-}
-
-// RemoteConnectionInfo holds information necessary for establishing a remote connection
-type RemoteConnectionInfo struct {
- // URL is the remote URL to connect to
- URL string `json:"url"`
- // CA is the CA for verifying TLS connections
- CA string `json:"ca"`
- // CertInfo is the TLS client cert information to present
- // this is anonymous so that we can inline it for serialization
- CertInfo `json:",inline"`
-}
-
-type AdmissionConfig struct {
- PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"`
-
- // enabledPlugins is a list of admission plugins that must be on in addition to the default list.
- // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon
- // and can result in performance penalties and unexpected behavior.
- EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"`
-
- // disabledPlugins is a list of admission plugins that must be off. Putting something in this list
- // is almost always a mistake and likely to result in cluster instability.
- DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"`
-}
-
-// AdmissionPluginConfig holds the necessary configuration options for admission plugins
-type AdmissionPluginConfig struct {
- // Location is the path to a configuration file that contains the plugin's
- // configuration
- Location string `json:"location"`
-
- // Configuration is an embedded configuration object to be used as the plugin's
- // configuration. If present, it will be used instead of the path to the configuration file.
- // +nullable
- // +kubebuilder:pruning:PreserveUnknownFields
- Configuration runtime.RawExtension `json:"configuration"`
-}
-
-type LogFormatType string
-
-type WebHookModeType string
-
-const (
- // LogFormatLegacy saves events in 1-line text format.
- LogFormatLegacy LogFormatType = "legacy"
- // LogFormatJson saves events in structured JSON format.
- LogFormatJson LogFormatType = "json"
-
- // WebHookModeBatch indicates that the webhook should buffer audit events
- // internally, sending batch updates either once a certain number of
- // events have been received or a certain amount of time has passed.
- WebHookModeBatch WebHookModeType = "batch"
- // WebHookModeBlocking causes the webhook to block on every attempt to process
- // a set of events. This causes requests to the API server to wait for a
- // round trip to the external audit service before sending a response.
- WebHookModeBlocking WebHookModeType = "blocking"
-)
-
-// AuditConfig holds configuration for the audit capabilities
-type AuditConfig struct {
- // If this flag is set, the audit log will be printed in the logs.
- // The log contains the method, user, and requested URL.
- Enabled bool `json:"enabled"`
- // All requests coming to the apiserver will be logged to this file.
- AuditFilePath string `json:"auditFilePath"`
- // Maximum number of days to retain old log files based on the timestamp encoded in their filename.
- MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"`
- // Maximum number of old log files to retain.
- MaximumRetainedFiles int32 `json:"maximumRetainedFiles"`
- // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
- MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
-
- // PolicyFile is a path to the file that defines the audit policy configuration.
- PolicyFile string `json:"policyFile"`
- // PolicyConfiguration is an embedded policy configuration object to be used
- // as the audit policy configuration. If present, it will be used instead of
- // the path to the policy file.
- // +nullable
- // +kubebuilder:pruning:PreserveUnknownFields
- PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
-
- // Format of saved audits (legacy or json).
- LogFormat LogFormatType `json:"logFormat"`
-
- // Path to a .kubeconfig formatted file that defines the audit webhook configuration.
- WebHookKubeConfig string `json:"webHookKubeConfig"`
- // Strategy for sending audit events (block or batch).
- WebHookMode WebHookModeType `json:"webHookMode"`
-}
-
-// EtcdConnectionInfo holds information necessary for connecting to an etcd server
-type EtcdConnectionInfo struct {
- // URLs are the URLs for etcd
- URLs []string `json:"urls,omitempty"`
- // CA is a file containing trusted roots for the etcd server certificates
- CA string `json:"ca"`
- // CertInfo is the TLS client cert information for securing communication to etcd
- // this is anonymous so that we can inline it for serialization
- CertInfo `json:",inline"`
-}
-
-type EtcdStorageConfig struct {
- EtcdConnectionInfo `json:",inline"`
-
- // StoragePrefix is the path within etcd that the OpenShift resources will
- // be rooted under. If this value is changed, existing objects in etcd will
- // no longer be found.
- StoragePrefix string `json:"storagePrefix"`
-}
-
-// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd
-type GenericAPIServerConfig struct {
- // servingInfo describes how to start serving
- ServingInfo HTTPServingInfo `json:"servingInfo"`
-
- // corsAllowedOrigins
- CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
-
- // auditConfig describes how to configure audit information
- AuditConfig AuditConfig `json:"auditConfig"`
-
- // storageConfig contains information about how to use
- StorageConfig EtcdStorageConfig `json:"storageConfig"`
-
- // admissionConfig holds information about how to configure admission.
- AdmissionConfig AdmissionConfig `json:"admission"`
-
- KubeClientConfig KubeClientConfig `json:"kubeClientConfig"`
-}
-
-type KubeClientConfig struct {
- // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config
- KubeConfig string `json:"kubeConfig"`
-
- // connectionOverrides specifies client overrides for system components to loop back to this master.
- ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"`
-}
-
-type ClientConnectionOverrides struct {
- // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
- // default value of 'application/json'. This field will control all connections to the server used by a particular
- // client.
- AcceptContentTypes string `json:"acceptContentTypes"`
- // contentType is the content type used when sending data to the server from this client.
- ContentType string `json:"contentType"`
-
- // qps controls the number of queries per second allowed for this connection.
- QPS float32 `json:"qps"`
- // burst allows extra queries to accumulate when a client is exceeding its rate.
- Burst int32 `json:"burst"`
-}
-
-// GenericControllerConfig provides information to configure a controller
-type GenericControllerConfig struct {
- // ServingInfo is the HTTP serving information for the controller's endpoints
- ServingInfo HTTPServingInfo `json:"servingInfo"`
-
- // leaderElection provides information to elect a leader. Only override this if you have a specific need
- LeaderElection LeaderElection `json:"leaderElection"`
-
- // authentication allows configuration of authentication for the endpoints
- Authentication DelegatedAuthentication `json:"authentication"`
- // authorization allows configuration of authorization for the endpoints
- Authorization DelegatedAuthorization `json:"authorization"`
-}
-
-// DelegatedAuthentication allows authentication to be disabled.
-type DelegatedAuthentication struct {
- // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
- Disabled bool `json:"disabled,omitempty"`
-}
-
-// DelegatedAuthorization allows authorization to be disabled.
-type DelegatedAuthorization struct {
- // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
- Disabled bool `json:"disabled,omitempty"`
-}
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
deleted file mode 100644
index b347bd80e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// APIServer holds configuration (like serving certificates, client CA and CORS domains)
-// shared by all API servers in the system, among them especially kube-apiserver
-// and openshift-apiserver. The canonical name of an instance is 'cluster'.
-type APIServer struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
- // +kubebuilder:validation:Required
- // +required
- Spec APIServerSpec `json:"spec"`
- // +optional
- Status APIServerStatus `json:"status"`
-}
-
-type APIServerSpec struct {
- // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates
- // will be used for serving secure traffic.
- // +optional
- ServingCerts APIServerServingCerts `json:"servingCerts"`
- // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for
- // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid.
- // You usually only have to set this if you have your own PKI you wish to honor client certificates from.
- // The ConfigMap must exist in the openshift-config namespace and contain the following required fields:
- // - ConfigMap.Data["ca-bundle.crt"] - CA bundle.
- // +optional
- ClientCA ConfigMapNameReference `json:"clientCA"`
- // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the
- // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth
- // server from JavaScript applications.
- // The values are regular expressions that correspond to the Golang regular expression language.
- // +optional
- AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
- // encryption allows the configuration of encryption of resources at the datastore layer.
- // +optional
- Encryption APIServerEncryption `json:"encryption"`
- // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.
- //
- // If unset, a default (which may change between releases) is chosen. Note that only Old and
- // Intermediate profiles are currently supported, and the maximum available MinTLSVersion
- // is VersionTLS12.
- // +optional
- TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
-}
-
-type APIServerServingCerts struct {
- // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
- // If no named certificates are provided, or no named certificates match the server name as understood by a client,
- // the defaultServingCertificate will be used.
- // +optional
- NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
-}
-
-// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
-type APIServerNamedServingCert struct {
- // names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
- // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
- // Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.
- // +optional
- Names []string `json:"names,omitempty"`
- // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
- // The secret must exist in the openshift-config namespace and contain the following required fields:
- // - Secret.Data["tls.key"] - TLS private key.
- // - Secret.Data["tls.crt"] - TLS certificate.
- ServingCertificate SecretNameReference `json:"servingCertificate"`
-}
-
-type APIServerEncryption struct {
- // type defines what encryption type should be used to encrypt resources at the datastore layer.
- // When this field is unset (i.e. when it is set to the empty string), identity is implied.
- // The behavior of unset can and will change over time. Even if encryption is enabled by default,
- // the meaning of unset may change to a different encryption type based on changes in best practices.
- //
- // When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
- // This list of sensitive resources can and will change over time. The current authoritative list is:
- //
- // 1. secrets
- // 2. configmaps
- // 3. routes.route.openshift.io
- // 4. oauthaccesstokens.oauth.openshift.io
- // 5. oauthauthorizetokens.oauth.openshift.io
- //
- // +unionDiscriminator
- // +optional
- Type EncryptionType `json:"type,omitempty"`
-}
-
-// +kubebuilder:validation:Enum="";identity;aescbc
-type EncryptionType string
-
-const (
- // identity refers to a type where no encryption is performed at the datastore layer.
- // Resources are written as-is without encryption.
- EncryptionTypeIdentity EncryptionType = "identity"
-
- // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key
- // is used to perform encryption at the datastore layer.
- EncryptionTypeAESCBC EncryptionType = "aescbc"
-)
-
-type APIServerStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type APIServerList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
- Items []APIServer `json:"items"`
-}
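Putting the APIServer fields together: a minimal consumer-side sketch of how a client built against this vendored package might populate the canonical `cluster` object, enabling AES-CBC datastore encryption and a named serving certificate. The hostname and secret name are invented placeholders.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// exampleAPIServer serves a custom certificate for a single hostname and
// turns on AES-CBC encryption at the datastore layer.
func exampleAPIServer() *configv1.APIServer {
	return &configv1.APIServer{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // canonical instance name
		Spec: configv1.APIServerSpec{
			ServingCerts: configv1.APIServerServingCerts{
				NamedCertificates: []configv1.APIServerNamedServingCert{{
					Names:              []string{"api.example.com"},
					ServingCertificate: configv1.SecretNameReference{Name: "api-example-tls"},
				}},
			},
			Encryption: configv1.APIServerEncryption{Type: configv1.EncryptionTypeAESCBC},
		},
	}
}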
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
deleted file mode 100644
index eecfe75e7..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_authentication.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Authentication specifies cluster-wide settings for authentication (like OAuth and
-// webhook token authenticators). The canonical name of an instance is `cluster`.
-type Authentication struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec AuthenticationSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status AuthenticationStatus `json:"status"`
-}
-
-type AuthenticationSpec struct {
- // type identifies the cluster managed, user facing authentication mode in use.
- // Specifically, it manages the component that responds to login attempts.
- // The default is IntegratedOAuth.
- // +optional
- Type AuthenticationType `json:"type"`
-
- // oauthMetadata contains the discovery endpoint data for OAuth 2.0
- // Authorization Server Metadata for an external OAuth server.
- // This discovery document can be viewed from its served location:
- // oc get --raw '/.well-known/oauth-authorization-server'
- // For further details, see the IETF Draft:
- // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- // If oauthMetadata.name is non-empty, this value has precedence
- // over any metadata reference stored in status.
- // The key "oauthMetadata" is used to locate the data.
- // If specified and the config map or expected key is not found, no metadata is served.
- // If the specified metadata is not valid, no metadata is served.
- // The namespace for this config map is openshift-config.
- // +optional
- OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"`
-
- // webhookTokenAuthenticators configures remote token reviewers.
- // These remote authentication webhooks can be used to verify bearer tokens
- // via the tokenreviews.authentication.k8s.io REST API. This is required to
- // honor bearer tokens that are provisioned by an external authentication service.
- // The namespace for these secrets is openshift-config.
- // +optional
- WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"`
-}
-
-type AuthenticationStatus struct {
- // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0
- // Authorization Server Metadata for the in-cluster integrated OAuth server.
- // This discovery document can be viewed from its served location:
- // oc get --raw '/.well-known/oauth-authorization-server'
- // For further details, see the IETF Draft:
- // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
- // This contains the observed value based on cluster state.
- // An explicitly set value in spec.oauthMetadata has precedence over this field.
- // This field has no meaning if authentication spec.type is not set to IntegratedOAuth.
- // The key "oauthMetadata" is used to locate the data.
- // If the config map or expected key is not found, no metadata is served.
- // If the specified metadata is not valid, no metadata is served.
- // The namespace for this config map is openshift-config-managed.
- IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"`
-
- // TODO if we add support for an in-cluster operator managed Keycloak instance
- // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type AuthenticationList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Authentication `json:"items"`
-}
-
-type AuthenticationType string
-
-const (
- // None means that no cluster managed authentication system is in place.
- // Note that user login will only work if a manually configured system is in place and
- // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators.
- AuthenticationTypeNone AuthenticationType = "None"
-
- // IntegratedOAuth refers to the cluster managed OAuth server.
- // It is configured via the top level OAuth config.
- AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth"
-
- // TODO if we add support for an in-cluster operator managed Keycloak instance
- // AuthenticationTypeKeycloak AuthenticationType = "Keycloak"
-)
-
- // WebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator.
-type WebhookTokenAuthenticator struct {
- // kubeConfig contains kube config file data which describes how to access the remote webhook service.
- // For further details, see:
- // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
- // The key "kubeConfig" is used to locate the data.
- // If the secret or expected key is not found, the webhook is not honored.
- // If the specified kube config data is not valid, the webhook is not honored.
- // The namespace for this secret is determined by the point of use.
- KubeConfig SecretNameReference `json:"kubeConfig"`
-}
-
-const (
- // OAuthMetadataKey is the key for the oauth authorization server metadata
- OAuthMetadataKey = "oauthMetadata"
-
- // KubeConfigKey is the key for the kube config file data in a secret
- KubeConfigKey = "kubeConfig"
-)
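A sketch of the webhook wiring described above, assuming a secret named my-token-reviewer (a placeholder) exists in openshift-config and carries its kubeconfig under the "kubeConfig" key:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// exampleAuthentication keeps the integrated OAuth server as the login
// component while registering a remote token reviewer for bearer tokens.
func exampleAuthentication() *configv1.Authentication {
	return &configv1.Authentication{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.AuthenticationSpec{
			Type: configv1.AuthenticationTypeIntegratedOAuth,
			WebhookTokenAuthenticators: []configv1.WebhookTokenAuthenticator{
				{KubeConfig: configv1.SecretNameReference{Name: "my-token-reviewer"}},
			},
		},
	}
}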
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
deleted file mode 100644
index ef4512aa1..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_build.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package v1
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Build configures the behavior of OpenShift builds for the entire cluster.
-// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.
-//
-// The canonical name is "cluster"
-type Build struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec holds user-settable values for the build controller configuration
- // +kubebuilder:validation:Required
- // +required
- Spec BuildSpec `json:"spec"`
-}
-
-type BuildSpec struct {
- // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
- // should be trusted for image pushes and pulls during builds.
- // The namespace for this config map is openshift-config.
- //
- // DEPRECATED: Additional CAs for image pull and push should be set on
- // image.config.openshift.io/cluster instead.
- //
- // +optional
- AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
- // BuildDefaults controls the default information for Builds
- // +optional
- BuildDefaults BuildDefaults `json:"buildDefaults"`
- // BuildOverrides controls override settings for builds
- // +optional
- BuildOverrides BuildOverrides `json:"buildOverrides"`
-}
-
-type BuildDefaults struct {
- // DefaultProxy contains the default proxy settings for all build operations, including image pull/push
- // and source download.
- //
- // Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
- // in the build config's strategy.
- // +optional
- DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
-
- // GitProxy contains the proxy settings for git operations only. If set, this will override
- // any Proxy settings for all git commands, such as git clone.
- //
- // Values that are not set here will be inherited from DefaultProxy.
- // +optional
- GitProxy *ProxySpec `json:"gitProxy,omitempty"`
-
- // Env is a set of default environment variables that will be applied to the
- // build if the specified variables do not exist on the build
- // +optional
- Env []corev1.EnvVar `json:"env,omitempty"`
-
- // ImageLabels is a list of docker labels that are applied to the resulting image.
- // User can override a default label by providing a label with the same name in their
- // Build/BuildConfig.
- // +optional
- ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
-
- // Resources defines resource requirements to execute the build.
- // +optional
- Resources corev1.ResourceRequirements `json:"resources"`
-}
-
-type ImageLabel struct {
- // Name defines the name of the label. It must have non-zero length.
- Name string `json:"name"`
-
- // Value defines the literal value of the label.
- // +optional
- Value string `json:"value,omitempty"`
-}
-
-type BuildOverrides struct {
- // ImageLabels is a list of docker labels that are applied to the resulting image.
- // If a user provides a label in their Build/BuildConfig with the same name as one in this
- // list, the user's label will be overwritten.
- // +optional
- ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
-
- // NodeSelector is a selector which must be true for the build pod to fit on a node
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Tolerations is a list of Tolerations that will override any existing
- // tolerations set on a build pod.
- // +optional
- Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type BuildList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Build `json:"items"`
-}
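The defaults/overrides split is easiest to see side by side. In this illustrative sketch (all values invented), the "vendor" label can still be replaced by an individual BuildConfig, while the "distributor" label always wins because it is an override:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// exampleBuildConfig applies a default env var and label to every build,
// plus one label that cannot be overridden by users.
func exampleBuildConfig() *configv1.Build {
	return &configv1.Build{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.BuildSpec{
			BuildDefaults: configv1.BuildDefaults{
				Env:         []corev1.EnvVar{{Name: "GIT_SSL_NO_VERIFY", Value: "false"}},
				ImageLabels: []configv1.ImageLabel{{Name: "vendor", Value: "example.com"}},
			},
			BuildOverrides: configv1.BuildOverrides{
				ImageLabels: []configv1.ImageLabel{{Name: "distributor", Value: "example.com"}},
			},
		},
	}
}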
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
deleted file mode 100644
index 3681d0ff0..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterOperator is the Custom Resource object which holds the current state
-// of an operator. This object is used by operators to convey their state to
-// the rest of the cluster.
-type ClusterOperator struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // spec holds configuration that could apply to any operator.
- // +kubebuilder:validation:Required
- // +required
- Spec ClusterOperatorSpec `json:"spec"`
-
- // status holds the information about the state of an operator. It is consistent with status information across
- // the Kubernetes ecosystem.
- // +optional
- Status ClusterOperatorStatus `json:"status"`
-}
-
-// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause".
-type ClusterOperatorSpec struct {
-}
-
-// ClusterOperatorStatus provides information about the status of the operator.
-// +k8s:deepcopy-gen=true
-type ClusterOperatorStatus struct {
- // conditions describes the state of the operator's managed and monitored components.
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +optional
- Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
-
- // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
- // operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
- // An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
- // +optional
- Versions []OperandVersion `json:"versions,omitempty"`
-
- // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are:
- // 1. the detailed resource driving the operator
- // 2. operator namespaces
- // 3. operand namespaces
- // +optional
- RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"`
-
- // extension contains any additional status information specific to the
- // operator which owns this status object.
- // +nullable
- // +optional
- // +kubebuilder:pruning:PreserveUnknownFields
- Extension runtime.RawExtension `json:"extension"`
-}
-
-type OperandVersion struct {
- // name is the name of the particular operand this version is for. It usually matches container images, not operators.
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-
- // version indicates which version of a particular operand is currently being managed. It must always match the Available
- // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out
- // 1.1.0.
- // +kubebuilder:validation:Required
- // +required
- Version string `json:"version"`
-}
-
-// ObjectReference contains enough information to let you inspect or modify the referred object.
-type ObjectReference struct {
- // group of the referent.
- // +kubebuilder:validation:Required
- // +required
- Group string `json:"group"`
- // resource of the referent.
- // +kubebuilder:validation:Required
- // +required
- Resource string `json:"resource"`
- // namespace of the referent.
- // +optional
- Namespace string `json:"namespace,omitempty"`
- // name of the referent.
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-}
-
-type ConditionStatus string
-
-// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
-// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
-// can't decide if a resource is in the condition or not. In the future, we could add other
-// intermediate conditions, e.g. ConditionDegraded.
-const (
- ConditionTrue ConditionStatus = "True"
- ConditionFalse ConditionStatus = "False"
- ConditionUnknown ConditionStatus = "Unknown"
-)
-
-// ClusterOperatorStatusCondition represents the state of the operator's
-// managed and monitored components.
-// +k8s:deepcopy-gen=true
-type ClusterOperatorStatusCondition struct {
- // type specifies the aspect reported by this condition.
- // +kubebuilder:validation:Required
- // +required
- Type ClusterStatusConditionType `json:"type"`
-
- // status of the condition, one of True, False, Unknown.
- // +kubebuilder:validation:Required
- // +required
- Status ConditionStatus `json:"status"`
-
- // lastTransitionTime is the time of the last update to the current status property.
- // +kubebuilder:validation:Required
- // +required
- LastTransitionTime metav1.Time `json:"lastTransitionTime"`
-
- // reason is the CamelCase reason for the condition's current status.
- // +optional
- Reason string `json:"reason,omitempty"`
-
- // message provides additional information about the current condition.
- // This is only to be consumed by humans.
- // +optional
- Message string `json:"message,omitempty"`
-}
-
-// ClusterStatusConditionType is an aspect of operator state.
-type ClusterStatusConditionType string
-
-const (
- // Available indicates that the operand (e.g. openshift-apiserver for the
- // openshift-apiserver-operator), is functional and available in the cluster.
- OperatorAvailable ClusterStatusConditionType = "Available"
-
- // Progressing indicates that the operator is actively rolling out new code,
- // propagating config changes, or otherwise moving from one steady state to
- // another. Operators should not report progressing when they are reconciling
- // a previously known state.
- OperatorProgressing ClusterStatusConditionType = "Progressing"
-
- // Degraded indicates that the operator's current state does not match its
- // desired state over a period of time resulting in a lower quality of service.
- // The period of time may vary by component, but a Degraded state represents
- // persistent observation of a condition. As a result, a component should not
- // oscillate in and out of Degraded state. A service may be Available even
- // if it is degraded. For example, your service may desire 3 running pods, but 1
- // pod is crash-looping. The service is Available but Degraded because it
- // may have a lower quality of service. A component may be Progressing but
- // not Degraded because the transition from one state to another does not
- // persist over a long enough period to report Degraded. A service should not
- // report Degraded during the course of a normal upgrade. A service may report
- // Degraded in response to a persistent infrastructure failure that requires
- // administrator intervention. For example, if a control plane host is unhealthy
- // and must be replaced. An operator should report Degraded if unexpected
- // errors occur over a period, but the expectation is that all unexpected errors
- // are handled as operators mature.
- OperatorDegraded ClusterStatusConditionType = "Degraded"
-
- // Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False`
- // administrators should not upgrade their cluster and the message field should contain a human readable description
- // of what the administrator should do to allow the operator to successfully update. A missing condition, True,
- // and Unknown are all treated by the CVO as allowing an upgrade.
- OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
-)
-
-// ClusterOperatorList is a list of OperatorStatus resources.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type ClusterOperatorList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []ClusterOperator `json:"items"`
-}
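Consumers usually reduce the conditions array to simple predicates. A small helper sketch (not part of this package) that treats a missing condition as not Available:

package example

import configv1 "github.com/openshift/api/config/v1"

// findCondition scans an operator's reported conditions for a given type,
// returning nil when the condition has never been set.
func findCondition(conds []configv1.ClusterOperatorStatusCondition, t configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
	for i := range conds {
		if conds[i].Type == t {
			return &conds[i]
		}
	}
	return nil
}

// isAvailable reports whether the operator currently declares itself Available.
func isAvailable(status configv1.ClusterOperatorStatus) bool {
	c := findCondition(status.Conditions, configv1.OperatorAvailable)
	return c != nil && c.Status == configv1.ConditionTrue
}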
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
deleted file mode 100644
index 771e962ad..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
-// parameters related to automatic updates can be set.
-type ClusterVersion struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec is the desired state of the cluster version - the operator will work
- // to ensure that the desired version is applied to the cluster.
- // +kubebuilder:validation:Required
- // +required
- Spec ClusterVersionSpec `json:"spec"`
- // status contains information about the available updates and any in-progress
- // updates.
- // +optional
- Status ClusterVersionStatus `json:"status"`
-}
-
-// ClusterVersionSpec is the desired version state of the cluster. It includes
-// the version the cluster should be at, how the cluster is identified, and
-// where the cluster should look for version updates.
-// +k8s:deepcopy-gen=true
-type ClusterVersionSpec struct {
- // clusterID uniquely identifies this cluster. This is expected to be
- // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
- // hexadecimal values). This is a required field.
- // +kubebuilder:validation:Required
- // +required
- ClusterID ClusterID `json:"clusterID"`
-
- // desiredUpdate is an optional field that indicates the desired value of
- // the cluster version. Setting this value will trigger an upgrade (if
- // the current version does not match the desired version). The set of
- // recommended update values is listed as part of available updates in
- // status, and setting values outside that range may cause the upgrade
- // to fail. You may specify the version field without setting image if
- // an update exists with that version in the availableUpdates or history.
- //
- // If an upgrade fails the operator will halt and report status
- // about the failing component. Setting the desired update value back to
- // the previous version will cause a rollback to be attempted. Not all
- // rollbacks will succeed.
- //
- // +optional
- DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
-
- // upstream may be used to specify the preferred update server. By default
- // it will use the appropriate update server for the cluster and region.
- //
- // +optional
- Upstream URL `json:"upstream,omitempty"`
- // channel is an identifier for explicitly requesting that a non-default
- // set of updates be applied to this cluster. The default channel will
- // contain stable updates that are appropriate for production clusters.
- //
- // +optional
- Channel string `json:"channel,omitempty"`
-
- // overrides is a list of overrides for components that are managed by the
- // cluster version operator. Marking a component unmanaged will prevent
- // the operator from creating or updating the object.
- // +optional
- Overrides []ComponentOverride `json:"overrides,omitempty"`
-}
-
-// ClusterVersionStatus reports the status of the cluster versioning,
-// including any upgrades that are in progress. The current field will
-// be set to whichever version the cluster is reconciling to, and the
-// conditions array will report whether the update succeeded, is in
-// progress, or is failing.
-// +k8s:deepcopy-gen=true
-type ClusterVersionStatus struct {
- // desired is the version that the cluster is reconciling towards.
- // If the cluster is not yet fully initialized desired will be set
- // with the information available, which may be an image or a tag.
- // +kubebuilder:validation:Required
- // +required
- Desired Update `json:"desired"`
-
- // history contains a list of the most recent versions applied to the cluster.
- // This value may be empty during cluster startup, and then will be updated
- // when a new update is being applied. The list is ordered by recency, with
- // the newest update first. Updates in the history have state
- // Completed if the rollout completed - if an update was failing or halfway
- // applied the state will be Partial. Only a limited amount of update history
- // is preserved.
- // +optional
- History []UpdateHistory `json:"history,omitempty"`
-
- // observedGeneration reports which version of the spec is being synced.
- // If this value is not equal to metadata.generation, then the desired
- // and conditions fields may represent a previous version.
- // +kubebuilder:validation:Required
- // +required
- ObservedGeneration int64 `json:"observedGeneration"`
-
- // versionHash is a fingerprint of the content that the cluster will be
- // updated with. It is used by the operator to avoid unnecessary work
- // and is for internal use only.
- // +kubebuilder:validation:Required
- // +required
- VersionHash string `json:"versionHash"`
-
- // conditions provides information about the cluster version. The condition
- // "Available" is set to true if the desiredUpdate has been reached. The
- // condition "Progressing" is set to true if an update is being applied.
- // The condition "Degraded" is set to true if an update is currently blocked
- // by a temporary or permanent error. Conditions are only valid for the
- // current desiredUpdate when metadata.generation is equal to
- // status.observedGeneration.
- // +optional
- Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"`
-
- // availableUpdates contains the list of updates that are appropriate
- // for this cluster. This list may be empty if no updates are recommended,
- // if the update service is unavailable, or if an invalid channel has
- // been specified.
- // +nullable
- // +kubebuilder:validation:Required
- // +required
- AvailableUpdates []Update `json:"availableUpdates"`
-}
-
-// UpdateState is a constant representing whether an update was successfully
-// applied to the cluster or not.
-type UpdateState string
-
-const (
- // CompletedUpdate indicates an update was successfully applied
- // to the cluster (all resource updates were successful).
- CompletedUpdate UpdateState = "Completed"
- // PartialUpdate indicates an update was never completely applied
- // or is currently being applied.
- PartialUpdate UpdateState = "Partial"
-)
-
-// UpdateHistory is a single attempted update to the cluster.
-type UpdateHistory struct {
- // state reflects whether the update was fully applied. The Partial state
- // indicates the update is not fully applied, while the Completed state
- // indicates the update was successfully rolled out at least once (all
- // parts of the update successfully applied).
- // +kubebuilder:validation:Required
- // +required
- State UpdateState `json:"state"`
-
- // startedTime is the time at which the update was started.
- // +kubebuilder:validation:Required
- // +required
- StartedTime metav1.Time `json:"startedTime"`
- // completionTime, if set, is when the update was fully applied. The update
- // that is currently being applied will have a null completion time.
- // Completion time will always be set for entries that are not the current
- // update (usually to the started time of the next update).
- // +kubebuilder:validation:Required
- // +required
- // +nullable
- CompletionTime *metav1.Time `json:"completionTime"`
-
- // version is a semantic version identifying the update. If the
- // requested image does not define a version, or if a failure occurs
- // retrieving the image, this value may be empty.
- //
- // +optional
- Version string `json:"version"`
- // image is a container image location that contains the update. This value
- // is always populated.
- // +kubebuilder:validation:Required
- // +required
- Image string `json:"image"`
- // verified indicates whether the provided update was properly verified
- // before it was installed. If this is false the cluster may not be trusted.
- // +kubebuilder:validation:Required
- // +required
- Verified bool `json:"verified"`
-}
-
- // ClusterID is a string containing an RFC 4122 UUID.
-type ClusterID string
-
-// ComponentOverride allows overriding cluster version operator's behavior
-// for a component.
-// +k8s:deepcopy-gen=true
-type ComponentOverride struct {
- // kind identifies which object to override.
- // +kubebuilder:validation:Required
- // +required
- Kind string `json:"kind"`
- // group identifies the API group that the kind is in.
- // +kubebuilder:validation:Required
- // +required
- Group string `json:"group"`
-
- // namespace is the component's namespace. If the resource is cluster
- // scoped, the namespace should be empty.
- // +kubebuilder:validation:Required
- // +required
- Namespace string `json:"namespace"`
- // name is the component's name.
- // +kubebuilder:validation:Required
- // +required
- Name string `json:"name"`
-
- // unmanaged controls whether the cluster version operator should stop managing the
- // resources in this cluster.
- // Default: false
- // +kubebuilder:validation:Required
- // +required
- Unmanaged bool `json:"unmanaged"`
-}
-
-// URL is a thin wrapper around string that ensures the string is a valid URL.
-type URL string
-
-// Update represents a release of the ClusterVersionOperator, referenced by the
-// Image member.
-// +k8s:deepcopy-gen=true
-type Update struct {
- // version is a semantic version identifying the update. When this
- // field is part of spec, version is optional if image is specified.
- //
- // +optional
- Version string `json:"version"`
- // image is a container image location that contains the update. When this
- // field is part of spec, image is optional if version is specified and the
- // availableUpdates field contains a matching version.
- //
- // +optional
- Image string `json:"image"`
- // force allows an administrator to update to an image that has failed
- // verification, does not appear in the availableUpdates list, or otherwise
- // would be blocked by normal protections on update. This option should only
- // be used when the authenticity of the provided image has been verified out
- // of band because the provided image will run with full administrative access
- // to the cluster. Do not use this flag with images that come from unknown
- // or potentially malicious sources.
- //
- // This flag does not override other forms of consistency checking that are
- // required before a new update is deployed.
- //
- // +optional
- Force bool `json:"force"`
-}
-
-// RetrievedUpdates reports whether available updates have been retrieved from
-// the upstream update server. The condition is Unknown before retrieval, False
-// if the updates could not be retrieved or recently failed, or True if the
-// availableUpdates field is accurate and recent.
-const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates"
-
-// ClusterVersionList is a list of ClusterVersion resources.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type ClusterVersionList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []ClusterVersion `json:"items"`
-}
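A sketch of the flow the desiredUpdate comment describes: pick an entry from status.availableUpdates and copy it into spec.desiredUpdate so the operator reconciles toward it. The target version below is hypothetical.

package example

import configv1 "github.com/openshift/api/config/v1"

// requestUpdate sets spec.desiredUpdate from the recommended list and
// reports whether the requested version was found there.
func requestUpdate(cv *configv1.ClusterVersion) bool {
	for _, u := range cv.Status.AvailableUpdates {
		if u.Version == "4.4.3" { // hypothetical target version
			u := u // copy before taking the address
			cv.Spec.DesiredUpdate = &u
			return true
		}
	}
	return false
}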
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
deleted file mode 100644
index 22b0b5160..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_console.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Console holds cluster-wide configuration for the web console, including the
-// logout URL, and reports the public URL of the console. The canonical name is
-// `cluster`.
-type Console struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ConsoleSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ConsoleStatus `json:"status"`
-}
-
-// ConsoleSpec is the specification of the desired behavior of the Console.
-type ConsoleSpec struct {
- // +optional
- Authentication ConsoleAuthentication `json:"authentication"`
-}
-
-// ConsoleStatus defines the observed status of the Console.
-type ConsoleStatus struct {
- // The URL for the console. This will be derived from the host for the route that
- // is created for the console.
- ConsoleURL string `json:"consoleURL"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ConsoleList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Console `json:"items"`
-}
-
-// ConsoleAuthentication defines a list of optional configuration for console authentication.
-type ConsoleAuthentication struct {
- // An optional, absolute URL to redirect web browsers to after logging out of
- // the console. If not specified, it will redirect to the default login page.
- // This is required when using an identity provider that supports single
- // sign-on (SSO) such as:
- // - OpenID (Keycloak, Azure)
- // - RequestHeader (GSSAPI, SSPI, SAML)
- // - OAuth (GitHub, GitLab, Google)
- // Logging out of the console will destroy the user's token. The logoutRedirect
- // provides the user the option to perform single logout (SLO) through the identity
- // provider to destroy their single sign-on session.
- // +optional
- // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$`
- LogoutRedirect string `json:"logoutRedirect,omitempty"`
-}
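For example, pointing logoutRedirect at an identity provider's single-logout endpoint (the URL is illustrative) tears down the SSO session along with the console token:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// exampleConsole wires the console logout through the IdP's SLO endpoint.
func exampleConsole() *configv1.Console {
	return &configv1.Console{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.ConsoleSpec{
			Authentication: configv1.ConsoleAuthentication{
				LogoutRedirect: "https://idp.example.com/logout",
			},
		},
	}
}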
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
deleted file mode 100644
index 989ef99c3..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_dns.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// DNS holds cluster-wide information about DNS. The canonical name is `cluster`
-type DNS struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec DNSSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status DNSStatus `json:"status"`
-}
-
-type DNSSpec struct {
- // baseDomain is the base domain of the cluster. All managed DNS records will
- // be sub-domains of this base.
- //
- // For example, given the base domain `openshift.example.com`, an API server
- // DNS record may be created for `cluster-api.openshift.example.com`.
- //
- // Once set, this field cannot be changed.
- BaseDomain string `json:"baseDomain"`
- // publicZone is the location where all the DNS records that are publicly accessible to
- // the internet exist.
- //
- // If this field is nil, no public records should be created.
- //
- // Once set, this field cannot be changed.
- //
- // +optional
- PublicZone *DNSZone `json:"publicZone,omitempty"`
- // privateZone is the location where all the DNS records that are only available internally
- // to the cluster exist.
- //
- // If this field is nil, no private records should be created.
- //
- // Once set, this field cannot be changed.
- //
- // +optional
- PrivateZone *DNSZone `json:"privateZone,omitempty"`
-}
-
-// DNSZone is used to define a DNS hosted zone.
-// A zone can be identified by an ID or tags.
-type DNSZone struct {
- // id is the identifier that can be used to find the DNS hosted zone.
- //
- // On AWS, the zone can be fetched using `ID` as the id in [1];
- // on Azure, the zone can be fetched using `ID` as a pre-determined name in [2];
- // on GCP, the zone can be fetched using `ID` as a pre-determined name in [3].
- //
- // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
- // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
- // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get
- // +optional
- ID string `json:"id,omitempty"`
-
- // tags can be used to query the DNS hosted zone.
- //
- // On AWS, the resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag filters,
- //
- // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options
- // +optional
- Tags map[string]string `json:"tags,omitempty"`
-}
-
-type DNSStatus struct {
- // dnsSuffix (service-ca amongst others)
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type DNSList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []DNS `json:"items"`
-}
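The two DNSZone lookup styles can be mixed per zone. A sketch with invented values identifies the public zone by provider ID and the private zone by tags:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

// exampleDNS records the base domain plus one zone of each lookup style.
func exampleDNS() *configv1.DNS {
	return &configv1.DNS{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.DNSSpec{
			BaseDomain: "openshift.example.com",
			PublicZone: &configv1.DNSZone{ID: "Z3URY6TWQ91KVV"}, // placeholder hosted zone ID
			PrivateZone: &configv1.DNSZone{
				Tags: map[string]string{"kubernetes.io/cluster/example": "owned"},
			},
		},
	}
}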
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
deleted file mode 100644
index ce9012627..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_feature.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
- // FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`
-type FeatureGate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec FeatureGateSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status FeatureGateStatus `json:"status"`
-}
-
-type FeatureSet string
-
-var (
- // Default feature set that allows upgrades.
- Default FeatureSet = ""
-
- // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning
- // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
- TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
-
- // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
- // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
- // your cluster may fail in an unrecoverable way.
- CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
-
- // LatencySensitive enables TopologyManager support. Upgrades are enabled with this feature set.
- LatencySensitive FeatureSet = "LatencySensitive"
-)
-
-type FeatureGateSpec struct {
- FeatureGateSelection `json:",inline"`
-}
-
-// +union
-type FeatureGateSelection struct {
- // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
- // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
- // +unionDiscriminator
- // +optional
- FeatureSet FeatureSet `json:"featureSet,omitempty"`
-
- // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
- // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
- // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field.
- // +optional
- // +nullable
- CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
-}
-
-type CustomFeatureGates struct {
- // enabled is a list of all feature gates that you want to force on
- // +optional
- Enabled []string `json:"enabled,omitempty"`
- // disabled is a list of all feature gates that you want to force off
- // +optional
- Disabled []string `json:"disabled,omitempty"`
-}
-
-type FeatureGateStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type FeatureGateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []FeatureGate `json:"items"`
-}
-
-type FeatureGateEnabledDisabled struct {
- Enabled []string
- Disabled []string
-}
-
- // FeatureSets contains a map of FeatureSet names to their enabled and disabled feature gates.
-//
-// NOTE: The caller needs to make sure to check for the existence of the value
- // using Go's two-value ("comma ok") map lookup. A possible scenario is an upgrade where new
-// FeatureSets are added and a controller has not been upgraded with a newer
-// version of this file. In this upgrade scenario the map could return nil.
-//
-// example:
-// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { }
-//
-// If you put an item in either of these lists, put your area and name on it so we can find owners.
-var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
- Default: defaultFeatures,
- CustomNoUpgrade: {
- Enabled: []string{},
- Disabled: []string{},
- },
- TechPreviewNoUpgrade: newDefaultFeatures().toFeatures(),
- LatencySensitive: newDefaultFeatures().
- with(
- "TopologyManager", // sig-pod, sjenning
- ).
- toFeatures(),
-}
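A sketch of the defensive lookup the note above calls for; the two-value form guards against a FeatureSet key this copy of the file does not know about:

package example

import configv1 "github.com/openshift/api/config/v1"

// enabledGates returns the gates forced on for a set, or nil when the set
// is unknown to this vendored copy of the API.
func enabledGates(set configv1.FeatureSet) []string {
	if fg, ok := configv1.FeatureSets[set]; ok && fg != nil {
		return fg.Enabled
	}
	return nil
}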
-
-var defaultFeatures = &FeatureGateEnabledDisabled{
- Enabled: []string{
- "RotateKubeletServerCertificate", // sig-pod, sjenning
- "SupportPodPidsLimit", // sig-pod, sjenning
- "NodeDisruptionExclusion", // sig-scheduling, ccoleman
- "ServiceNodeExclusion", // sig-scheduling, ccoleman
- "SCTPSupport", // sig-network, ccallend
- },
- Disabled: []string{
- "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman
- },
-}
-
-type featureSetBuilder struct {
- forceOn []string
- forceOff []string
-}
-
-func newDefaultFeatures() *featureSetBuilder {
- return &featureSetBuilder{}
-}
-
-func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder {
- f.forceOn = append(f.forceOn, forceOn...)
- return f
-}
-
-func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder {
- f.forceOff = append(f.forceOff, forceOff...)
- return f
-}
-
-func (f *featureSetBuilder) isForcedOff(needle string) bool {
- for _, forcedOff := range f.forceOff {
- if needle == forcedOff {
- return true
- }
- }
- return false
-}
-
-func (f *featureSetBuilder) isForcedOn(needle string) bool {
- for _, forceOn := range f.forceOn {
- if needle == forceOn {
- return true
- }
- }
- return false
-}
-
-func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled {
- finalOn := []string{}
- finalOff := []string{}
-
- // only add the default enabled features if they haven't been explicitly set off
- for _, defaultOn := range defaultFeatures.Enabled {
- if !f.isForcedOff(defaultOn) {
- finalOn = append(finalOn, defaultOn)
- }
- }
- for _, currOn := range f.forceOn {
- if f.isForcedOff(currOn) {
- panic("coding error, you can't have features both on and off")
- }
- finalOn = append(finalOn, currOn)
- }
-
- // only add the default disabled features if they haven't been explicitly set on
- for _, defaultOff := range defaultFeatures.Disabled {
- if !f.isForcedOn(defaultOff) {
- finalOff = append(finalOff, defaultOff)
- }
- }
- for _, currOff := range f.forceOff {
- finalOff = append(finalOff, currOff)
- }
-
- return &FeatureGateEnabledDisabled{
- Enabled: finalOn,
- Disabled: finalOff,
- }
-}
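For reference, a hypothetical curated set built with this chain could read as follows; it would have to live in this package, since the builder is unexported:

// exampleLatencyTuned starts from the default features, forces one extra
// gate on and one default-enabled gate off; toFeatures reconciles the two
// lists (a gate both forced on and off would panic).
var exampleLatencyTuned = newDefaultFeatures().
	with("TopologyManager").
	without("SCTPSupport").
	toFeatures()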
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
deleted file mode 100644
index bf594c1b7..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_image.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Image governs policies related to imagestream imports and runtime configuration
-// for external registries. It allows cluster admins to configure which registries
-// OpenShift is allowed to import images from, extra CA trust bundles for external
-// registries, and policies to blacklist/whitelist registry hostnames.
-// When exposing OpenShift's image registry to the public, this also lets cluster
-// admins specify the external hostname.
-type Image struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ImageSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ImageStatus `json:"status"`
-}
-
-type ImageSpec struct {
- // allowedRegistriesForImport limits the container image registries that normal users may import
- // images from. Set this list to the registries that you trust to contain valid Docker
- // images and that you want applications to be able to import from. Users with
- // permission to create Images or ImageStreamMappings via the API are not affected by
- // this policy - typically only administrators or system integrations will have those
- // permissions.
- // +optional
- AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"`
-
- // externalRegistryHostnames provides the hostnames for the default external image
- // registry. The external hostname should be set only when the image registry
- // is exposed externally. The first value is used in 'publicDockerImageRepository'
- // field in ImageStreams. The value must be in "hostname[:port]" format.
- // +optional
- ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
-
- // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
- // should be trusted during imagestream import, pod image pull, build image pull, and
- // imageregistry pullthrough.
- // The namespace for this config map is openshift-config.
- // +optional
- AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
-
- // registrySources contains configuration that determines how the container runtime
- // should treat individual registries when accessing images for builds+pods. (e.g.
- // whether or not to allow insecure access). It does not contain configuration for the
- // internal cluster registry.
- // +optional
- RegistrySources RegistrySources `json:"registrySources"`
-}
-
-type ImageStatus struct {
-
- // internalRegistryHostname sets the hostname for the default internal image
- // registry. The value must be in "hostname[:port]" format.
- // This value is set by the image registry operator which controls the internal registry
- // hostname. For backward compatibility, users can still use the OPENSHIFT_DEFAULT_REGISTRY
- // environment variable but this setting overrides the environment variable.
- // +optional
- InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
-
- // externalRegistryHostnames provides the hostnames for the default external image
- // registry. The external hostname should be set only when the image registry
- // is exposed externally. The first value is used in 'publicDockerImageRepository'
- // field in ImageStreams. The value must be in "hostname[:port]" format.
- // +optional
- ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ImageList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Image `json:"items"`
-}
-
-// RegistryLocation contains a location of the registry specified by the registry domain
-// name. The domain name might include wildcards, like '*' or '??'.
-type RegistryLocation struct {
- // domainName specifies a domain name for the registry
- // In case the registry use non-standard (80 or 443) port, the port should be included
- // in the domain name as well.
- DomainName string `json:"domainName"`
- // insecure indicates whether the registry is secure (https) or insecure (http).
- // By default (if not specified) the registry is assumed to be secure.
- // +optional
- Insecure bool `json:"insecure,omitempty"`
-}
-
-// RegistrySources holds cluster-wide information about how to handle the registries config.
-type RegistrySources struct {
- // insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.
- // +optional
- InsecureRegistries []string `json:"insecureRegistries,omitempty"`
- // blockedRegistries are blacklisted from image pull/push. All other registries are allowed.
- //
- // Only one of BlockedRegistries or AllowedRegistries may be set.
- // +optional
- BlockedRegistries []string `json:"blockedRegistries,omitempty"`
- // allowedRegistries are whitelisted for image pull/push. All other registries are blocked.
- //
- // Only one of BlockedRegistries or AllowedRegistries may be set.
- // +optional
- AllowedRegistries []string `json:"allowedRegistries,omitempty"`
-}
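Since blockedRegistries and allowedRegistries are mutually exclusive, a policy sketch (registry names invented) sets only one of them alongside the insecure list:

package example

import configv1 "github.com/openshift/api/config/v1"

// exampleRegistrySources tolerates one insecure lab registry while blocking
// a known-bad registry; all other registries remain allowed.
func exampleRegistrySources() configv1.RegistrySources {
	return configv1.RegistrySources{
		InsecureRegistries: []string{"registry.lab.example.com:5000"},
		BlockedRegistries:  []string{"untrusted.example.org"},
	}
}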
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
deleted file mode 100644
index ac1e5048e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
-type Infrastructure struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec InfrastructureSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status InfrastructureStatus `json:"status"`
-}
-
-// InfrastructureSpec contains settings that apply to the cluster infrastructure.
-type InfrastructureSpec struct {
- // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
- // This configuration file is used to configure the Kubernetes cloud provider integration
- // when using the built-in cloud provider integration or the external cloud controller manager.
- // The namespace for this config map is openshift-config.
- // +optional
- CloudConfig ConfigMapFileReference `json:"cloudConfig"`
-}
-
-// InfrastructureStatus describes the infrastructure the cluster is leveraging.
-type InfrastructureStatus struct {
- // infrastructureName uniquely identifies a cluster with a human friendly name.
- // Once set it should not be changed. Must be of max length 27 and must have only
- // alphanumeric or hyphen characters.
- InfrastructureName string `json:"infrastructureName"`
-
- // platform is the underlying infrastructure provider for the cluster.
- //
- // Deprecated: Use platformStatus.type instead.
- Platform PlatformType `json:"platform,omitempty"`
-
- // platformStatus holds status information specific to the underlying
- // infrastructure provider.
- // +optional
- PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"`
-
- // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering
- // etcd servers and clients.
- // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery
- EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"`
-
- // apiServerURL is a valid URI with scheme (http/https), address and
- // port. apiServerURL can be used by components like the web console
- // to tell users where to find the Kubernetes API.
- APIServerURL string `json:"apiServerURL"`
-
- // apiServerInternalURL is a valid URI with scheme (http/https),
- // address and port. apiServerInternalURL can be used by components
- // like kubelets, to contact the Kubernetes API server using the
- // infrastructure provider rather than Kubernetes networking.
- APIServerInternalURL string `json:"apiServerInternalURI"`
-}
-
-// PlatformType is a specific supported infrastructure provider.
-type PlatformType string
-
-const (
- // AWSPlatformType represents Amazon Web Services infrastructure.
- AWSPlatformType PlatformType = "AWS"
-
- // AzurePlatformType represents Microsoft Azure infrastructure.
- AzurePlatformType PlatformType = "Azure"
-
- // BareMetalPlatformType represents managed bare metal infrastructure.
- BareMetalPlatformType PlatformType = "BareMetal"
-
- // GCPPlatformType represents Google Cloud Platform infrastructure.
- GCPPlatformType PlatformType = "GCP"
-
- // LibvirtPlatformType represents libvirt infrastructure.
- LibvirtPlatformType PlatformType = "Libvirt"
-
- // OpenStackPlatformType represents OpenStack infrastructure.
- OpenStackPlatformType PlatformType = "OpenStack"
-
- // NonePlatformType means there is no infrastructure provider.
- NonePlatformType PlatformType = "None"
-
- // VSpherePlatformType represents VMWare vSphere infrastructure.
- VSpherePlatformType PlatformType = "VSphere"
-
- // OvirtPlatformType represents oVirt/RHV infrastructure.
- OvirtPlatformType PlatformType = "oVirt"
-)
-
-// PlatformStatus holds the current status specific to the underlying infrastructure provider
-// of the current cluster. Since these are used at status-level for the underlying cluster, it
- // is expected that only one of the status structs is set.
-type PlatformStatus struct {
- // type is the underlying infrastructure provider for the cluster. This
- // value controls whether infrastructure automation such as service load
- // balancers, dynamic volume provisioning, machine creation and deletion, and
- // other integrations are enabled. If None, no infrastructure automation is
- // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
- // "OpenStack", "VSphere", "oVirt", and "None". Individual components may not support
- // all platforms, and must handle unrecognized platforms as None if they do
- // not support that platform.
- Type PlatformType `json:"type"`
-
- // AWS contains settings specific to the Amazon Web Services infrastructure provider.
- // +optional
- AWS *AWSPlatformStatus `json:"aws,omitempty"`
-
- // Azure contains settings specific to the Azure infrastructure provider.
- // +optional
- Azure *AzurePlatformStatus `json:"azure,omitempty"`
-
- // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
- // +optional
- GCP *GCPPlatformStatus `json:"gcp,omitempty"`
-
- // BareMetal contains settings specific to the BareMetal platform.
- // +optional
- BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
-
- // OpenStack contains settings specific to the OpenStack infrastructure provider.
- // +optional
- OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
-
- // Ovirt contains settings specific to the oVirt infrastructure provider.
- // +optional
- Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
-}
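The struct is a discriminated union: consumers switch on Type, read only the matching member, and fall back to None behavior for platforms they do not recognize. A sketch:

package example

import configv1 "github.com/openshift/api/config/v1"

// regionOf extracts a provider region where one exists, treating unknown
// platforms as None (no infrastructure automation).
func regionOf(ps *configv1.PlatformStatus) string {
	if ps == nil {
		return ""
	}
	switch ps.Type {
	case configv1.AWSPlatformType:
		if ps.AWS != nil {
			return ps.AWS.Region
		}
	case configv1.GCPPlatformType:
		if ps.GCP != nil {
			return ps.GCP.Region
		}
	}
	return "" // unrecognized platform: behave as None
}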
-
-// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
-type AWSPlatformStatus struct {
- // region holds the default AWS region for new AWS resources created by the cluster.
- Region string `json:"region"`
-}
-
-// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
-type AzurePlatformStatus struct {
- // resourceGroupName is the Resource Group for new Azure resources created for the cluster.
- ResourceGroupName string `json:"resourceGroupName"`
-
- // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
- // If empty, the value is the same as ResourceGroupName.
- // +optional
- NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
-}
-
-// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
-type GCPPlatformStatus struct {
- // projectID is the Project ID for new GCP resources created for the cluster.
- ProjectID string `json:"projectID"`
-
- // region holds the region for new GCP resources created for the cluster.
- Region string `json:"region"`
-}
-
-// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
-// For more information about the network architecture used with the BareMetal platform type, see:
-// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md
-type BareMetalPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // BareMetal deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-}
-
-// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.
-type OpenStackPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // cloudName is the name of the desired OpenStack cloud in the
- // client configuration file (`clouds.yaml`).
- CloudName string `json:"cloudName,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // OpenStack deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-}
-
-// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.
-type OvirtPlatformStatus struct {
- // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
- // by components inside the cluster, like kubelets using the infrastructure rather
- // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
- // points to. It is the IP for a self-hosted load balancer in front of the API servers.
- APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
-
- // ingressIP is an external IP which routes to the default ingress controller.
- // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
- IngressIP string `json:"ingressIP,omitempty"`
-
- // nodeDNSIP is the IP address for the internal DNS used by the
- // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
- // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
- // oVirt deployments. In order to minimize necessary changes to the
- // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
- // to the nodes in the cluster.
- NodeDNSIP string `json:"nodeDNSIP,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// InfrastructureList is a list of Infrastructure resources.
-type InfrastructureList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Infrastructure `json:"items"`
-}
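PlatformStatus above is effectively a discriminated union keyed on Type. As a minimal sketch of how a consumer might read it, assuming the upstream github.com/openshift/api/config/v1 package is importable as configv1 (the region value is illustrative):

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        // Only the struct matching Type is expected to be populated.
        status := configv1.PlatformStatus{
            Type: configv1.AWSPlatformType,
            AWS:  &configv1.AWSPlatformStatus{Region: "us-east-1"},
        }

        // Per the doc comment, unrecognized platforms must be handled as None.
        switch status.Type {
        case configv1.AWSPlatformType:
            fmt.Println("provisioning in region", status.AWS.Region)
        default:
            fmt.Println("no infrastructure automation enabled")
        }
    }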
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
deleted file mode 100644
index 0216919ad..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Ingress holds cluster-wide information about ingress, including the default ingress domain
-// used for routes. The canonical name is `cluster`.
-type Ingress struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec IngressSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status IngressStatus `json:"status"`
-}
-
-type IngressSpec struct {
- // domain is used to generate a default host name for a route when the
- // route's host name is empty. The generated host name will follow this
- // pattern: "<route-name>.<route-namespace>.<domain>".
- //
- // It is also used as the default wildcard domain suffix for ingress. The
- // default ingresscontroller domain will follow this pattern: "*.<domain>".
- //
- // Once set, changing domain is not currently supported.
- Domain string `json:"domain"`
-}
-
-type IngressStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type IngressList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Ingress `json:"items"`
-}
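The domain field drives default route host generation. A small sketch of the documented pattern follows; the route name and namespace values are purely illustrative:

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        spec := configv1.IngressSpec{Domain: "apps.example.com"}

        // Default host pattern from the doc comment:
        // "<route-name>.<route-namespace>.<domain>".
        host := fmt.Sprintf("%s.%s.%s", "frontend", "shop", spec.Domain)
        fmt.Println(host) // frontend.shop.apps.example.com
    }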
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
deleted file mode 100644
index a09c5fe8e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as IP address pools for service/pod IPs, the network plugin, etc.
-// Please see network.spec for an explanation of what applies when configuring this resource.
-type Network struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration.
- // As a general rule, this SHOULD NOT be read directly. Instead, you should
- // consume the NetworkStatus, as it indicates the currently deployed configuration.
- // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
- // +kubebuilder:validation:Required
- // +required
- Spec NetworkSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status NetworkStatus `json:"status"`
-}
-
-// NetworkSpec is the desired network configuration.
-// As a general rule, this SHOULD NOT be read directly. Instead, you should
-// consume the NetworkStatus, as it indicates the currently deployed configuration.
-// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
-type NetworkSpec struct {
- // IP address pool to use for pod IPs.
- // This field is immutable after installation.
- ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
-
- // IP address pool for services.
- // Currently, we only support a single entry here.
- // This field is immutable after installation.
- ServiceNetwork []string `json:"serviceNetwork"`
-
- // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN).
- // This should match a value that the cluster-network-operator understands,
- // or else no networking will be installed.
- // Currently supported values are:
- // - OpenShiftSDN
- // This field is immutable after installation.
- NetworkType string `json:"networkType"`
-
- // externalIP defines configuration for controllers that
- // affect Service.ExternalIP. If nil, then ExternalIP is
- // not allowed to be set.
- // +optional
- ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
-}
-
-// NetworkStatus is the current network configuration.
-type NetworkStatus struct {
- // IP address pool to use for pod IPs.
- ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
-
- // IP address pool for services.
- // Currently, we only support a single entry here.
- ServiceNetwork []string `json:"serviceNetwork,omitempty"`
-
- // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
- NetworkType string `json:"networkType,omitempty"`
-
- // ClusterNetworkMTU is the MTU for inter-pod networking.
- ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
-}
-
-// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
-// are allocated.
-type ClusterNetworkEntry struct {
- // The complete block for pod IPs.
- CIDR string `json:"cidr"`
-
- // The size (prefix) of block to allocate to each node.
- // +kubebuilder:validation:Minimum=0
- HostPrefix uint32 `json:"hostPrefix"`
-}
-
-// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
-// of a Service resource.
-type ExternalIPConfig struct {
- // policy is a set of restrictions applied to the ExternalIP field.
- // If nil or empty, then ExternalIP is not allowed to be set.
- // +optional
- Policy *ExternalIPPolicy `json:"policy,omitempty"`
-
- // autoAssignCIDRs is a list of CIDRs from which to automatically assign
- // Service.ExternalIP. These are assigned when the service is of type
- // LoadBalancer. In general, this is only useful for bare-metal clusters.
- // In Openshift 3.x, this was misleadingly called "IngressIPs".
- // Automatically assigned External IPs are not affected by any
- // ExternalIPPolicy rules.
- // Currently, only one entry may be provided.
- // +optional
- AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
-}
-
-// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
-// field in a Service. If the zero struct is supplied, then none are permitted.
-// The policy controller always allows automatically assigned external IPs.
-type ExternalIPPolicy struct {
- // allowedCIDRs is the list of allowed CIDRs.
- AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
-
- // rejectedCIDRs is the list of disallowed CIDRs. These take precedence
- // over allowedCIDRs.
- // +optional
- RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type NetworkList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Network `json:"items"`
-}
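To make the shape of NetworkSpec concrete, here is a hedged sketch that fills in commonly seen values; the CIDRs and plugin name are illustrative, not mandated by the types:

    package main

    import (
        "encoding/json"
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        spec := configv1.NetworkSpec{
            ClusterNetwork: []configv1.ClusterNetworkEntry{
                // Pods draw IPs from this block; each node gets a /23 slice.
                {CIDR: "10.128.0.0/14", HostPrefix: 23},
            },
            ServiceNetwork: []string{"172.30.0.0/16"}, // only one entry is supported
            NetworkType:    "OpenShiftSDN",
        }

        out, err := json.MarshalIndent(spec, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }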
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
deleted file mode 100644
index 15bc5b1c1..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_oauth.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// OAuth Server and Identity Provider Config
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`.
-// It is used to configure the integrated OAuth server.
-// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.
-type OAuth struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- // +kubebuilder:validation:Required
- // +required
- Spec OAuthSpec `json:"spec"`
- // +optional
- Status OAuthStatus `json:"status"`
-}
-
-// OAuthSpec contains desired cluster auth configuration
-type OAuthSpec struct {
- // identityProviders is an ordered list of ways for a user to identify themselves.
- // When this list is empty, no identities are provisioned for users.
- // +optional
- IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
-
- // tokenConfig contains options for authorization and access tokens
- TokenConfig TokenConfig `json:"tokenConfig"`
-
- // templates allow you to customize pages like the login page.
- // +optional
- Templates OAuthTemplates `json:"templates"`
-}
-
-// OAuthStatus shows current known state of OAuth server in the cluster
-type OAuthStatus struct {
- // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
-}
-
-// TokenConfig holds the necessary configuration options for authorization and access tokens
-type TokenConfig struct {
- // accessTokenMaxAgeSeconds defines the maximum age of access tokens
- AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"`
-
- // accessTokenInactivityTimeoutSeconds defines the default token
- // inactivity timeout for tokens granted by any client.
- // The value represents the maximum amount of time that can occur between
- // consecutive uses of the token. Tokens become invalid if they are not
- // used within this temporal window. The user will need to acquire a new
- // token to regain access once a token times out.
- // Valid values are integers:
- //   x < 0  Token timeout is enabled, but tokens never time out unless configured per client (e.g. `-1`)
- //   x = 0  Token timeout is disabled (default)
- //   x > 0  Tokens time out if there is no activity for x seconds
- // The current minimum allowed value for x is 300 (5 minutes)
- // +optional
- AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
-}
-
-const (
- // LoginTemplateKey is the key of the login template in a secret
- LoginTemplateKey = "login.html"
-
- // ProviderSelectionTemplateKey is the key for the provider selection template in a secret
- ProviderSelectionTemplateKey = "providers.html"
-
- // ErrorsTemplateKey is the key for the errors template in a secret
- ErrorsTemplateKey = "errors.html"
-
- // BindPasswordKey is the key for the LDAP bind password in a secret
- BindPasswordKey = "bindPassword"
-
- // ClientSecretKey is the key for the oauth client secret data in a secret
- ClientSecretKey = "clientSecret"
-
- // HTPasswdDataKey is the key for the htpasswd file data in a secret
- HTPasswdDataKey = "htpasswd"
-)
-
-// OAuthTemplates allow for customization of pages like the login page
-type OAuthTemplates struct {
- // login is the name of a secret that specifies a go template to use to render the login page.
- // The key "login.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default login page is used.
- // If the specified template is not valid, the default login page is used.
- // If unspecified, the default login page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- Login SecretNameReference `json:"login"`
-
- // providerSelection is the name of a secret that specifies a go template to use to render
- // the provider selection page.
- // The key "providers.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default provider selection page is used.
- // If the specified template is not valid, the default provider selection page is used.
- // If unspecified, the default provider selection page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- ProviderSelection SecretNameReference `json:"providerSelection"`
-
- // error is the name of a secret that specifies a go template to use to render error pages
- // during the authentication or grant flow.
- // The key "errors.html" is used to locate the template data.
- // If specified and the secret or expected key is not found, the default error page is used.
- // If the specified template is not valid, the default error page is used.
- // If unspecified, the default error page is used.
- // The namespace for this secret is openshift-config.
- // +optional
- Error SecretNameReference `json:"error"`
-}
-
-// IdentityProvider provides identities for users authenticating using credentials
-type IdentityProvider struct {
- // name is used to qualify the identities returned by this provider.
- // - It MUST be unique and not shared by any other identity provider used
- // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":"
- // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName
- Name string `json:"name"`
-
- // mappingMethod determines how identities from this provider are mapped to users
- // Defaults to "claim"
- // +optional
- MappingMethod MappingMethodType `json:"mappingMethod,omitempty"`
-
- IdentityProviderConfig `json:",inline"`
-}
-
-// MappingMethodType specifies how new identities should be mapped to users when they log in
-type MappingMethodType string
-
-const (
- // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user
- // with that user name is already mapped to another identity.
- // Default.
- MappingMethodClaim MappingMethodType = "claim"
-
- // MappingMethodLookup looks up existing users already mapped to an identity but does not
- // automatically provision users or identities. Requires identities and users be set up
- // manually or using an external process.
- MappingMethodLookup MappingMethodType = "lookup"
-
- // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with
- // that user name already exists, the identity is mapped to the existing user, adding to any
- // existing identity mappings for the user.
- MappingMethodAdd MappingMethodType = "add"
-)
-
-type IdentityProviderType string
-
-const (
- // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
- IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
-
- // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
- IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
-
- // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
- IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
-
- // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
- IdentityProviderTypeGoogle IdentityProviderType = "Google"
-
- // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
- IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
-
- // IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
- IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
-
- // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
- IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
-
- // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
- IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
-
- // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
- IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
-)
-
-// IdentityProviderConfig contains configuration for using a specific identity provider
-type IdentityProviderConfig struct {
- // type identifies the identity provider type for this entry.
- Type IdentityProviderType `json:"type"`
-
- // Provider-specific configuration
- // The json tag MUST match the `Type` specified above, case-insensitively
- // e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided
-
- // basicAuth contains configuration options for the BasicAuth IdP
- // +optional
- BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
-
- // github enables user authentication using GitHub credentials
- // +optional
- GitHub *GitHubIdentityProvider `json:"github,omitempty"`
-
- // gitlab enables user authentication using GitLab credentials
- // +optional
- GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
-
- // google enables user authentication using Google credentials
- // +optional
- Google *GoogleIdentityProvider `json:"google,omitempty"`
-
- // htpasswd enables user authentication using an HTPasswd file to validate credentials
- // +optional
- HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
-
- // keystone enables user authentication using keystone password credentials
- // +optional
- Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
-
- // ldap enables user authentication using LDAP credentials
- // +optional
- LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
-
- // openID enables user authentication using OpenID credentials
- // +optional
- OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
-
- // requestHeader enables user authentication using request header credentials
- // +optional
- RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
-}
-
-// BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
-type BasicAuthIdentityProvider struct {
- // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
- OAuthRemoteConnectionInfo `json:",inline"`
-}
-
-// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
-type OAuthRemoteConnectionInfo struct {
- // url is the remote URL to connect to
- URL string `json:"url"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // tlsClientCert is an optional reference to a secret by name that contains the
- // PEM-encoded TLS client certificate to present when connecting to the server.
- // The key "tls.crt" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // If the specified certificate data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- TLSClientCert SecretNameReference `json:"tlsClientCert"`
-
- // tlsClientKey is an optional reference to a secret by name that contains the
- // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
- // The key "tls.key" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // If the specified certificate data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- TLSClientKey SecretNameReference `json:"tlsClientKey"`
-}
-
-// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
-type HTPasswdIdentityProvider struct {
- // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
- // The key "htpasswd" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // If the specified htpasswd data is not valid, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- FileData SecretNameReference `json:"fileData"`
-}
-
-// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
-type LDAPIdentityProvider struct {
- // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
- // The syntax of the URL is:
- // ldap://host:port/basedn?attribute?scope?filter
- URL string `json:"url"`
-
- // bindDN is an optional DN to bind with during the search phase.
- // +optional
- BindDN string `json:"bindDN"`
-
- // bindPassword is an optional reference to a secret by name
- // containing a password to bind with during the search phase.
- // The key "bindPassword" is used to locate the data.
- // If specified and the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- // +optional
- BindPassword SecretNameReference `json:"bindPassword"`
-
- // insecure, if true, indicates the connection should not use TLS.
- // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
- // attempt to connect using TLS, even when `insecure` is set to `true`.
- // When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
- // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
- Insecure bool `json:"insecure"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // attributes maps LDAP attributes to identities
- Attributes LDAPAttributeMapping `json:"attributes"`
-}
-
-// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
-type LDAPAttributeMapping struct {
- // id is the list of attributes whose values should be used as the user ID. Required.
- // The first non-empty attribute is used. At least one attribute is required. If none of the listed
- // attributes has a value, authentication fails.
- // LDAP standard identity attribute is "dn"
- ID []string `json:"id"`
-
- // preferredUsername is the list of attributes whose values should be used as the preferred username.
- // LDAP standard login attribute is "uid"
- // +optional
- PreferredUsername []string `json:"preferredUsername,omitempty"`
-
- // name is the list of attributes whose values should be used as the display name. Optional.
- // If unspecified, no display name is set for the identity
- // LDAP standard display name attribute is "cn"
- // +optional
- Name []string `json:"name,omitempty"`
-
- // email is the list of attributes whose values should be used as the email address. Optional.
- // If unspecified, no email is set for the identity
- // +optional
- Email []string `json:"email,omitempty"`
-}
-
-// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
-type KeystoneIdentityProvider struct {
- // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
- OAuthRemoteConnectionInfo `json:",inline"`
-
- // domainName is required for keystone v3
- DomainName string `json:"domainName"`
-
- // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
- // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
- // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
- // +optional
- // UseUsernameIdentity bool `json:"useUsernameIdentity"`
-}
-
-// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
-type RequestHeaderIdentityProvider struct {
- // loginURL is a URL to redirect unauthenticated /authorize requests to
- // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
- // ${url} is replaced with the current URL, escaped to be safe in a query parameter
- // https://www.example.com/sso-login?then=${url}
- // ${query} is replaced with the current query string
- // https://www.example.com/auth-proxy/oauth/authorize?${query}
- // Required when login is set to true.
- LoginURL string `json:"loginURL"`
-
- // challengeURL is a URL to redirect unauthenticated /authorize requests to
- // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
- // redirected here.
- // ${url} is replaced with the current URL, escaped to be safe in a query parameter
- // https://www.example.com/sso-login?then=${url}
- // ${query} is replaced with the current query string
- // https://www.example.com/auth-proxy/oauth/authorize?${query}
- // Required when challenge is set to true.
- ChallengeURL string `json:"challengeURL"`
-
- // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // Specifically, it allows verification of incoming requests to prevent header spoofing.
- // The key "ca.crt" is used to locate the data.
- // If the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // The namespace for this config map is openshift-config.
- ClientCA ConfigMapNameReference `json:"ca"`
-
- // clientCommonNames is an optional list of common names to require a match against. If empty, any
- // client certificate validated against the clientCA bundle is considered authoritative.
- // +optional
- ClientCommonNames []string `json:"clientCommonNames,omitempty"`
-
- // headers is the set of headers to check for identity information
- Headers []string `json:"headers"`
-
- // preferredUsernameHeaders is the set of headers to check for the preferred username
- PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
-
- // nameHeaders is the set of headers to check for the display name
- NameHeaders []string `json:"nameHeaders"`
-
- // emailHeaders is the set of headers to check for the email address
- EmailHeaders []string `json:"emailHeaders"`
-}
-
-// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
-type GitHubIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // organizations optionally restricts which organizations are allowed to log in
- // +optional
- Organizations []string `json:"organizations,omitempty"`
-
- // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
- // +optional
- Teams []string `json:"teams,omitempty"`
-
- // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
- // GitHub Enterprise.
- // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
- // +optional
- Hostname string `json:"hostname"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // This can only be configured when hostname is set to a non-empty value.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-}
-
-// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
-type GitLabIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // url is the oauth server base URL
- URL string `json:"url"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-}
-
-// GoogleIdentityProvider provides identities for users authenticating using Google credentials
-type GoogleIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to
- // +optional
- HostedDomain string `json:"hostedDomain"`
-}
-
-// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials
-type OpenIDIdentityProvider struct {
- // clientID is the oauth client ID
- ClientID string `json:"clientID"`
-
- // clientSecret is a required reference to the secret by name containing the oauth client secret.
- // The key "clientSecret" is used to locate the data.
- // If the secret or expected key is not found, the identity provider is not honored.
- // The namespace for this secret is openshift-config.
- ClientSecret SecretNameReference `json:"clientSecret"`
-
- // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
- // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
- // The key "ca.crt" is used to locate the data.
- // If specified and the config map or expected key is not found, the identity provider is not honored.
- // If the specified ca data is not valid, the identity provider is not honored.
- // If empty, the default system roots are used.
- // The namespace for this config map is openshift-config.
- // +optional
- CA ConfigMapNameReference `json:"ca"`
-
- // extraScopes are any scopes to request in addition to the standard "openid" scope.
- // +optional
- ExtraScopes []string `json:"extraScopes,omitempty"`
-
- // extraAuthorizeParameters are any custom parameters to add to the authorize request.
- // +optional
- ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"`
-
- // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier.
- // It must use the https scheme with no query or fragment component.
- Issuer string `json:"issuer"`
-
- // claims mappings
- Claims OpenIDClaims `json:"claims"`
-}
-
-// UserIDClaim is the claim used to provide a stable identifier for OIDC identities.
-// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability
-// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can
-// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique
-// and never reassigned within the Issuer for a particular End-User, as described in Section 2.
-// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the
-// iss Claim and the sub Claim."
-const UserIDClaim = "sub"
-
-// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
-type OpenIDClaims struct {
- // preferredUsername is the list of claims whose values should be used as the preferred username.
- // If unspecified, the preferred username is determined from the value of the sub claim
- // +optional
- PreferredUsername []string `json:"preferredUsername,omitempty"`
-
- // name is the list of claims whose values should be used as the display name. Optional.
- // If unspecified, no display name is set for the identity
- // +optional
- Name []string `json:"name,omitempty"`
-
- // email is the list of claims whose values should be used as the email address. Optional.
- // If unspecified, no email is set for the identity
- // +optional
- Email []string `json:"email,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type OAuthList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []OAuth `json:"items"`
-}
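As a sketch of how IdentityProviderConfig's inline-union convention is used, here is a hypothetical HTPasswd provider; the secret name is illustrative, and SecretNameReference is the by-name reference type used throughout this package:

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        idp := configv1.IdentityProvider{
            Name:          "local-users",
            MappingMethod: configv1.MappingMethodClaim, // the default
            IdentityProviderConfig: configv1.IdentityProviderConfig{
                // The json tag of the populated member must match Type.
                Type: configv1.IdentityProviderTypeHTPasswd,
                HTPasswd: &configv1.HTPasswdIdentityProvider{
                    // Secret in openshift-config; key "htpasswd" holds the file data.
                    FileData: configv1.SecretNameReference{Name: "htpass-secret"},
                },
            },
        }
        fmt.Println(idp.Name, idp.Type)
    }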
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
deleted file mode 100644
index 1d998bf37..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// OperatorHubSpec defines the desired state of OperatorHub
-type OperatorHubSpec struct {
- // disableAllDefaultSources allows you to disable all the default hub
- // sources. If this is true, a specific entry in sources can be used to
- // enable a default source. If this is false, a specific entry in
- // sources can be used to disable or enable a default source.
- // +optional
- DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
- // sources is the list of default hub sources and their configuration.
- // If the list is empty, it implies that the default hub sources are
- // enabled on the cluster unless disableAllDefaultSources is true.
- // If disableAllDefaultSources is true and sources is not empty,
- // the configuration present in sources will take precedence. The list of
- // default hub sources and their current state will always be reflected in
- // the status block.
- // +optional
- Sources []HubSource `json:"sources,omitempty"`
-}
-
-// OperatorHubStatus defines the observed state of OperatorHub. The current
-// state of the default hub sources will always be reflected here.
-type OperatorHubStatus struct {
- // sources encapsulates the result of applying the configuration for each
- // hub source
- Sources []HubSourceStatus `json:"sources,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OperatorHub is the Schema for the operatorhubs API. It can be used to change
-// the state of the default hub sources for OperatorHub on the cluster from
-// enabled to disabled and vice versa.
-// +kubebuilder:subresource:status
-// +genclient:nonNamespaced
-type OperatorHub struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-
- Spec OperatorHubSpec `json:"spec"`
- Status OperatorHubStatus `json:"status"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// OperatorHubList contains a list of OperatorHub
-type OperatorHubList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
- Items []OperatorHub `json:"items"`
-}
-
-// HubSource is used to specify the hub source and its configuration
-type HubSource struct {
- // name is the name of one of the default hub sources
- // +kubebuilder:validation:MaxLength=253
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:Required
- Name string `json:"name"`
- // disabled is used to disable a default hub source on cluster
- // +kubebuilder:Required
- Disabled bool `json:"disabled"`
-}
-
-// HubSourceStatus is used to reflect the current state of applying the
-// configuration to a default source
-type HubSourceStatus struct {
- HubSource `json:",omitempty"`
- // status indicates success or failure in applying the configuration
- Status string `json:"status,omitempty"`
- // message provides more information regarding failures
- Message string `json:"message,omitempty"`
-}
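The interplay of disableAllDefaultSources and sources can be seen in a short sketch: disable everything by default, then re-enable a single source by name (the source name is illustrative):

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        spec := configv1.OperatorHubSpec{
            // Turn off every default hub source...
            DisableAllDefaultSources: true,
            Sources: []configv1.HubSource{
                // ...then this entry takes precedence and re-enables one.
                {Name: "redhat-operators", Disabled: false},
            },
        }
        fmt.Printf("%+v\n", spec)
    }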
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
deleted file mode 100644
index 244ce3ef8..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_project.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Project holds cluster-wide information about Project. The canonical name is `cluster`
-type Project struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ProjectSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ProjectStatus `json:"status"`
-}
-
-// TemplateReference references a template in a specific namespace.
-// The namespace must be specified at the point of use.
-type TemplateReference struct {
- // name is the metadata.name of the referenced project request template
- Name string `json:"name"`
-}
-
-// ProjectSpec holds the project creation configuration.
-type ProjectSpec struct {
- // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint
- // +optional
- ProjectRequestMessage string `json:"projectRequestMessage"`
-
- // projectRequestTemplate is the template to use for creating projects in response to projectrequest.
- // This must point to a template in 'openshift-config' namespace. It is optional.
- // If it is not specified, a default template is used.
- //
- // +optional
- ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"`
-}
-
-type ProjectStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ProjectList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Project `json:"items"`
-}
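A brief sketch of a populated ProjectSpec, assuming a template named "project-request" exists in the openshift-config namespace (both the message and the template name are illustrative):

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        spec := configv1.ProjectSpec{
            ProjectRequestMessage: "Contact the platform team to request a project.",
            // Must name a template in the openshift-config namespace.
            ProjectRequestTemplate: configv1.TemplateReference{Name: "project-request"},
        }
        fmt.Println(spec.ProjectRequestTemplate.Name)
    }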
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
deleted file mode 100644
index 93f4c487e..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_proxy.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package v1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`
-type Proxy struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec holds user-settable values for the proxy configuration
- // +kubebuilder:validation:Required
- // +required
- Spec ProxySpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status ProxyStatus `json:"status"`
-}
-
-// ProxySpec contains cluster proxy creation configuration.
-type ProxySpec struct {
- // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.
- // +optional
- HTTPProxy string `json:"httpProxy,omitempty"`
-
- // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.
- // +optional
- HTTPSProxy string `json:"httpsProxy,omitempty"`
-
- // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
- // Empty means unset and will not result in an env var.
- // +optional
- NoProxy string `json:"noProxy,omitempty"`
-
- // readinessEndpoints is a list of endpoints used to verify readiness of the proxy.
- // +optional
- ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"`
-
- // trustedCA is a reference to a ConfigMap containing a CA certificate bundle used
- // for client egress HTTPS connections. The certificate bundle must be from the CA
- // that signed the proxy's certificate and be signed for everything. The trustedCA
- // field should only be consumed by a proxy validator. The validator is responsible
- // for reading the certificate bundle from required key "ca-bundle.crt" and copying
- // it to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed"
- // namespace. The namespace for the ConfigMap referenced by trustedCA is
- // "openshift-config". Here is an example ConfigMap (in yaml):
- //
- // apiVersion: v1
- // kind: ConfigMap
- // metadata:
- // name: user-ca-bundle
- // namespace: openshift-config
- // data:
- // ca-bundle.crt: |
- // -----BEGIN CERTIFICATE-----
- // Custom CA certificate bundle.
- // -----END CERTIFICATE-----
- //
- // +optional
- TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"`
-}
-
-// ProxyStatus shows current known state of the cluster proxy.
-type ProxyStatus struct {
- // httpProxy is the URL of the proxy for HTTP requests.
- // +optional
- HTTPProxy string `json:"httpProxy,omitempty"`
-
- // httpsProxy is the URL of the proxy for HTTPS requests.
- // +optional
- HTTPSProxy string `json:"httpsProxy,omitempty"`
-
- // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
- // +optional
- NoProxy string `json:"noProxy,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type ProxyList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Proxy `json:"items"`
-}
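A hedged sketch of a ProxySpec matching the doc comments above; the proxy URLs and noProxy entries are illustrative, and ConfigMapNameReference is the by-name reference type used throughout this package:

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        spec := configv1.ProxySpec{
            HTTPProxy:  "http://proxy.example.com:3128",
            HTTPSProxy: "http://proxy.example.com:3128",
            NoProxy:    ".cluster.local,10.0.0.0/8",
            // References the "user-ca-bundle" ConfigMap in openshift-config;
            // the bundle lives under its required "ca-bundle.crt" key.
            TrustedCA: configv1.ConfigMapNameReference{Name: "user-ca-bundle"},
        }
        fmt.Println(spec.HTTPProxy, spec.NoProxy)
    }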
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
deleted file mode 100644
index d5bf0c362..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package v1
-
-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
-// and influence its placement decisions. The canonical name for this config is `cluster`.
-type Scheduler struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // spec holds user settable values for configuration
- // +kubebuilder:validation:Required
- // +required
- Spec SchedulerSpec `json:"spec"`
- // status holds observed values from the cluster. They may not be overridden.
- // +optional
- Status SchedulerStatus `json:"status"`
-}
-
-type SchedulerSpec struct {
- // policy is a reference to a ConfigMap containing a scheduler policy with
- // user-specified predicates and priorities. If this ConfigMap is not available,
- // the scheduler will default to using the DefaultAlgorithmProvider.
- // The namespace for this configmap is openshift-config.
- // +optional
- Policy ConfigMapNameReference `json:"policy"`
- // defaultNodeSelector helps set the cluster-wide default node selector to
- // restrict pod placement to specific nodes. This is applied to the pods
- // created in all namespaces without a specified nodeSelector value.
- // For example,
- // defaultNodeSelector: "type=user-node,region=east" would set the nodeSelector
- // field in the pod spec to "type=user-node,region=east" for all pods created
- // in all namespaces. Namespaces having project-wide node selectors won't be
- // impacted even if this field is set. This adds an annotation section to
- // the namespace.
- // For example, if a new namespace is created with
- // node-selector='type=user-node,region=east',
- // the annotation openshift.io/node-selector: type=user-node,region=east
- // gets added to the project. When the openshift.io/node-selector annotation
- // is set on the project, the value is used in preference to the value set
- // in the defaultNodeSelector field.
- // For instance,
- // openshift.io/node-selector: "type=user-node,region=west" means
- // that the default of "type=user-node,region=east" set in defaultNodeSelector
- // would not be applied.
- // +optional
- DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
- // MastersSchedulable allows master nodes to be schedulable. When this flag is
- // turned on, all the master nodes in the cluster will be made schedulable,
- // so that workload pods can run on them. The default value for this field is false,
- // meaning none of the master nodes are schedulable.
- // Important Note: Once the workload pods start running on the master nodes,
- // extreme care must be taken to ensure that cluster-critical control plane components
- // are not impacted.
- // Please turn on this field after doing due diligence.
- // +optional
- MastersSchedulable bool `json:"mastersSchedulable"`
-}
-
-type SchedulerStatus struct {
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type SchedulerList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Scheduler `json:"items"`
-}
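To ground the defaultNodeSelector discussion, a minimal sketch using the selector string from the doc comment:

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    func main() {
        spec := configv1.SchedulerSpec{
            // Applied to pods in namespaces that set no nodeSelector of their own.
            DefaultNodeSelector: "type=user-node,region=east",
            // Keep masters unschedulable (the default) for ordinary workloads.
            MastersSchedulable: false,
        }
        fmt.Println(spec.DefaultNodeSelector)
    }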
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
deleted file mode 100644
index ea788dc16..000000000
--- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package v1
-
-// TLSSecurityProfile defines the schema for a TLS security profile. This object
-// is used by operators to apply TLS security settings to operands.
-// +union
-type TLSSecurityProfile struct {
- // type is one of Old, Intermediate, Modern or Custom. Custom provides
- // the ability to specify individual TLS security profile parameters.
- // Old, Intermediate and Modern are TLS security profiles based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
- //
- // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
- // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
- // reduced.
- //
- // Note that the Modern profile is currently not supported because it is not
- // yet well adopted by common software libraries.
- //
- // +unionDiscriminator
- // +optional
- Type TLSProfileType `json:"type"`
- // old is a TLS security profile based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
- //
- // and looks like this (yaml):
- //
- // ciphers:
- // - TLS_AES_128_GCM_SHA256
- // - TLS_AES_256_GCM_SHA384
- // - TLS_CHACHA20_POLY1305_SHA256
- // - ECDHE-ECDSA-AES128-GCM-SHA256
- // - ECDHE-RSA-AES128-GCM-SHA256
- // - ECDHE-ECDSA-AES256-GCM-SHA384
- // - ECDHE-RSA-AES256-GCM-SHA384
- // - ECDHE-ECDSA-CHACHA20-POLY1305
- // - ECDHE-RSA-CHACHA20-POLY1305
- // - DHE-RSA-AES128-GCM-SHA256
- // - DHE-RSA-AES256-GCM-SHA384
- // - DHE-RSA-CHACHA20-POLY1305
- // - ECDHE-ECDSA-AES128-SHA256
- // - ECDHE-RSA-AES128-SHA256
- // - ECDHE-ECDSA-AES128-SHA
- // - ECDHE-RSA-AES128-SHA
- // - ECDHE-ECDSA-AES256-SHA384
- // - ECDHE-RSA-AES256-SHA384
- // - ECDHE-ECDSA-AES256-SHA
- // - ECDHE-RSA-AES256-SHA
- // - DHE-RSA-AES128-SHA256
- // - DHE-RSA-AES256-SHA256
- // - AES128-GCM-SHA256
- // - AES256-GCM-SHA384
- // - AES128-SHA256
- // - AES256-SHA256
- // - AES128-SHA
- // - AES256-SHA
- // - DES-CBC3-SHA
- // minTLSVersion: TLSv1.0
- //
- // +optional
- // +nullable
- Old *OldTLSProfile `json:"old,omitempty"`
- // intermediate is a TLS security profile based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
- //
- // and looks like this (yaml):
- //
- // ciphers:
- // - TLS_AES_128_GCM_SHA256
- // - TLS_AES_256_GCM_SHA384
- // - TLS_CHACHA20_POLY1305_SHA256
- // - ECDHE-ECDSA-AES128-GCM-SHA256
- // - ECDHE-RSA-AES128-GCM-SHA256
- // - ECDHE-ECDSA-AES256-GCM-SHA384
- // - ECDHE-RSA-AES256-GCM-SHA384
- // - ECDHE-ECDSA-CHACHA20-POLY1305
- // - ECDHE-RSA-CHACHA20-POLY1305
- // - DHE-RSA-AES128-GCM-SHA256
- // - DHE-RSA-AES256-GCM-SHA384
- // minTLSVersion: TLSv1.2
- //
- // +optional
- // +nullable
- Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"`
- // modern is a TLS security profile based on:
- //
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- //
- // and looks like this (yaml):
- //
- // ciphers:
- // - TLS_AES_128_GCM_SHA256
- // - TLS_AES_256_GCM_SHA384
- // - TLS_CHACHA20_POLY1305_SHA256
- // minTLSVersion: TLSv1.3
- //
- // NOTE: Currently unsupported.
- //
- // +optional
- // +nullable
- Modern *ModernTLSProfile `json:"modern,omitempty"`
- // custom is a user-defined TLS security profile. Be extremely careful using a custom
- // profile as invalid configurations can be catastrophic. An example custom profile
- // looks like this:
- //
- // ciphers:
- // - ECDHE-ECDSA-CHACHA20-POLY1305
- // - ECDHE-RSA-CHACHA20-POLY1305
- // - ECDHE-RSA-AES128-GCM-SHA256
- // - ECDHE-ECDSA-AES128-GCM-SHA256
- // minTLSVersion: TLSv1.1
- //
- // +optional
- // +nullable
- Custom *CustomTLSProfile `json:"custom,omitempty"`
-}
-
-// OldTLSProfile is a TLS security profile based on:
-// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
-type OldTLSProfile struct{}
-
-// IntermediateTLSProfile is a TLS security profile based on:
-// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
-type IntermediateTLSProfile struct{}
-
-// ModernTLSProfile is a TLS security profile based on:
-// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
-type ModernTLSProfile struct{}
-
-// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful
-// using a custom TLS profile as invalid configurations can be catastrophic.
-type CustomTLSProfile struct {
- TLSProfileSpec `json:",inline"`
-}
-
-// TLSProfileType defines a TLS security profile type.
-type TLSProfileType string
-
-const (
- // Old is a TLS security profile based on:
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
- TLSProfileOldType TLSProfileType = "Old"
- // Intermediate is a TLS security profile based on:
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
- TLSProfileIntermediateType TLSProfileType = "Intermediate"
- // Modern is a TLS security profile based on:
- // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- TLSProfileModernType TLSProfileType = "Modern"
- // Custom is a TLS security profile that allows for user-defined parameters.
- TLSProfileCustomType TLSProfileType = "Custom"
-)
-
-// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
-type TLSProfileSpec struct {
- // ciphers is used to specify the cipher algorithms that are negotiated
- // during the TLS handshake. Operators may remove entries their operands
- // do not support. For example, to use DES-CBC3-SHA (yaml):
- //
- // ciphers:
- // - DES-CBC3-SHA
- //
- Ciphers []string `json:"ciphers"`
- // minTLSVersion is used to specify the minimal version of the TLS protocol
- // that is negotiated during the TLS handshake. For example, to use TLS
- // versions 1.1, 1.2 and 1.3 (yaml):
- //
- // minTLSVersion: TLSv1.1
- //
- // NOTE: currently the highest minTLSVersion allowed is VersionTLS12
- //
- MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
-}
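
The yaml fragments in the comments above correspond one-to-one to Go values. For example, the custom-profile snippet shown earlier can be written as the following literal (illustrative only):

	spec := TLSProfileSpec{
		Ciphers: []string{
			"ECDHE-ECDSA-CHACHA20-POLY1305",
			"ECDHE-RSA-CHACHA20-POLY1305",
			"ECDHE-RSA-AES128-GCM-SHA256",
			"ECDHE-ECDSA-AES128-GCM-SHA256",
		},
		MinTLSVersion: VersionTLS11,
	}
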
-
-// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
-// Protocol versions are based on the following most common TLS configurations:
-//
-// https://ssl-config.mozilla.org/
-//
-// Note that SSLv3.0 is not a supported protocol version due to well-known
-// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
-type TLSProtocolVersion string
-
-const (
- // VersionTLS10 is version 1.0 of the TLS security protocol.
- VersionTLS10 TLSProtocolVersion = "VersionTLS10"
- // VersionTLS11 is version 1.1 of the TLS security protocol.
- VersionTLS11 TLSProtocolVersion = "VersionTLS11"
- // VersionTLS12 is version 1.2 of the TLS security protocol.
- VersionTLS12 TLSProtocolVersion = "VersionTLS12"
- // VersionTLS13 is version 1.3 of the TLS security protocol.
- VersionTLS13 TLSProtocolVersion = "VersionTLS13"
-)
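
These constants are strings rather than the numeric identifiers used by Go's crypto/tls package, so a consumer must translate them before building a tls.Config. A minimal sketch of such a helper (hypothetical, not part of this package; it assumes crypto/tls is imported):

	func tlsVersion(v TLSProtocolVersion) uint16 {
		switch v {
		case VersionTLS10:
			return tls.VersionTLS10
		case VersionTLS11:
			return tls.VersionTLS11
		case VersionTLS12:
			return tls.VersionTLS12
		case VersionTLS13:
			return tls.VersionTLS13
		default:
			return tls.VersionTLS12 // conservative fallback; an assumption, not package behavior
		}
	}
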
-
-// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
-//
-// NOTE: The caller must check that these constants are valid for its binary. Not all
-// entries map to values for all binaries. In the case of ties, the kube-apiserver wins.
-// Do not fail on unknown entries; just whitelist the supported ones and everything will be OK.
-var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
- TLSProfileOldType: {
- Ciphers: []string{
- "TLS_AES_128_GCM_SHA256",
- "TLS_AES_256_GCM_SHA384",
- "TLS_CHACHA20_POLY1305_SHA256",
- "ECDHE-ECDSA-AES128-GCM-SHA256",
- "ECDHE-RSA-AES128-GCM-SHA256",
- "ECDHE-ECDSA-AES256-GCM-SHA384",
- "ECDHE-RSA-AES256-GCM-SHA384",
- "ECDHE-ECDSA-CHACHA20-POLY1305",
- "ECDHE-RSA-CHACHA20-POLY1305",
- "DHE-RSA-AES128-GCM-SHA256",
- "DHE-RSA-AES256-GCM-SHA384",
- "DHE-RSA-CHACHA20-POLY1305",
- "ECDHE-ECDSA-AES128-SHA256",
- "ECDHE-RSA-AES128-SHA256",
- "ECDHE-ECDSA-AES128-SHA",
- "ECDHE-RSA-AES128-SHA",
- "ECDHE-ECDSA-AES256-SHA384",
- "ECDHE-RSA-AES256-SHA384",
- "ECDHE-ECDSA-AES256-SHA",
- "ECDHE-RSA-AES256-SHA",
- "DHE-RSA-AES128-SHA256",
- "DHE-RSA-AES256-SHA256",
- "AES128-GCM-SHA256",
- "AES256-GCM-SHA384",
- "AES128-SHA256",
- "AES256-SHA256",
- "AES128-SHA",
- "AES256-SHA",
- "DES-CBC3-SHA",
- },
- MinTLSVersion: VersionTLS10,
- },
- TLSProfileIntermediateType: {
- Ciphers: []string{
- "TLS_AES_128_GCM_SHA256",
- "TLS_AES_256_GCM_SHA384",
- "TLS_CHACHA20_POLY1305_SHA256",
- "ECDHE-ECDSA-AES128-GCM-SHA256",
- "ECDHE-RSA-AES128-GCM-SHA256",
- "ECDHE-ECDSA-AES256-GCM-SHA384",
- "ECDHE-RSA-AES256-GCM-SHA384",
- "ECDHE-ECDSA-CHACHA20-POLY1305",
- "ECDHE-RSA-CHACHA20-POLY1305",
- "DHE-RSA-AES128-GCM-SHA256",
- "DHE-RSA-AES256-GCM-SHA384",
- },
- MinTLSVersion: VersionTLS12,
- },
- TLSProfileModernType: {
- Ciphers: []string{
- "TLS_AES_128_GCM_SHA256",
- "TLS_AES_256_GCM_SHA384",
- "TLS_CHACHA20_POLY1305_SHA256",
- },
- MinTLSVersion: VersionTLS13,
- },
-}
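
Since the Custom type intentionally has no prebuilt entry in this map, a consumer typically resolves a requested type against it and supplies its own fallback. A hypothetical lookup helper:

	// specFor is a hypothetical consumer-side helper, not part of this package.
	func specFor(t TLSProfileType) *TLSProfileSpec {
		if spec, ok := TLSProfiles[t]; ok {
			return spec
		}
		// Custom (or an unknown type) falls back to Intermediate here;
		// the fallback choice is an assumption made for illustration.
		return TLSProfiles[TLSProfileIntermediateType]
	}
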
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
deleted file mode 100644
index 37888a939..000000000
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,3365 +0,0 @@
-// +build !ignore_autogenerated
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1
-
-import (
- corev1 "k8s.io/api/core/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServer) DeepCopyInto(out *APIServer) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer.
-func (in *APIServer) DeepCopy() *APIServer {
- if in == nil {
- return nil
- }
- out := new(APIServer)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *APIServer) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
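
Every exported type in this generated file repeats the same three-method pattern: DeepCopyInto writes into caller-provided storage, DeepCopy allocates a new value and delegates to DeepCopyInto, and DeepCopyObject satisfies runtime.Object for scheme-registered types. A typical (hypothetical) use is shielding a shared object, for example one taken from an informer cache, before mutating it; Annotations below is the field promoted from the embedded ObjectMeta:

	// annotated is a hypothetical caller-side helper, not generated code.
	func annotated(shared *APIServer) *APIServer {
		obj := shared.DeepCopy() // independent copy; the shared original stays untouched
		if obj.Annotations == nil {
			obj.Annotations = map[string]string{}
		}
		obj.Annotations["example.com/processed"] = "true" // hypothetical annotation key
		return obj
	}
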
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption.
-func (in *APIServerEncryption) DeepCopy() *APIServerEncryption {
- if in == nil {
- return nil
- }
- out := new(APIServerEncryption)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerList) DeepCopyInto(out *APIServerList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]APIServer, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList.
-func (in *APIServerList) DeepCopy() *APIServerList {
- if in == nil {
- return nil
- }
- out := new(APIServerList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *APIServerList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) {
- *out = *in
- if in.Names != nil {
- in, out := &in.Names, &out.Names
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.ServingCertificate = in.ServingCertificate
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert.
-func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert {
- if in == nil {
- return nil
- }
- out := new(APIServerNamedServingCert)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) {
- *out = *in
- if in.NamedCertificates != nil {
- in, out := &in.NamedCertificates, &out.NamedCertificates
- *out = make([]APIServerNamedServingCert, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts.
-func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts {
- if in == nil {
- return nil
- }
- out := new(APIServerServingCerts)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
- *out = *in
- in.ServingCerts.DeepCopyInto(&out.ServingCerts)
- out.ClientCA = in.ClientCA
- if in.AdditionalCORSAllowedOrigins != nil {
- in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.Encryption = in.Encryption
- if in.TLSSecurityProfile != nil {
- in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
- *out = new(TLSSecurityProfile)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec.
-func (in *APIServerSpec) DeepCopy() *APIServerSpec {
- if in == nil {
- return nil
- }
- out := new(APIServerSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus.
-func (in *APIServerStatus) DeepCopy() *APIServerStatus {
- if in == nil {
- return nil
- }
- out := new(APIServerStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
-func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(AWSPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
- *out = *in
- if in.PluginConfig != nil {
- in, out := &in.PluginConfig, &out.PluginConfig
- *out = make(map[string]AdmissionPluginConfig, len(*in))
- for key, val := range *in {
- (*out)[key] = *val.DeepCopy()
- }
- }
- if in.EnabledAdmissionPlugins != nil {
- in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.DisabledAdmissionPlugins != nil {
- in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig.
-func (in *AdmissionConfig) DeepCopy() *AdmissionConfig {
- if in == nil {
- return nil
- }
- out := new(AdmissionConfig)
- in.DeepCopyInto(out)
- return out
-}
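
Note how the generator chooses a copy strategy per field: the built-in copy is enough for slices of plain strings, while map values that themselves contain references go through DeepCopy. The distinction matters because assigning a map only aliases its backing storage. A short, hypothetical illustration:

	a := map[string]AdmissionPluginConfig{}
	b := a // aliases a: writes through b are visible through a
	c := make(map[string]AdmissionPluginConfig, len(a))
	for key, val := range a {
		c[key] = *val.DeepCopy() // element-wise copy, as in the generated code above
	}
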
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) {
- *out = *in
- in.Configuration.DeepCopyInto(&out.Configuration)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig.
-func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig {
- if in == nil {
- return nil
- }
- out := new(AdmissionPluginConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
- *out = *in
- in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
-func (in *AuditConfig) DeepCopy() *AuditConfig {
- if in == nil {
- return nil
- }
- out := new(AuditConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Authentication) DeepCopyInto(out *Authentication) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
-func (in *Authentication) DeepCopy() *Authentication {
- if in == nil {
- return nil
- }
- out := new(Authentication)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Authentication) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Authentication, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList.
-func (in *AuthenticationList) DeepCopy() *AuthenticationList {
- if in == nil {
- return nil
- }
- out := new(AuthenticationList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *AuthenticationList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
- *out = *in
- out.OAuthMetadata = in.OAuthMetadata
- if in.WebhookTokenAuthenticators != nil {
- in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators
- *out = make([]WebhookTokenAuthenticator, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
-func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
- if in == nil {
- return nil
- }
- out := new(AuthenticationSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) {
- *out = *in
- out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus.
-func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus {
- if in == nil {
- return nil
- }
- out := new(AuthenticationStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus.
-func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus {
- if in == nil {
- return nil
- }
- out := new(AzurePlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus.
-func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(BareMetalPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) {
- *out = *in
- out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider.
-func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(BasicAuthIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Build) DeepCopyInto(out *Build) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
-func (in *Build) DeepCopy() *Build {
- if in == nil {
- return nil
- }
- out := new(Build)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Build) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) {
- *out = *in
- if in.DefaultProxy != nil {
- in, out := &in.DefaultProxy, &out.DefaultProxy
- *out = new(ProxySpec)
- (*in).DeepCopyInto(*out)
- }
- if in.GitProxy != nil {
- in, out := &in.GitProxy, &out.GitProxy
- *out = new(ProxySpec)
- (*in).DeepCopyInto(*out)
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make([]corev1.EnvVar, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.ImageLabels != nil {
- in, out := &in.ImageLabels, &out.ImageLabels
- *out = make([]ImageLabel, len(*in))
- copy(*out, *in)
- }
- in.Resources.DeepCopyInto(&out.Resources)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults.
-func (in *BuildDefaults) DeepCopy() *BuildDefaults {
- if in == nil {
- return nil
- }
- out := new(BuildDefaults)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildList) DeepCopyInto(out *BuildList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Build, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
-func (in *BuildList) DeepCopy() *BuildList {
- if in == nil {
- return nil
- }
- out := new(BuildList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *BuildList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) {
- *out = *in
- if in.ImageLabels != nil {
- in, out := &in.ImageLabels, &out.ImageLabels
- *out = make([]ImageLabel, len(*in))
- copy(*out, *in)
- }
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Tolerations != nil {
- in, out := &in.Tolerations, &out.Tolerations
- *out = make([]corev1.Toleration, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides.
-func (in *BuildOverrides) DeepCopy() *BuildOverrides {
- if in == nil {
- return nil
- }
- out := new(BuildOverrides)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
- *out = *in
- out.AdditionalTrustedCA = in.AdditionalTrustedCA
- in.BuildDefaults.DeepCopyInto(&out.BuildDefaults)
- in.BuildOverrides.DeepCopyInto(&out.BuildOverrides)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
-func (in *BuildSpec) DeepCopy() *BuildSpec {
- if in == nil {
- return nil
- }
- out := new(BuildSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CertInfo) DeepCopyInto(out *CertInfo) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo.
-func (in *CertInfo) DeepCopy() *CertInfo {
- if in == nil {
- return nil
- }
- out := new(CertInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides.
-func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides {
- if in == nil {
- return nil
- }
- out := new(ClientConnectionOverrides)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
-func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
- if in == nil {
- return nil
- }
- out := new(ClusterNetworkEntry)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator.
-func (in *ClusterOperator) DeepCopy() *ClusterOperator {
- if in == nil {
- return nil
- }
- out := new(ClusterOperator)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterOperator) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ClusterOperator, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList.
-func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterOperatorList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec.
-func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]ClusterOperatorStatusCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Versions != nil {
- in, out := &in.Versions, &out.Versions
- *out = make([]OperandVersion, len(*in))
- copy(*out, *in)
- }
- if in.RelatedObjects != nil {
- in, out := &in.RelatedObjects, &out.RelatedObjects
- *out = make([]ObjectReference, len(*in))
- copy(*out, *in)
- }
- in.Extension.DeepCopyInto(&out.Extension)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus.
-func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) {
- *out = *in
- in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition.
-func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition {
- if in == nil {
- return nil
- }
- out := new(ClusterOperatorStatusCondition)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion.
-func (in *ClusterVersion) DeepCopy() *ClusterVersion {
- if in == nil {
- return nil
- }
- out := new(ClusterVersion)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterVersion) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ClusterVersion, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList.
-func (in *ClusterVersionList) DeepCopy() *ClusterVersionList {
- if in == nil {
- return nil
- }
- out := new(ClusterVersionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterVersionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) {
- *out = *in
- if in.DesiredUpdate != nil {
- in, out := &in.DesiredUpdate, &out.DesiredUpdate
- *out = new(Update)
- **out = **in
- }
- if in.Overrides != nil {
- in, out := &in.Overrides, &out.Overrides
- *out = make([]ComponentOverride, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec.
-func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec {
- if in == nil {
- return nil
- }
- out := new(ClusterVersionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
- *out = *in
- out.Desired = in.Desired
- if in.History != nil {
- in, out := &in.History, &out.History
- *out = make([]UpdateHistory, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]ClusterOperatorStatusCondition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.AvailableUpdates != nil {
- in, out := &in.AvailableUpdates, &out.AvailableUpdates
- *out = make([]Update, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
-func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
- if in == nil {
- return nil
- }
- out := new(ClusterVersionStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride.
-func (in *ComponentOverride) DeepCopy() *ComponentOverride {
- if in == nil {
- return nil
- }
- out := new(ComponentOverride)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference.
-func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference {
- if in == nil {
- return nil
- }
- out := new(ConfigMapFileReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference.
-func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference {
- if in == nil {
- return nil
- }
- out := new(ConfigMapNameReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Console) DeepCopyInto(out *Console) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
-func (in *Console) DeepCopy() *Console {
- if in == nil {
- return nil
- }
- out := new(Console)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Console) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication.
-func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
- if in == nil {
- return nil
- }
- out := new(ConsoleAuthentication)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Console, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList.
-func (in *ConsoleList) DeepCopy() *ConsoleList {
- if in == nil {
- return nil
- }
- out := new(ConsoleList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ConsoleList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) {
- *out = *in
- out.Authentication = in.Authentication
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec.
-func (in *ConsoleSpec) DeepCopy() *ConsoleSpec {
- if in == nil {
- return nil
- }
- out := new(ConsoleSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus.
-func (in *ConsoleStatus) DeepCopy() *ConsoleStatus {
- if in == nil {
- return nil
- }
- out := new(ConsoleStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) {
- *out = *in
- if in.Enabled != nil {
- in, out := &in.Enabled, &out.Enabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Disabled != nil {
- in, out := &in.Disabled, &out.Disabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates.
-func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
- if in == nil {
- return nil
- }
- out := new(CustomFeatureGates)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
- *out = *in
- in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
-func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
- if in == nil {
- return nil
- }
- out := new(CustomTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNS) DeepCopyInto(out *DNS) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
-func (in *DNS) DeepCopy() *DNS {
- if in == nil {
- return nil
- }
- out := new(DNS)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DNS) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSList) DeepCopyInto(out *DNSList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]DNS, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
-func (in *DNSList) DeepCopy() *DNSList {
- if in == nil {
- return nil
- }
- out := new(DNSList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DNSList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
- *out = *in
- if in.PublicZone != nil {
- in, out := &in.PublicZone, &out.PublicZone
- *out = new(DNSZone)
- (*in).DeepCopyInto(*out)
- }
- if in.PrivateZone != nil {
- in, out := &in.PrivateZone, &out.PrivateZone
- *out = new(DNSZone)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
-func (in *DNSSpec) DeepCopy() *DNSSpec {
- if in == nil {
- return nil
- }
- out := new(DNSSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
-func (in *DNSStatus) DeepCopy() *DNSStatus {
- if in == nil {
- return nil
- }
- out := new(DNSStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DNSZone) DeepCopyInto(out *DNSZone) {
- *out = *in
- if in.Tags != nil {
- in, out := &in.Tags, &out.Tags
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone.
-func (in *DNSZone) DeepCopy() *DNSZone {
- if in == nil {
- return nil
- }
- out := new(DNSZone)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication.
-func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication {
- if in == nil {
- return nil
- }
- out := new(DelegatedAuthentication)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization.
-func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization {
- if in == nil {
- return nil
- }
- out := new(DelegatedAuthorization)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) {
- *out = *in
- if in.URLs != nil {
- in, out := &in.URLs, &out.URLs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.CertInfo = in.CertInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo.
-func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo {
- if in == nil {
- return nil
- }
- out := new(EtcdConnectionInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) {
- *out = *in
- in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig.
-func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
- if in == nil {
- return nil
- }
- out := new(EtcdStorageConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) {
- *out = *in
- if in.Policy != nil {
- in, out := &in.Policy, &out.Policy
- *out = new(ExternalIPPolicy)
- (*in).DeepCopyInto(*out)
- }
- if in.AutoAssignCIDRs != nil {
- in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig.
-func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig {
- if in == nil {
- return nil
- }
- out := new(ExternalIPConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) {
- *out = *in
- if in.AllowedCIDRs != nil {
- in, out := &in.AllowedCIDRs, &out.AllowedCIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.RejectedCIDRs != nil {
- in, out := &in.RejectedCIDRs, &out.RejectedCIDRs
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy.
-func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy {
- if in == nil {
- return nil
- }
- out := new(ExternalIPPolicy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGate) DeepCopyInto(out *FeatureGate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate.
-func (in *FeatureGate) DeepCopy() *FeatureGate {
- if in == nil {
- return nil
- }
- out := new(FeatureGate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FeatureGate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) {
- *out = *in
- if in.Enabled != nil {
- in, out := &in.Enabled, &out.Enabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Disabled != nil {
- in, out := &in.Disabled, &out.Disabled
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled.
-func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
- if in == nil {
- return nil
- }
- out := new(FeatureGateEnabledDisabled)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]FeatureGate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList.
-func (in *FeatureGateList) DeepCopy() *FeatureGateList {
- if in == nil {
- return nil
- }
- out := new(FeatureGateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *FeatureGateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) {
- *out = *in
- if in.CustomNoUpgrade != nil {
- in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade
- *out = new(CustomFeatureGates)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection.
-func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection {
- if in == nil {
- return nil
- }
- out := new(FeatureGateSelection)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) {
- *out = *in
- in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec.
-func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec {
- if in == nil {
- return nil
- }
- out := new(FeatureGateSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus.
-func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus {
- if in == nil {
- return nil
- }
- out := new(FeatureGateStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus.
-func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(GCPPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) {
- *out = *in
- in.ServingInfo.DeepCopyInto(&out.ServingInfo)
- if in.CORSAllowedOrigins != nil {
- in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- in.AuditConfig.DeepCopyInto(&out.AuditConfig)
- in.StorageConfig.DeepCopyInto(&out.StorageConfig)
- in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig)
- out.KubeClientConfig = in.KubeClientConfig
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig.
-func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig {
- if in == nil {
- return nil
- }
- out := new(GenericAPIServerConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) {
- *out = *in
- in.ServingInfo.DeepCopyInto(&out.ServingInfo)
- out.LeaderElection = in.LeaderElection
- out.Authentication = in.Authentication
- out.Authorization = in.Authorization
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig.
-func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig {
- if in == nil {
- return nil
- }
- out := new(GenericControllerConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- if in.Organizations != nil {
- in, out := &in.Organizations, &out.Organizations
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Teams != nil {
- in, out := &in.Teams, &out.Teams
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.CA = in.CA
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
-func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(GitHubIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- out.CA = in.CA
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
-func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(GitLabIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
-func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(GoogleIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) {
- *out = *in
- out.FileData = in.FileData
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider.
-func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(HTPasswdIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) {
- *out = *in
- in.ServingInfo.DeepCopyInto(&out.ServingInfo)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo.
-func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo {
- if in == nil {
- return nil
- }
- out := new(HTTPServingInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HubSource) DeepCopyInto(out *HubSource) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource.
-func (in *HubSource) DeepCopy() *HubSource {
- if in == nil {
- return nil
- }
- out := new(HubSource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) {
- *out = *in
- out.HubSource = in.HubSource
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus.
-func (in *HubSourceStatus) DeepCopy() *HubSourceStatus {
- if in == nil {
- return nil
- }
- out := new(HubSourceStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
- *out = *in
- in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
-func (in *IdentityProvider) DeepCopy() *IdentityProvider {
- if in == nil {
- return nil
- }
- out := new(IdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) {
- *out = *in
- if in.BasicAuth != nil {
- in, out := &in.BasicAuth, &out.BasicAuth
- *out = new(BasicAuthIdentityProvider)
- **out = **in
- }
- if in.GitHub != nil {
- in, out := &in.GitHub, &out.GitHub
- *out = new(GitHubIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- if in.GitLab != nil {
- in, out := &in.GitLab, &out.GitLab
- *out = new(GitLabIdentityProvider)
- **out = **in
- }
- if in.Google != nil {
- in, out := &in.Google, &out.Google
- *out = new(GoogleIdentityProvider)
- **out = **in
- }
- if in.HTPasswd != nil {
- in, out := &in.HTPasswd, &out.HTPasswd
- *out = new(HTPasswdIdentityProvider)
- **out = **in
- }
- if in.Keystone != nil {
- in, out := &in.Keystone, &out.Keystone
- *out = new(KeystoneIdentityProvider)
- **out = **in
- }
- if in.LDAP != nil {
- in, out := &in.LDAP, &out.LDAP
- *out = new(LDAPIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- if in.OpenID != nil {
- in, out := &in.OpenID, &out.OpenID
- *out = new(OpenIDIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- if in.RequestHeader != nil {
- in, out := &in.RequestHeader, &out.RequestHeader
- *out = new(RequestHeaderIdentityProvider)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig.
-func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig {
- if in == nil {
- return nil
- }
- out := new(IdentityProviderConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Image) DeepCopyInto(out *Image) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
-func (in *Image) DeepCopy() *Image {
- if in == nil {
- return nil
- }
- out := new(Image)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Image) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageLabel) DeepCopyInto(out *ImageLabel) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel.
-func (in *ImageLabel) DeepCopy() *ImageLabel {
- if in == nil {
- return nil
- }
- out := new(ImageLabel)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageList) DeepCopyInto(out *ImageList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Image, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
-func (in *ImageList) DeepCopy() *ImageList {
- if in == nil {
- return nil
- }
- out := new(ImageList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ImageList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
- *out = *in
- if in.AllowedRegistriesForImport != nil {
- in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport
- *out = make([]RegistryLocation, len(*in))
- copy(*out, *in)
- }
- if in.ExternalRegistryHostnames != nil {
- in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.AdditionalTrustedCA = in.AdditionalTrustedCA
- in.RegistrySources.DeepCopyInto(&out.RegistrySources)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
-func (in *ImageSpec) DeepCopy() *ImageSpec {
- if in == nil {
- return nil
- }
- out := new(ImageSpec)
- in.DeepCopyInto(out)
- return out
-}
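
Slices get the same split treatment: make plus the built-in copy is a full deep copy when the element type has no reference fields (RegistryLocation and plain strings above), while element types that do carry references get the per-index DeepCopyInto loop seen in ImageList.Items. A short sketch under that assumption; Item is a hypothetical element type:

    package main

    import "fmt"

    // Item carries a reference field, forcing the per-index
    // DeepCopyInto loop rather than a single copy() call.
    type Item struct{ Tags []string }

    func (in *Item) DeepCopyInto(out *Item) {
        *out = *in
        if in.Tags != nil {
            out.Tags = make([]string, len(in.Tags))
            copy(out.Tags, in.Tags)
        }
    }

    func main() {
        // Value-only elements: make+copy is already a deep copy.
        src := []string{"x", "y"}
        dst := make([]string, len(src))
        copy(dst, src)

        // Elements with reference fields: copy each one explicitly.
        items := []Item{{Tags: []string{"t"}}}
        out := make([]Item, len(items))
        for i := range items {
            items[i].DeepCopyInto(&out[i])
        }
        items[0].Tags[0] = "changed"
        fmt.Println(dst, out[0].Tags) // [x y] [t]
    }
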
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
- *out = *in
- if in.ExternalRegistryHostnames != nil {
- in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
-func (in *ImageStatus) DeepCopy() *ImageStatus {
- if in == nil {
- return nil
- }
- out := new(ImageStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure.
-func (in *Infrastructure) DeepCopy() *Infrastructure {
- if in == nil {
- return nil
- }
- out := new(Infrastructure)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Infrastructure) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Infrastructure, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList.
-func (in *InfrastructureList) DeepCopy() *InfrastructureList {
- if in == nil {
- return nil
- }
- out := new(InfrastructureList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *InfrastructureList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) {
- *out = *in
- out.CloudConfig = in.CloudConfig
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec.
-func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
- if in == nil {
- return nil
- }
- out := new(InfrastructureSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
- *out = *in
- if in.PlatformStatus != nil {
- in, out := &in.PlatformStatus, &out.PlatformStatus
- *out = new(PlatformStatus)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus.
-func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus {
- if in == nil {
- return nil
- }
- out := new(InfrastructureStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Ingress) DeepCopyInto(out *Ingress) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
-func (in *Ingress) DeepCopy() *Ingress {
- if in == nil {
- return nil
- }
- out := new(Ingress)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Ingress) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressList) DeepCopyInto(out *IngressList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Ingress, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
-func (in *IngressList) DeepCopy() *IngressList {
- if in == nil {
- return nil
- }
- out := new(IngressList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *IngressList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
-func (in *IngressSpec) DeepCopy() *IngressSpec {
- if in == nil {
- return nil
- }
- out := new(IngressSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
-func (in *IngressStatus) DeepCopy() *IngressStatus {
- if in == nil {
- return nil
- }
- out := new(IngressStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile.
-func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile {
- if in == nil {
- return nil
- }
- out := new(IntermediateTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) {
- *out = *in
- out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider.
-func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(KeystoneIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) {
- *out = *in
- out.ConnectionOverrides = in.ConnectionOverrides
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig.
-func (in *KubeClientConfig) DeepCopy() *KubeClientConfig {
- if in == nil {
- return nil
- }
- out := new(KubeClientConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
- *out = *in
- if in.ID != nil {
- in, out := &in.ID, &out.ID
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.PreferredUsername != nil {
- in, out := &in.PreferredUsername, &out.PreferredUsername
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Name != nil {
- in, out := &in.Name, &out.Name
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Email != nil {
- in, out := &in.Email, &out.Email
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
-func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
- if in == nil {
- return nil
- }
- out := new(LDAPAttributeMapping)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) {
- *out = *in
- out.BindPassword = in.BindPassword
- out.CA = in.CA
- in.Attributes.DeepCopyInto(&out.Attributes)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider.
-func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(LDAPIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LeaderElection) DeepCopyInto(out *LeaderElection) {
- *out = *in
- out.LeaseDuration = in.LeaseDuration
- out.RenewDeadline = in.RenewDeadline
- out.RetryPeriod = in.RetryPeriod
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection.
-func (in *LeaderElection) DeepCopy() *LeaderElection {
- if in == nil {
- return nil
- }
- out := new(LeaderElection)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile.
-func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile {
- if in == nil {
- return nil
- }
- out := new(ModernTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
- *out = *in
- if in.Names != nil {
- in, out := &in.Names, &out.Names
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.CertInfo = in.CertInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate.
-func (in *NamedCertificate) DeepCopy() *NamedCertificate {
- if in == nil {
- return nil
- }
- out := new(NamedCertificate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Network) DeepCopyInto(out *Network) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
-func (in *Network) DeepCopy() *Network {
- if in == nil {
- return nil
- }
- out := new(Network)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Network) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkList) DeepCopyInto(out *NetworkList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Network, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
-func (in *NetworkList) DeepCopy() *NetworkList {
- if in == nil {
- return nil
- }
- out := new(NetworkList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
- *out = *in
- if in.ClusterNetwork != nil {
- in, out := &in.ClusterNetwork, &out.ClusterNetwork
- *out = make([]ClusterNetworkEntry, len(*in))
- copy(*out, *in)
- }
- if in.ServiceNetwork != nil {
- in, out := &in.ServiceNetwork, &out.ServiceNetwork
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ExternalIP != nil {
- in, out := &in.ExternalIP, &out.ExternalIP
- *out = new(ExternalIPConfig)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
-func (in *NetworkSpec) DeepCopy() *NetworkSpec {
- if in == nil {
- return nil
- }
- out := new(NetworkSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
- *out = *in
- if in.ClusterNetwork != nil {
- in, out := &in.ClusterNetwork, &out.ClusterNetwork
- *out = make([]ClusterNetworkEntry, len(*in))
- copy(*out, *in)
- }
- if in.ServiceNetwork != nil {
- in, out := &in.ServiceNetwork, &out.ServiceNetwork
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
-func (in *NetworkStatus) DeepCopy() *NetworkStatus {
- if in == nil {
- return nil
- }
- out := new(NetworkStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuth) DeepCopyInto(out *OAuth) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth.
-func (in *OAuth) DeepCopy() *OAuth {
- if in == nil {
- return nil
- }
- out := new(OAuth)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OAuth) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthList) DeepCopyInto(out *OAuthList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]OAuth, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList.
-func (in *OAuthList) DeepCopy() *OAuthList {
- if in == nil {
- return nil
- }
- out := new(OAuthList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OAuthList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) {
- *out = *in
- out.CA = in.CA
- out.TLSClientCert = in.TLSClientCert
- out.TLSClientKey = in.TLSClientKey
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo.
-func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo {
- if in == nil {
- return nil
- }
- out := new(OAuthRemoteConnectionInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) {
- *out = *in
- if in.IdentityProviders != nil {
- in, out := &in.IdentityProviders, &out.IdentityProviders
- *out = make([]IdentityProvider, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- out.TokenConfig = in.TokenConfig
- out.Templates = in.Templates
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec.
-func (in *OAuthSpec) DeepCopy() *OAuthSpec {
- if in == nil {
- return nil
- }
- out := new(OAuthSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus.
-func (in *OAuthStatus) DeepCopy() *OAuthStatus {
- if in == nil {
- return nil
- }
- out := new(OAuthStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
- *out = *in
- out.Login = in.Login
- out.ProviderSelection = in.ProviderSelection
- out.Error = in.Error
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
-func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
- if in == nil {
- return nil
- }
- out := new(OAuthTemplates)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
-func (in *ObjectReference) DeepCopy() *ObjectReference {
- if in == nil {
- return nil
- }
- out := new(ObjectReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile.
-func (in *OldTLSProfile) DeepCopy() *OldTLSProfile {
- if in == nil {
- return nil
- }
- out := new(OldTLSProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
- *out = *in
- if in.PreferredUsername != nil {
- in, out := &in.PreferredUsername, &out.PreferredUsername
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Name != nil {
- in, out := &in.Name, &out.Name
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Email != nil {
- in, out := &in.Email, &out.Email
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
-func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
- if in == nil {
- return nil
- }
- out := new(OpenIDClaims)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
- *out = *in
- out.ClientSecret = in.ClientSecret
- out.CA = in.CA
- if in.ExtraScopes != nil {
- in, out := &in.ExtraScopes, &out.ExtraScopes
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ExtraAuthorizeParameters != nil {
- in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- in.Claims.DeepCopyInto(&out.Claims)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
-func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(OpenIDIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus.
-func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(OpenStackPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperandVersion) DeepCopyInto(out *OperandVersion) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion.
-func (in *OperandVersion) DeepCopy() *OperandVersion {
- if in == nil {
- return nil
- }
- out := new(OperandVersion)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHub) DeepCopyInto(out *OperatorHub) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub.
-func (in *OperatorHub) DeepCopy() *OperatorHub {
- if in == nil {
- return nil
- }
- out := new(OperatorHub)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorHub) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]OperatorHub, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList.
-func (in *OperatorHubList) DeepCopy() *OperatorHubList {
- if in == nil {
- return nil
- }
- out := new(OperatorHubList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OperatorHubList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) {
- *out = *in
- if in.Sources != nil {
- in, out := &in.Sources, &out.Sources
- *out = make([]HubSource, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec.
-func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec {
- if in == nil {
- return nil
- }
- out := new(OperatorHubSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) {
- *out = *in
- if in.Sources != nil {
- in, out := &in.Sources, &out.Sources
- *out = make([]HubSourceStatus, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus.
-func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
- if in == nil {
- return nil
- }
- out := new(OperatorHubStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus.
-func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus {
- if in == nil {
- return nil
- }
- out := new(OvirtPlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
- *out = *in
- if in.AWS != nil {
- in, out := &in.AWS, &out.AWS
- *out = new(AWSPlatformStatus)
- **out = **in
- }
- if in.Azure != nil {
- in, out := &in.Azure, &out.Azure
- *out = new(AzurePlatformStatus)
- **out = **in
- }
- if in.GCP != nil {
- in, out := &in.GCP, &out.GCP
- *out = new(GCPPlatformStatus)
- **out = **in
- }
- if in.BareMetal != nil {
- in, out := &in.BareMetal, &out.BareMetal
- *out = new(BareMetalPlatformStatus)
- **out = **in
- }
- if in.OpenStack != nil {
- in, out := &in.OpenStack, &out.OpenStack
- *out = new(OpenStackPlatformStatus)
- **out = **in
- }
- if in.Ovirt != nil {
- in, out := &in.Ovirt, &out.Ovirt
- *out = new(OvirtPlatformStatus)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
-func (in *PlatformStatus) DeepCopy() *PlatformStatus {
- if in == nil {
- return nil
- }
- out := new(PlatformStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Project) DeepCopyInto(out *Project) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
-func (in *Project) DeepCopy() *Project {
- if in == nil {
- return nil
- }
- out := new(Project)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Project) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectList) DeepCopyInto(out *ProjectList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Project, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
-func (in *ProjectList) DeepCopy() *ProjectList {
- if in == nil {
- return nil
- }
- out := new(ProjectList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ProjectList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
- *out = *in
- out.ProjectRequestTemplate = in.ProjectRequestTemplate
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
-func (in *ProjectSpec) DeepCopy() *ProjectSpec {
- if in == nil {
- return nil
- }
- out := new(ProjectSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
-func (in *ProjectStatus) DeepCopy() *ProjectStatus {
- if in == nil {
- return nil
- }
- out := new(ProjectStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Proxy) DeepCopyInto(out *Proxy) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy.
-func (in *Proxy) DeepCopy() *Proxy {
- if in == nil {
- return nil
- }
- out := new(Proxy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Proxy) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxyList) DeepCopyInto(out *ProxyList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Proxy, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList.
-func (in *ProxyList) DeepCopy() *ProxyList {
- if in == nil {
- return nil
- }
- out := new(ProxyList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ProxyList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
- *out = *in
- if in.ReadinessEndpoints != nil {
- in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- out.TrustedCA = in.TrustedCA
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
-func (in *ProxySpec) DeepCopy() *ProxySpec {
- if in == nil {
- return nil
- }
- out := new(ProxySpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
-func (in *ProxyStatus) DeepCopy() *ProxyStatus {
- if in == nil {
- return nil
- }
- out := new(ProxyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
-func (in *RegistryLocation) DeepCopy() *RegistryLocation {
- if in == nil {
- return nil
- }
- out := new(RegistryLocation)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RegistrySources) DeepCopyInto(out *RegistrySources) {
- *out = *in
- if in.InsecureRegistries != nil {
- in, out := &in.InsecureRegistries, &out.InsecureRegistries
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.BlockedRegistries != nil {
- in, out := &in.BlockedRegistries, &out.BlockedRegistries
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.AllowedRegistries != nil {
- in, out := &in.AllowedRegistries, &out.AllowedRegistries
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources.
-func (in *RegistrySources) DeepCopy() *RegistrySources {
- if in == nil {
- return nil
- }
- out := new(RegistrySources)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) {
- *out = *in
- out.CertInfo = in.CertInfo
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo.
-func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo {
- if in == nil {
- return nil
- }
- out := new(RemoteConnectionInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
- *out = *in
- out.ClientCA = in.ClientCA
- if in.ClientCommonNames != nil {
- in, out := &in.ClientCommonNames, &out.ClientCommonNames
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Headers != nil {
- in, out := &in.Headers, &out.Headers
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.PreferredUsernameHeaders != nil {
- in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.NameHeaders != nil {
- in, out := &in.NameHeaders, &out.NameHeaders
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.EmailHeaders != nil {
- in, out := &in.EmailHeaders, &out.EmailHeaders
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
-func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
- if in == nil {
- return nil
- }
- out := new(RequestHeaderIdentityProvider)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Scheduler) DeepCopyInto(out *Scheduler) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- out.Spec = in.Spec
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler.
-func (in *Scheduler) DeepCopy() *Scheduler {
- if in == nil {
- return nil
- }
- out := new(Scheduler)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Scheduler) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchedulerList) DeepCopyInto(out *SchedulerList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Scheduler, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList.
-func (in *SchedulerList) DeepCopy() *SchedulerList {
- if in == nil {
- return nil
- }
- out := new(SchedulerList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *SchedulerList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) {
- *out = *in
- out.Policy = in.Policy
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec.
-func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {
- if in == nil {
- return nil
- }
- out := new(SchedulerSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus.
-func (in *SchedulerStatus) DeepCopy() *SchedulerStatus {
- if in == nil {
- return nil
- }
- out := new(SchedulerStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference.
-func (in *SecretNameReference) DeepCopy() *SecretNameReference {
- if in == nil {
- return nil
- }
- out := new(SecretNameReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServingInfo) DeepCopyInto(out *ServingInfo) {
- *out = *in
- out.CertInfo = in.CertInfo
- if in.NamedCertificates != nil {
- in, out := &in.NamedCertificates, &out.NamedCertificates
- *out = make([]NamedCertificate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.CipherSuites != nil {
- in, out := &in.CipherSuites, &out.CipherSuites
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo.
-func (in *ServingInfo) DeepCopy() *ServingInfo {
- if in == nil {
- return nil
- }
- out := new(ServingInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StringSource) DeepCopyInto(out *StringSource) {
- *out = *in
- out.StringSourceSpec = in.StringSourceSpec
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource.
-func (in *StringSource) DeepCopy() *StringSource {
- if in == nil {
- return nil
- }
- out := new(StringSource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec.
-func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
- if in == nil {
- return nil
- }
- out := new(StringSourceSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
- *out = *in
- if in.Ciphers != nil {
- in, out := &in.Ciphers, &out.Ciphers
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
-func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
- if in == nil {
- return nil
- }
- out := new(TLSProfileSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) {
- *out = *in
- if in.Old != nil {
- in, out := &in.Old, &out.Old
- *out = new(OldTLSProfile)
- **out = **in
- }
- if in.Intermediate != nil {
- in, out := &in.Intermediate, &out.Intermediate
- *out = new(IntermediateTLSProfile)
- **out = **in
- }
- if in.Modern != nil {
- in, out := &in.Modern, &out.Modern
- *out = new(ModernTLSProfile)
- **out = **in
- }
- if in.Custom != nil {
- in, out := &in.Custom, &out.Custom
- *out = new(CustomTLSProfile)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile.
-func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile {
- if in == nil {
- return nil
- }
- out := new(TLSSecurityProfile)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference.
-func (in *TemplateReference) DeepCopy() *TemplateReference {
- if in == nil {
- return nil
- }
- out := new(TemplateReference)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
-func (in *TokenConfig) DeepCopy() *TokenConfig {
- if in == nil {
- return nil
- }
- out := new(TokenConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Update) DeepCopyInto(out *Update) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update.
-func (in *Update) DeepCopy() *Update {
- if in == nil {
- return nil
- }
- out := new(Update)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) {
- *out = *in
- in.StartedTime.DeepCopyInto(&out.StartedTime)
- if in.CompletionTime != nil {
- in, out := &in.CompletionTime, &out.CompletionTime
- *out = (*in).DeepCopy()
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory.
-func (in *UpdateHistory) DeepCopy() *UpdateHistory {
- if in == nil {
- return nil
- }
- out := new(UpdateHistory)
- in.DeepCopyInto(out)
- return out
-}
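
A third pointer form appears here: *out = (*in).DeepCopy() delegates to a DeepCopy that allocates its own result, which is what the generator emits for CompletionTime (a *metav1.Time upstream). A self-contained sketch; Timestamp and History are simplified stand-ins for metav1.Time and UpdateHistory:

    package main

    import "fmt"

    // Timestamp stands in for metav1.Time: its DeepCopy allocates.
    type Timestamp struct{ Unix int64 }

    func (in *Timestamp) DeepCopy() *Timestamp {
        if in == nil {
            return nil
        }
        out := new(Timestamp)
        *out = *in
        return out
    }

    type History struct{ CompletionTime *Timestamp }

    func (in *History) DeepCopyInto(out *History) {
        *out = *in
        if in.CompletionTime != nil {
            in, out := &in.CompletionTime, &out.CompletionTime
            *out = (*in).DeepCopy() // delegate allocation to DeepCopy
        }
    }

    func main() {
        h := History{CompletionTime: &Timestamp{Unix: 1}}
        var c History
        h.DeepCopyInto(&c)
        h.CompletionTime.Unix = 2
        fmt.Println(c.CompletionTime.Unix) // 1: copy is independent
    }
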
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
- *out = *in
- out.KubeConfig = in.KubeConfig
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
-func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
- if in == nil {
- return nil
- }
- out := new(WebhookTokenAuthenticator)
- in.DeepCopyInto(out)
- return out
-}
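
Taken together, the DeepCopyObject wrappers deleted above are what let these types satisfy Kubernetes' runtime.Object, so generic machinery can clone an API object before mutating it without knowing its concrete type. A simplified, self-contained sketch of that mechanism; the Object interface below is a local stand-in for k8s.io/apimachinery/pkg/runtime.Object (which also requires GetObjectKind), and Proxy is pared down to one field:

    package main

    import "fmt"

    // Object is a local stand-in for runtime.Object's deep-copy half.
    type Object interface {
        DeepCopyObject() Object
    }

    type Proxy struct{ HTTPProxy string }

    func (in *Proxy) DeepCopy() *Proxy {
        if in == nil {
            return nil
        }
        out := new(Proxy)
        *out = *in
        return out
    }

    // DeepCopyObject lets generic code clone without the concrete type.
    func (in *Proxy) DeepCopyObject() Object {
        if c := in.DeepCopy(); c != nil {
            return c
        }
        return nil
    }

    func main() {
        var obj Object = &Proxy{HTTPProxy: "http://proxy:3128"}
        clone := obj.DeepCopyObject().(*Proxy)
        clone.HTTPProxy = "http://other:3128"
        fmt.Println(obj.(*Proxy).HTTPProxy) // original unchanged
    }
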
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
deleted file mode 100644
index 2d6b19d2d..000000000
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ /dev/null
@@ -1,1292 +0,0 @@
-package v1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-swagger-docs.sh
-
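
The deleted functions below all follow one convention: each API type exposes SwaggerDoc() map[string]string, keyed by JSON field name, with the empty key holding the type-level description. A consumer can discover the method with a plain interface assertion; a minimal sketch, with docFor as a hypothetical helper (the AuditConfig doc strings are taken from the map below):

    package main

    import "fmt"

    type AuditConfig struct {
        Enabled bool `json:"enabled"`
    }

    var map_AuditConfig = map[string]string{
        "":        "AuditConfig holds configuration for the audit capabilities",
        "enabled": "If this flag is set, audit log will be printed in the logs.",
    }

    func (AuditConfig) SwaggerDoc() map[string]string {
        return map_AuditConfig
    }

    // docFor shows how a doc generator can exploit the convention:
    // any value may be asserted against the SwaggerDoc interface.
    func docFor(v interface{}, field string) string {
        if d, ok := v.(interface{ SwaggerDoc() map[string]string }); ok {
            return d.SwaggerDoc()[field]
        }
        return ""
    }

    func main() {
        fmt.Println(docFor(AuditConfig{}, ""))        // type-level doc
        fmt.Println(docFor(AuditConfig{}, "enabled")) // field doc
    }
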
-// AUTO-GENERATED FUNCTIONS START HERE
-var map_AdmissionConfig = map[string]string{
- "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.",
- "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.",
-}
-
-func (AdmissionConfig) SwaggerDoc() map[string]string {
- return map_AdmissionConfig
-}
-
-var map_AdmissionPluginConfig = map[string]string{
- "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
- "location": "Location is the path to a configuration file that contains the plugin's configuration",
- "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
-}
-
-func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
- return map_AdmissionPluginConfig
-}
-
-var map_AuditConfig = map[string]string{
- "": "AuditConfig holds configuration for the audit capabilities",
- "enabled": "If this flag is set, audit log will be printed in the logs. The logs contains, method, user and a requested URL.",
- "auditFilePath": "All requests coming to the apiserver will be logged to this file.",
- "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
- "maximumRetainedFiles": "Maximum number of old log files to retain.",
- "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
- "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
- "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
- "logFormat": "Format of saved audits (legacy or json).",
- "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
- "webHookMode": "Strategy for sending audit events (block or batch).",
-}
-
-func (AuditConfig) SwaggerDoc() map[string]string {
- return map_AuditConfig
-}
-
-var map_CertInfo = map[string]string{
- "": "CertInfo relates a certificate with a private key",
- "certFile": "CertFile is a file containing a PEM-encoded certificate",
- "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
-}
-
-func (CertInfo) SwaggerDoc() map[string]string {
- return map_CertInfo
-}
-
-var map_ClientConnectionOverrides = map[string]string{
- "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.",
- "contentType": "contentType is the content type used when sending data to the server from this client.",
- "qps": "qps controls the number of queries per second allowed for this connection.",
- "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.",
-}
-
-func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
- return map_ClientConnectionOverrides
-}
-
-var map_ConfigMapFileReference = map[string]string{
- "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
- "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
-}
-
-func (ConfigMapFileReference) SwaggerDoc() map[string]string {
- return map_ConfigMapFileReference
-}
-
-var map_ConfigMapNameReference = map[string]string{
- "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
- "name": "name is the metadata.name of the referenced config map",
-}
-
-func (ConfigMapNameReference) SwaggerDoc() map[string]string {
- return map_ConfigMapNameReference
-}
-
-var map_DelegatedAuthentication = map[string]string{
- "": "DelegatedAuthentication allows authentication to be disabled.",
- "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
-}
-
-func (DelegatedAuthentication) SwaggerDoc() map[string]string {
- return map_DelegatedAuthentication
-}
-
-var map_DelegatedAuthorization = map[string]string{
- "": "DelegatedAuthorization allows authorization to be disabled.",
- "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
-}
-
-func (DelegatedAuthorization) SwaggerDoc() map[string]string {
- return map_DelegatedAuthorization
-}
-
-var map_EtcdConnectionInfo = map[string]string{
- "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
- "urls": "URLs are the URLs for etcd",
- "ca": "CA is a file containing trusted roots for the etcd server certificates",
-}
-
-func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
- return map_EtcdConnectionInfo
-}
-
-var map_EtcdStorageConfig = map[string]string{
- "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.",
-}
-
-func (EtcdStorageConfig) SwaggerDoc() map[string]string {
- return map_EtcdStorageConfig
-}
-
-var map_GenericAPIServerConfig = map[string]string{
- "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd",
- "servingInfo": "servingInfo describes how to start serving",
- "corsAllowedOrigins": "corsAllowedOrigins",
- "auditConfig": "auditConfig describes how to configure audit information",
- "storageConfig": "storageConfig contains information about how to use",
- "admission": "admissionConfig holds information about how to configure admission.",
-}
-
-func (GenericAPIServerConfig) SwaggerDoc() map[string]string {
- return map_GenericAPIServerConfig
-}
-
-var map_GenericControllerConfig = map[string]string{
- "": "GenericControllerConfig provides information to configure a controller",
- "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
- "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
- "authentication": "authentication allows configuration of authentication for the endpoints",
- "authorization": "authorization allows configuration of authentication for the endpoints",
-}
-
-func (GenericControllerConfig) SwaggerDoc() map[string]string {
- return map_GenericControllerConfig
-}
-
-var map_HTTPServingInfo = map[string]string{
- "": "HTTPServingInfo holds configuration for serving HTTP",
- "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
- "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
-}
-
-func (HTTPServingInfo) SwaggerDoc() map[string]string {
- return map_HTTPServingInfo
-}
-
-var map_KubeClientConfig = map[string]string{
- "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config",
- "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.",
-}
-
-func (KubeClientConfig) SwaggerDoc() map[string]string {
- return map_KubeClientConfig
-}
-
-var map_LeaderElection = map[string]string{
- "": "LeaderElection provides information to elect a leader",
- "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.",
- "namespace": "namespace indicates which namespace the resource is in",
- "name": "name indicates what name to use for the resource",
- "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
- "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.",
- "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.",
-}
-
-func (LeaderElection) SwaggerDoc() map[string]string {
- return map_LeaderElection
-}
-
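The three leader-election durations are ordered: renewDeadline must be less than or equal to leaseDuration, and retryPeriod is normally shorter still so a candidate gets several renewal attempts per deadline. A hedged sketch of that check, using plain time.Duration and made-up values (the real fields are metav1.Duration):

package main

import (
	"fmt"
	"time"
)

// validateLeaderElection is a hypothetical helper enforcing the ordering
// described above: retryPeriod < renewDeadline <= leaseDuration.
func validateLeaderElection(leaseDuration, renewDeadline, retryPeriod time.Duration) error {
	if renewDeadline > leaseDuration {
		return fmt.Errorf("renewDeadline %v must be <= leaseDuration %v", renewDeadline, leaseDuration)
	}
	if retryPeriod >= renewDeadline {
		return fmt.Errorf("retryPeriod %v must be < renewDeadline %v", retryPeriod, renewDeadline)
	}
	return nil
}

func main() {
	// Example values only; actual defaults vary by component.
	fmt.Println(validateLeaderElection(137*time.Second, 107*time.Second, 26*time.Second))
}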
-var map_NamedCertificate = map[string]string{
- "": "NamedCertificate specifies a certificate/key, and the names it should be served for",
- "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.",
-}
-
-func (NamedCertificate) SwaggerDoc() map[string]string {
- return map_NamedCertificate
-}
-
-var map_RemoteConnectionInfo = map[string]string{
- "": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
- "url": "URL is the remote URL to connect to",
- "ca": "CA is the CA for verifying TLS connections",
-}
-
-func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
- return map_RemoteConnectionInfo
-}
-
-var map_SecretNameReference = map[string]string{
- "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.",
- "name": "name is the metadata.name of the referenced secret",
-}
-
-func (SecretNameReference) SwaggerDoc() map[string]string {
- return map_SecretNameReference
-}
-
-var map_ServingInfo = map[string]string{
- "": "ServingInfo holds information about serving web pages",
- "bindAddress": "BindAddress is the ip:port to serve on",
- "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
- "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
- "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
- "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
- "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
-}
-
-func (ServingInfo) SwaggerDoc() map[string]string {
- return map_ServingInfo
-}
-
-var map_StringSource = map[string]string{
- "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.",
-}
-
-func (StringSource) SwaggerDoc() map[string]string {
- return map_StringSource
-}
-
-var map_StringSourceSpec = map[string]string{
- "": "StringSourceSpec specifies a string value, or external location",
- "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
- "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
- "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
- "keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
-}
-
-func (StringSourceSpec) SwaggerDoc() map[string]string {
- return map_StringSourceSpec
-}
-
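The marshaling rule above ("when it contains only a string value, it marshals to a simple JSON string") can be reproduced with a custom MarshalJSON. This is a minimal sketch of that behavior under assumed field names, not the actual openshift/api implementation:

package main

import (
	"encoding/json"
	"fmt"
)

// StringSourceSpec mirrors the four fields documented above.
type StringSourceSpec struct {
	Value   string `json:"value,omitempty"`
	Env     string `json:"env,omitempty"`
	File    string `json:"file,omitempty"`
	KeyFile string `json:"keyFile,omitempty"`
}

// StringSource collapses to a plain JSON string when only Value is set.
type StringSource struct {
	StringSourceSpec
}

func (s StringSource) MarshalJSON() ([]byte, error) {
	if s.Env == "" && s.File == "" && s.KeyFile == "" {
		return json.Marshal(s.Value) // simple string form
	}
	return json.Marshal(s.StringSourceSpec) // full object form
}

func main() {
	inline, _ := json.Marshal(StringSource{StringSourceSpec{Value: "secret"}})
	external, _ := json.Marshal(StringSource{StringSourceSpec{File: "/etc/pw", KeyFile: "/etc/key"}})
	fmt.Println(string(inline))   // "secret"
	fmt.Println(string(external)) // {"file":"/etc/pw","keyFile":"/etc/key"}
}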
-var map_APIServer = map[string]string{
- "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.",
-}
-
-func (APIServer) SwaggerDoc() map[string]string {
- return map_APIServer
-}
-
-var map_APIServerEncryption = map[string]string{
- "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io",
-}
-
-func (APIServerEncryption) SwaggerDoc() map[string]string {
- return map_APIServerEncryption
-}
-
-var map_APIServerNamedServingCert = map[string]string{
- "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
- "names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.",
- "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.",
-}
-
-func (APIServerNamedServingCert) SwaggerDoc() map[string]string {
- return map_APIServerNamedServingCert
-}
-
-var map_APIServerServingCerts = map[string]string{
- "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.",
-}
-
-func (APIServerServingCerts) SwaggerDoc() map[string]string {
- return map_APIServerServingCerts
-}
-
-var map_APIServerSpec = map[string]string{
- "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
- "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
- "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
- "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
- "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.",
-}
-
-func (APIServerSpec) SwaggerDoc() map[string]string {
- return map_APIServerSpec
-}
-
-var map_Authentication = map[string]string{
- "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Authentication) SwaggerDoc() map[string]string {
- return map_Authentication
-}
-
-var map_AuthenticationSpec = map[string]string{
- "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
- "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
- "webhookTokenAuthenticators": "webhookTokenAuthenticators configures remote token reviewers. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. The namespace for these secrets is openshift-config.",
-}
-
-func (AuthenticationSpec) SwaggerDoc() map[string]string {
- return map_AuthenticationSpec
-}
-
-var map_AuthenticationStatus = map[string]string{
- "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
-}
-
-func (AuthenticationStatus) SwaggerDoc() map[string]string {
- return map_AuthenticationStatus
-}
-
-var map_WebhookTokenAuthenticator = map[string]string{
- "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator",
- "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.",
-}
-
-func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
- return map_WebhookTokenAuthenticator
-}
-
-var map_Build = map[string]string{
- "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"",
- "spec": "Spec holds user-settable values for the build controller configuration",
-}
-
-func (Build) SwaggerDoc() map[string]string {
- return map_Build
-}
-
-var map_BuildDefaults = map[string]string{
- "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
- "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
- "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
- "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
- "resources": "Resources defines resource requirements to execute the build.",
-}
-
-func (BuildDefaults) SwaggerDoc() map[string]string {
- return map_BuildDefaults
-}
-
-var map_BuildOverrides = map[string]string{
- "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
- "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
- "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
-}
-
-func (BuildOverrides) SwaggerDoc() map[string]string {
- return map_BuildOverrides
-}
-
-var map_BuildSpec = map[string]string{
- "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
- "buildDefaults": "BuildDefaults controls the default information for Builds",
- "buildOverrides": "BuildOverrides controls override settings for builds",
-}
-
-func (BuildSpec) SwaggerDoc() map[string]string {
- return map_BuildSpec
-}
-
-var map_ImageLabel = map[string]string{
- "name": "Name defines the name of the label. It must have non-zero length.",
- "value": "Value defines the literal value of the label.",
-}
-
-func (ImageLabel) SwaggerDoc() map[string]string {
- return map_ImageLabel
-}
-
-var map_ClusterOperator = map[string]string{
- "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.",
- "spec": "spec holds configuration that could apply to any operator.",
- "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
-}
-
-func (ClusterOperator) SwaggerDoc() map[string]string {
- return map_ClusterOperator
-}
-
-var map_ClusterOperatorList = map[string]string{
- "": "ClusterOperatorList is a list of OperatorStatus resources.",
-}
-
-func (ClusterOperatorList) SwaggerDoc() map[string]string {
- return map_ClusterOperatorList
-}
-
-var map_ClusterOperatorSpec = map[string]string{
- "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".",
-}
-
-func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
- return map_ClusterOperatorSpec
-}
-
-var map_ClusterOperatorStatus = map[string]string{
- "": "ClusterOperatorStatus provides information about the status of the operator.",
- "conditions": "conditions describes the state of the operator's managed and monitored components.",
- "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
- "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
- "extension": "extension contains any additional status information specific to the operator which owns this status object.",
-}
-
-func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
- return map_ClusterOperatorStatus
-}
-
-var map_ClusterOperatorStatusCondition = map[string]string{
- "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
- "type": "type specifies the aspect reported by this condition.",
- "status": "status of the condition, one of True, False, Unknown.",
- "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
- "reason": "reason is the CamelCase reason for the condition's current status.",
- "message": "message provides additional information about the current condition. This is only to be consumed by humans.",
-}
-
-func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string {
- return map_ClusterOperatorStatusCondition
-}
-
-var map_ObjectReference = map[string]string{
- "": "ObjectReference contains enough information to let you inspect or modify the referred object.",
- "group": "group of the referent.",
- "resource": "resource of the referent.",
- "namespace": "namespace of the referent.",
- "name": "name of the referent.",
-}
-
-func (ObjectReference) SwaggerDoc() map[string]string {
- return map_ObjectReference
-}
-
-var map_OperandVersion = map[string]string{
- "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
- "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0",
-}
-
-func (OperandVersion) SwaggerDoc() map[string]string {
- return map_OperandVersion
-}
-
-var map_ClusterVersion = map[string]string{
- "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.",
- "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.",
- "status": "status contains information about the available updates and any in-progress updates.",
-}
-
-func (ClusterVersion) SwaggerDoc() map[string]string {
- return map_ClusterVersion
-}
-
-var map_ClusterVersionList = map[string]string{
- "": "ClusterVersionList is a list of ClusterVersion resources.",
-}
-
-func (ClusterVersionList) SwaggerDoc() map[string]string {
- return map_ClusterVersionList
-}
-
-var map_ClusterVersionSpec = map[string]string{
- "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.",
- "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.",
- "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
- "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
- "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.",
- "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
-}
-
-func (ClusterVersionSpec) SwaggerDoc() map[string]string {
- return map_ClusterVersionSpec
-}
-
-var map_ClusterVersionStatus = map[string]string{
- "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
- "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
- "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
- "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
- "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
- "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
- "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
-}
-
-func (ClusterVersionStatus) SwaggerDoc() map[string]string {
- return map_ClusterVersionStatus
-}
-
-var map_ComponentOverride = map[string]string{
- "": "ComponentOverride allows overriding cluster version operator's behavior for a component.",
- "kind": "kind indentifies which object to override.",
- "group": "group identifies the API group that the kind is in.",
- "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.",
- "name": "name is the component's name.",
- "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false",
-}
-
-func (ComponentOverride) SwaggerDoc() map[string]string {
- return map_ComponentOverride
-}
-
-var map_Update = map[string]string{
- "": "Update represents a release of the ClusterVersionOperator, referenced by the Image member.",
- "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.",
- "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
- "force": "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.",
-}
-
-func (Update) SwaggerDoc() map[string]string {
- return map_Update
-}
-
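In spec.desiredUpdate the two locators are alternatives: version alone is enough when that version appears in availableUpdates or history, image pins an exact payload, and force bypasses the usual verification. A hypothetical literal with placeholder values (the real type lives in github.com/openshift/api/config/v1):

package main

import "fmt"

// Update mirrors the three documented fields.
type Update struct {
	Version string `json:"version,omitempty"`
	Image   string `json:"image,omitempty"`
	Force   bool   `json:"force,omitempty"`
}

func main() {
	// Request a version known to the update service (placeholder value).
	byVersion := Update{Version: "4.4.3"}
	// Or pin an exact release image; per the warning above, force should
	// only follow out-of-band verification of the image.
	byImage := Update{Image: "quay.io/openshift-release-dev/ocp-release@sha256:...", Force: true}
	fmt.Printf("%+v\n%+v\n", byVersion, byImage)
}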
-var map_UpdateHistory = map[string]string{
- "": "UpdateHistory is a single attempted update to the cluster.",
- "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).",
- "startedTime": "startedTime is the time at which the update was started.",
- "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).",
- "version": "version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
- "image": "image is a container image location that contains the update. This value is always populated.",
- "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted.",
-}
-
-func (UpdateHistory) SwaggerDoc() map[string]string {
- return map_UpdateHistory
-}
-
-var map_Console = map[string]string{
- "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Console) SwaggerDoc() map[string]string {
- return map_Console
-}
-
-var map_ConsoleAuthentication = map[string]string{
- "": "ConsoleAuthentication defines a list of optional configuration for console authentication.",
- "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.",
-}
-
-func (ConsoleAuthentication) SwaggerDoc() map[string]string {
- return map_ConsoleAuthentication
-}
-
-var map_ConsoleSpec = map[string]string{
- "": "ConsoleSpec is the specification of the desired behavior of the Console.",
-}
-
-func (ConsoleSpec) SwaggerDoc() map[string]string {
- return map_ConsoleSpec
-}
-
-var map_ConsoleStatus = map[string]string{
- "": "ConsoleStatus defines the observed status of the Console.",
- "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.",
-}
-
-func (ConsoleStatus) SwaggerDoc() map[string]string {
- return map_ConsoleStatus
-}
-
-var map_DNS = map[string]string{
- "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (DNS) SwaggerDoc() map[string]string {
- return map_DNS
-}
-
-var map_DNSSpec = map[string]string{
- "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
- "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
- "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.",
-}
-
-func (DNSSpec) SwaggerDoc() map[string]string {
- return map_DNSSpec
-}
-
-var map_DNSZone = map[string]string{
- "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.",
- "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get",
- "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options",
-}
-
-func (DNSZone) SwaggerDoc() map[string]string {
- return map_DNSZone
-}
-
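A DNSZone is located one of two ways: by provider-specific ID, or by tag filters. A sketch with assumed placeholder values:

package main

import "fmt"

// DNSZone mirrors the two documented lookup handles.
type DNSZone struct {
	ID   string            `json:"id,omitempty"`
	Tags map[string]string `json:"tags,omitempty"`
}

func main() {
	// AWS-style lookup by hosted zone ID (placeholder value).
	byID := DNSZone{ID: "Z3URY6TWQ91KVV"}
	// Lookup by tag filters, e.g. via the AWS resourcegroupstaggingapi.
	byTags := DNSZone{Tags: map[string]string{"Name": "example-cluster-private-zone"}}
	fmt.Printf("%+v\n%+v\n", byID, byTags)
}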
-var map_CustomFeatureGates = map[string]string{
- "enabled": "enabled is a list of all feature gates that you want to force on",
- "disabled": "disabled is a list of all feature gates that you want to force off",
-}
-
-func (CustomFeatureGates) SwaggerDoc() map[string]string {
- return map_CustomFeatureGates
-}
-
-var map_FeatureGate = map[string]string{
- "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (FeatureGate) SwaggerDoc() map[string]string {
- return map_FeatureGate
-}
-
-var map_FeatureGateSelection = map[string]string{
- "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
- "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.",
-}
-
-func (FeatureGateSelection) SwaggerDoc() map[string]string {
- return map_FeatureGateSelection
-}
-
-var map_Image = map[string]string{
- "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to blacklist/whitelist registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Image) SwaggerDoc() map[string]string {
- return map_Image
-}
-
-var map_ImageSpec = map[string]string{
- "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
- "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
- "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
- "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
-}
-
-func (ImageSpec) SwaggerDoc() map[string]string {
- return map_ImageSpec
-}
-
-var map_ImageStatus = map[string]string{
- "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
- "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
-}
-
-func (ImageStatus) SwaggerDoc() map[string]string {
- return map_ImageStatus
-}
-
-var map_RegistryLocation = map[string]string{
- "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
- "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
- "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
-}
-
-func (RegistryLocation) SwaggerDoc() map[string]string {
- return map_RegistryLocation
-}
-
-var map_RegistrySources = map[string]string{
- "": "RegistrySources holds cluster-wide information about how to handle the registries config.",
- "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.",
- "blockedRegistries": "blockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
- "allowedRegistries": "allowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
-}
-
-func (RegistrySources) SwaggerDoc() map[string]string {
- return map_RegistrySources
-}
-
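Since the blocked and allowed lists are mutually exclusive, validating a RegistrySources object needs exactly that one check. A hedged validation sketch (helper name hypothetical):

package main

import (
	"errors"
	"fmt"
)

// RegistrySources mirrors the documented fields.
type RegistrySources struct {
	InsecureRegistries []string `json:"insecureRegistries,omitempty"`
	BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
	AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
}

// validate enforces the documented constraint that only one of
// BlockedRegistries or AllowedRegistries may be set.
func validate(rs RegistrySources) error {
	if len(rs.BlockedRegistries) > 0 && len(rs.AllowedRegistries) > 0 {
		return errors.New("only one of blockedRegistries or allowedRegistries may be set")
	}
	return nil
}

func main() {
	fmt.Println(validate(RegistrySources{
		BlockedRegistries: []string{"untrusted.example.com"},
		AllowedRegistries: []string{"registry.example.com"},
	}))
}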
-var map_AWSPlatformStatus = map[string]string{
- "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
- "region": "region holds the default AWS region for new AWS resources created by the cluster.",
-}
-
-func (AWSPlatformStatus) SwaggerDoc() map[string]string {
- return map_AWSPlatformStatus
-}
-
-var map_AzurePlatformStatus = map[string]string{
- "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
- "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
- "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.",
-}
-
-func (AzurePlatformStatus) SwaggerDoc() map[string]string {
- return map_AzurePlatformStatus
-}
-
-var map_BareMetalPlatformStatus = map[string]string{
- "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md",
- "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
- "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
- "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-}
-
-func (BareMetalPlatformStatus) SwaggerDoc() map[string]string {
- return map_BareMetalPlatformStatus
-}
-
-var map_GCPPlatformStatus = map[string]string{
- "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.",
- "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.",
- "region": "region holds the region for new GCP resources created for the cluster.",
-}
-
-func (GCPPlatformStatus) SwaggerDoc() map[string]string {
- return map_GCPPlatformStatus
-}
-
-var map_Infrastructure = map[string]string{
- "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Infrastructure) SwaggerDoc() map[string]string {
- return map_Infrastructure
-}
-
-var map_InfrastructureList = map[string]string{
- "": "InfrastructureList is",
-}
-
-func (InfrastructureList) SwaggerDoc() map[string]string {
- return map_InfrastructureList
-}
-
-var map_InfrastructureSpec = map[string]string{
- "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.",
- "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.",
-}
-
-func (InfrastructureSpec) SwaggerDoc() map[string]string {
- return map_InfrastructureSpec
-}
-
-var map_InfrastructureStatus = map[string]string{
- "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
- "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
- "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.",
- "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.",
- "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
- "apiServerURL": "apiServerURL is a valid URI with scheme(http/https), address and port. apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.",
- "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme(http/https), address and port. apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.",
-}
-
-func (InfrastructureStatus) SwaggerDoc() map[string]string {
- return map_InfrastructureStatus
-}
-
-var map_OpenStackPlatformStatus = map[string]string{
- "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.",
- "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
- "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).",
- "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
- "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-}
-
-func (OpenStackPlatformStatus) SwaggerDoc() map[string]string {
- return map_OpenStackPlatformStatus
-}
-
-var map_OvirtPlatformStatus = map[string]string{
- "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.",
- "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
- "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
- "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for oVirt deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
-}
-
-func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
- return map_OvirtPlatformStatus
-}
-
-var map_PlatformStatus = map[string]string{
- "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
- "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
- "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
- "azure": "Azure contains settings specific to the Azure infrastructure provider.",
- "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
- "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
- "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
- "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
-}
-
-func (PlatformStatus) SwaggerDoc() map[string]string {
- return map_PlatformStatus
-}
-
-var map_Ingress = map[string]string{
- "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Ingress) SwaggerDoc() map[string]string {
- return map_Ingress
-}
-
-var map_IngressSpec = map[string]string{
- "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.",
-}
-
-func (IngressSpec) SwaggerDoc() map[string]string {
- return map_IngressSpec
-}
-
-var map_ClusterNetworkEntry = map[string]string{
- "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
- "cidr": "The complete block for pod IPs.",
- "hostPrefix": "The size (prefix) of block to allocate to each node.",
-}
-
-func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
- return map_ClusterNetworkEntry
-}
-
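Together cidr and hostPrefix fix how many per-node blocks the pool yields: a /14 pool carved into /23 blocks gives 2^(23-14) = 512 node subnets of 512 addresses each. A small arithmetic sketch with assumed example values:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Assumed example: pod pool 10.128.0.0/14, each node gets a /23.
	_, pool, _ := net.ParseCIDR("10.128.0.0/14")
	poolBits, _ := pool.Mask.Size()
	hostPrefix := 23

	nodeSubnets := 1 << (hostPrefix - poolBits) // 2^(23-14) = 512 node blocks
	addrsPerNode := 1 << (32 - hostPrefix)      // 2^(32-23) = 512 pod IPs per block
	fmt.Printf("%d node subnets, %d addresses each\n", nodeSubnets, addrsPerNode)
}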
-var map_ExternalIPConfig = map[string]string{
- "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
- "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
- "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
-}
-
-func (ExternalIPConfig) SwaggerDoc() map[string]string {
- return map_ExternalIPConfig
-}
-
-var map_ExternalIPPolicy = map[string]string{
- "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
- "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.",
- "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
-}
-
-func (ExternalIPPolicy) SwaggerDoc() map[string]string {
- return map_ExternalIPPolicy
-}
-
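Because rejectedCIDRs take precedence over allowedCIDRs, a membership check has to consult the reject list first. A sketch of that precedence (function name hypothetical):

package main

import (
	"fmt"
	"net"
)

// allowedExternalIP implements the documented precedence: a rejected
// CIDR overrides any allowed CIDR that also matches.
func allowedExternalIP(ip net.IP, allowed, rejected []string) bool {
	contains := func(cidrs []string) bool {
		for _, c := range cidrs {
			if _, n, err := net.ParseCIDR(c); err == nil && n.Contains(ip) {
				return true
			}
		}
		return false
	}
	if contains(rejected) {
		return false
	}
	return contains(allowed)
}

func main() {
	ip := net.ParseIP("192.0.2.10")
	// false: the IP falls inside the rejected /28 even though the /24 allows it.
	fmt.Println(allowedExternalIP(ip, []string{"192.0.2.0/24"}, []string{"192.0.2.0/28"}))
}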
-var map_Network = map[string]string{
- "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.",
- "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Network) SwaggerDoc() map[string]string {
- return map_Network
-}
-
-var map_NetworkSpec = map[string]string{
- "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
- "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
- "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
- "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.",
- "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
-}
-
-func (NetworkSpec) SwaggerDoc() map[string]string {
- return map_NetworkSpec
-}
-
-var map_NetworkStatus = map[string]string{
- "": "NetworkStatus is the current network configuration.",
- "clusterNetwork": "IP address pool to use for pod IPs.",
- "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
- "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).",
- "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.",
-}
-
-func (NetworkStatus) SwaggerDoc() map[string]string {
- return map_NetworkStatus
-}
-
-var map_BasicAuthIdentityProvider = map[string]string{
- "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials",
-}
-
-func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string {
- return map_BasicAuthIdentityProvider
-}
-
-var map_GitHubIdentityProvider = map[string]string{
- "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "organizations": "organizations optionally restricts which organizations are allowed to log in",
- "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.",
- "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.",
-}
-
-func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
- return map_GitHubIdentityProvider
-}
-
-var map_GitLabIdentityProvider = map[string]string{
- "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "url": "url is the oauth server base URL",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
-}
-
-func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
- return map_GitLabIdentityProvider
-}
-
-var map_GoogleIdentityProvider = map[string]string{
- "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
-}
-
-func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
- return map_GoogleIdentityProvider
-}
-
-var map_HTPasswdIdentityProvider = map[string]string{
- "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials",
- "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-}
-
-func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string {
- return map_HTPasswdIdentityProvider
-}
-
-var map_IdentityProvider = map[string]string{
- "": "IdentityProvider provides identities for users authenticating using credentials",
- "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName",
- "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users Defaults to \"claim\"",
-}
-
-func (IdentityProvider) SwaggerDoc() map[string]string {
- return map_IdentityProvider
-}
-
-var map_IdentityProviderConfig = map[string]string{
- "": "IdentityProviderConfig contains configuration for using a specific identity provider",
- "type": "type identifies the identity provider type for this entry.",
- "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP",
- "github": "github enables user authentication using GitHub credentials",
- "gitlab": "gitlab enables user authentication using GitLab credentials",
- "google": "google enables user authentication using Google credentials",
- "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials",
- "keystone": "keystone enables user authentication using keystone password credentials",
- "ldap": "ldap enables user authentication using LDAP credentials",
- "openID": "openID enables user authentication using OpenID credentials",
- "requestHeader": "requestHeader enables user authentication using request header credentials",
-}
-
-func (IdentityProviderConfig) SwaggerDoc() map[string]string {
- return map_IdentityProviderConfig
-}
-
-var map_KeystoneIdentityProvider = map[string]string{
- "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials",
- "domainName": "domainName is required for keystone v3",
-}
-
-func (KeystoneIdentityProvider) SwaggerDoc() map[string]string {
- return map_KeystoneIdentityProvider
-}
-
-var map_LDAPAttributeMapping = map[string]string{
- "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
- "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is \"dn\"",
- "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
- "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"",
- "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
-}
-
-func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
- return map_LDAPAttributeMapping
-}
-
-var map_LDAPIdentityProvider = map[string]string{
- "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials",
- "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter",
- "bindDN": "bindDN is an optional DN to bind with during the search phase.",
- "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "insecure": "insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLS connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
- "attributes": "attributes maps LDAP attributes to identities",
-}
-
-func (LDAPIdentityProvider) SwaggerDoc() map[string]string {
- return map_LDAPIdentityProvider
-}
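
The RFC 2255 form documented for `url` above packs the search parameters into the URL itself: `ldap://host:port/basedn?attribute?scope?filter`. A small illustrative decoder of that shape, written under the assumption stated in the doc string; this is not the parser OpenShift itself uses:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// ldapSearch holds the pieces of an RFC 2255 LDAP URL of the form
// ldap://host:port/basedn?attribute?scope?filter.
type ldapSearch struct {
	Host, BaseDN, Attribute, Scope, Filter string
}

func parseLDAPURL(raw string) (*ldapSearch, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	s := &ldapSearch{
		Host:   u.Host,
		BaseDN: strings.TrimPrefix(u.Path, "/"),
	}
	// RFC 2255 separates attribute, scope and filter with "?", which
	// net/url leaves untouched in RawQuery.
	for i, p := range strings.SplitN(u.RawQuery, "?", 3) {
		switch i {
		case 0:
			s.Attribute = p
		case 1:
			s.Scope = p
		case 2:
			s.Filter = p
		}
	}
	return s, nil
}

func main() {
	s, err := parseLDAPURL("ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid?sub?(objectClass=person)")
	fmt.Printf("%+v err=%v\n", s, err)
}
```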
-
-var map_OAuth = map[string]string{
- "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.",
-}
-
-func (OAuth) SwaggerDoc() map[string]string {
- return map_OAuth
-}
-
-var map_OAuthRemoteConnectionInfo = map[string]string{
- "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection",
- "url": "url is the remote URL to connect to",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
- "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
-}
-
-func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string {
- return map_OAuthRemoteConnectionInfo
-}
-
-var map_OAuthSpec = map[string]string{
- "": "OAuthSpec contains desired cluster auth configuration",
- "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.",
- "tokenConfig": "tokenConfig contains options for authorization and access tokens",
- "templates": "templates allow you to customize pages like the login page.",
-}
-
-func (OAuthSpec) SwaggerDoc() map[string]string {
- return map_OAuthSpec
-}
-
-var map_OAuthStatus = map[string]string{
- "": "OAuthStatus shows current known state of OAuth server in the cluster",
-}
-
-func (OAuthStatus) SwaggerDoc() map[string]string {
- return map_OAuthStatus
-}
-
-var map_OAuthTemplates = map[string]string{
- "": "OAuthTemplates allow for customization of pages like the login page",
- "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.",
- "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.",
- "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.",
-}
-
-func (OAuthTemplates) SwaggerDoc() map[string]string {
- return map_OAuthTemplates
-}
-
-var map_OpenIDClaims = map[string]string{
- "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
- "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim",
- "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
- "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
-}
-
-func (OpenIDClaims) SwaggerDoc() map[string]string {
- return map_OpenIDClaims
-}
-
-var map_OpenIDIdentityProvider = map[string]string{
- "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials",
- "clientID": "clientID is the oauth client ID",
- "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
- "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
- "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.",
- "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.",
- "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.",
- "claims": "claims mappings",
-}
-
-func (OpenIDIdentityProvider) SwaggerDoc() map[string]string {
- return map_OpenIDIdentityProvider
-}
-
-var map_RequestHeaderIdentityProvider = map[string]string{
- "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials",
- "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.",
- "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.",
- "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.",
- "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.",
- "headers": "headers is the set of headers to check for identity information",
- "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username",
- "nameHeaders": "nameHeaders is the set of headers to check for the display name",
- "emailHeaders": "emailHeaders is the set of headers to check for the email address",
-}
-
-func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string {
- return map_RequestHeaderIdentityProvider
-}
-
-var map_TokenConfig = map[string]string{
- "": "TokenConfig holds the necessary configuration options for authorization and access tokens",
- "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens",
- "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are integer values:\n x < 0 Tokens time out is enabled but tokens never timeout unless configured per client (e.g. `-1`)\n x = 0 Tokens time out is disabled (default)\n x > 0 Tokens time out if there is no activity for x seconds\nThe current minimum allowed value for X is 300 (5 minutes)",
-}
-
-func (TokenConfig) SwaggerDoc() map[string]string {
- return map_TokenConfig
-}
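
The sign conventions above (negative: per-client opt-in only, zero: disabled, positive: a timeout with a 300-second floor) can be encoded mechanically. A sketch of that interpretation, assuming exactly the documented semantics; `describeInactivityTimeout` is an illustrative decoder, not code from the OAuth server:

```go
package main

import "fmt"

// describeInactivityTimeout interprets accessTokenInactivityTimeoutSeconds
// using the semantics documented above.
func describeInactivityTimeout(x int) string {
	switch {
	case x < 0:
		return "timeouts enabled, but no global timeout; per-client configuration applies"
	case x == 0:
		return "timeouts disabled (default)"
	case x < 300:
		return fmt.Sprintf("invalid: %d is below the 300-second minimum", x)
	default:
		return fmt.Sprintf("tokens expire after %d seconds of inactivity", x)
	}
}

func main() {
	for _, x := range []int{-1, 0, 60, 600} {
		fmt.Printf("%4d: %s\n", x, describeInactivityTimeout(x))
	}
}
```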
-
-var map_HubSource = map[string]string{
- "": "HubSource is used to specify the hub source and its configuration",
- "name": "name is the name of one of the default hub sources",
- "disabled": "disabled is used to disable a default hub source on cluster",
-}
-
-func (HubSource) SwaggerDoc() map[string]string {
- return map_HubSource
-}
-
-var map_HubSourceStatus = map[string]string{
- "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source",
- "status": "status indicates success or failure in applying the configuration",
- "message": "message provides more information regarding failures",
-}
-
-func (HubSourceStatus) SwaggerDoc() map[string]string {
- return map_HubSourceStatus
-}
-
-var map_OperatorHub = map[string]string{
- "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.",
-}
-
-func (OperatorHub) SwaggerDoc() map[string]string {
- return map_OperatorHub
-}
-
-var map_OperatorHubList = map[string]string{
- "": "OperatorHubList contains a list of OperatorHub",
-}
-
-func (OperatorHubList) SwaggerDoc() map[string]string {
- return map_OperatorHubList
-}
-
-var map_OperatorHubSpec = map[string]string{
- "": "OperatorHubSpec defines the desired state of OperatorHub",
- "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.",
- "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.",
-}
-
-func (OperatorHubSpec) SwaggerDoc() map[string]string {
- return map_OperatorHubSpec
-}
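
The precedence rule above is: `disableAllDefaultSources` sets the baseline for every default source, and any entry in `sources` overrides it for the source it names. A minimal sketch of that merge, assuming only what the doc strings state; `hubSource` and `effectiveDisabled` are illustrative, not the operator's actual reconciliation code:

```go
package main

import "fmt"

// hubSource mirrors the HubSource shape documented above: a named default
// hub source and whether it is disabled.
type hubSource struct {
	Name     string
	Disabled bool
}

// effectiveDisabled applies the documented precedence: entries in sources
// override disableAllDefaultSources for the sources they name.
func effectiveDisabled(defaults []string, disableAll bool, sources []hubSource) map[string]bool {
	out := map[string]bool{}
	for _, d := range defaults {
		out[d] = disableAll
	}
	for _, s := range sources {
		out[s.Name] = s.Disabled
	}
	return out
}

func main() {
	defaults := []string{"redhat-operators", "community-operators"}
	overrides := []hubSource{{Name: "community-operators", Disabled: false}}
	// With disableAllDefaultSources=true, only the overridden source stays enabled.
	fmt.Println(effectiveDisabled(defaults, true, overrides))
}
```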
-
-var map_OperatorHubStatus = map[string]string{
- "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.",
- "sources": "sources encapsulates the result of applying the configuration for each hub source",
-}
-
-func (OperatorHubStatus) SwaggerDoc() map[string]string {
- return map_OperatorHubStatus
-}
-
-var map_Project = map[string]string{
- "": "Project holds cluster-wide information about Project. The canonical name is `cluster`",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Project) SwaggerDoc() map[string]string {
- return map_Project
-}
-
-var map_ProjectSpec = map[string]string{
- "": "ProjectSpec holds the project creation configuration.",
- "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
- "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.",
-}
-
-func (ProjectSpec) SwaggerDoc() map[string]string {
- return map_ProjectSpec
-}
-
-var map_TemplateReference = map[string]string{
- "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.",
- "name": "name is the metadata.name of the referenced project request template",
-}
-
-func (TemplateReference) SwaggerDoc() map[string]string {
- return map_TemplateReference
-}
-
-var map_Proxy = map[string]string{
- "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`",
- "spec": "Spec holds user-settable values for the proxy configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Proxy) SwaggerDoc() map[string]string {
- return map_Proxy
-}
-
-var map_ProxySpec = map[string]string{
- "": "ProxySpec contains cluster proxy creation configuration.",
- "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
- "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.",
- "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.",
- "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.",
- "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle used for client egress HTTPS connections. The certificate bundle must be from the CA that signed the proxy's certificate and be signed for everything. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from required key \"ca-bundle.crt\" and copying it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |",
-}
-
-func (ProxySpec) SwaggerDoc() map[string]string {
- return map_ProxySpec
-}
-
-var map_ProxyStatus = map[string]string{
- "": "ProxyStatus shows current known state of the cluster proxy.",
- "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.",
- "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.",
- "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.",
-}
-
-func (ProxyStatus) SwaggerDoc() map[string]string {
- return map_ProxyStatus
-}
-
-var map_Scheduler = map[string]string{
- "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.",
- "spec": "spec holds user settable values for configuration",
- "status": "status holds observed values from the cluster. They may not be overridden.",
-}
-
-func (Scheduler) SwaggerDoc() map[string]string {
- return map_Scheduler
-}
-
-var map_SchedulerSpec = map[string]string{
- "policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.",
- "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
- "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.",
-}
-
-func (SchedulerSpec) SwaggerDoc() map[string]string {
- return map_SchedulerSpec
-}
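
The `defaultNodeSelector` value above is the flat "key=value,key=value" label-selector syntax. A small illustrative parser for that format only; this is not the parser Kubernetes itself uses (the real one lives in k8s.io/apimachinery's labels package):

```go
package main

import (
	"fmt"
	"strings"
)

// parseNodeSelector splits a selector of the form shown above,
// "type=user-node,region=east", into a label map.
func parseNodeSelector(s string) (map[string]string, error) {
	out := map[string]string{}
	if s == "" {
		return out, nil
	}
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("malformed selector term %q", pair)
		}
		out[kv[0]] = kv[1]
	}
	return out, nil
}

func main() {
	m, err := parseNodeSelector("type=user-node,region=east")
	fmt.Println(m, err)
}
```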
-
-var map_CustomTLSProfile = map[string]string{
- "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.",
-}
-
-func (CustomTLSProfile) SwaggerDoc() map[string]string {
- return map_CustomTLSProfile
-}
-
-var map_IntermediateTLSProfile = map[string]string{
- "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29",
-}
-
-func (IntermediateTLSProfile) SwaggerDoc() map[string]string {
- return map_IntermediateTLSProfile
-}
-
-var map_ModernTLSProfile = map[string]string{
- "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility",
-}
-
-func (ModernTLSProfile) SwaggerDoc() map[string]string {
- return map_ModernTLSProfile
-}
-
-var map_OldTLSProfile = map[string]string{
- "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility",
-}
-
-func (OldTLSProfile) SwaggerDoc() map[string]string {
- return map_OldTLSProfile
-}
-
-var map_TLSProfileSpec = map[string]string{
- "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.",
- "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA",
- "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12",
-}
-
-func (TLSProfileSpec) SwaggerDoc() map[string]string {
- return map_TLSProfileSpec
-}
-
-var map_TLSSecurityProfile = map[string]string{
- "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.",
- "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.",
- "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0",
- "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2",
- "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.",
- "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1",
-}
-
-func (TLSSecurityProfile) SwaggerDoc() map[string]string {
- return map_TLSSecurityProfile
-}
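
Each profile above ultimately reduces to a cipher list plus a `minTLSVersion` string such as "TLSv1.2". As a sketch of how those version names line up with Go's crypto/tls constants; `minVersionFor` is an illustrative mapping, not code from the operators that consume TLSSecurityProfile:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// minVersionFor maps the profile version names used above to the
// corresponding crypto/tls constants.
func minVersionFor(v string) (uint16, error) {
	switch v {
	case "TLSv1.0":
		return tls.VersionTLS10, nil
	case "TLSv1.1":
		return tls.VersionTLS11, nil
	case "TLSv1.2":
		return tls.VersionTLS12, nil
	case "TLSv1.3":
		return tls.VersionTLS13, nil
	}
	return 0, fmt.Errorf("unknown TLS version %q", v)
}

func main() {
	examples := map[string]string{
		"old":          "TLSv1.0",
		"intermediate": "TLSv1.2",
		"modern":       "TLSv1.3",
	}
	for name, version := range examples {
		v, err := minVersionFor(version)
		fmt.Printf("%s: %s -> %#x err=%v\n", name, version, v, err)
	}
}
```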
-
-// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
index f38ec5956..4b0421cf9 100644
--- a/vendor/github.com/stretchr/testify/LICENSE
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 15a486ca6..dc200395c 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -5,20 +5,28 @@ import (
"reflect"
)
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
+type CompareType int
+
+const (
+ compareLess CompareType = iota - 1
+ compareEqual
+ compareGreater
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
switch kind {
case reflect.Int:
{
intobj1 := obj1.(int)
intobj2 := obj2.(int)
if intobj1 > intobj2 {
- return -1, true
+ return compareGreater, true
}
if intobj1 == intobj2 {
- return 0, true
+ return compareEqual, true
}
if intobj1 < intobj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int8:
@@ -26,13 +34,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int8obj1 := obj1.(int8)
int8obj2 := obj2.(int8)
if int8obj1 > int8obj2 {
- return -1, true
+ return compareGreater, true
}
if int8obj1 == int8obj2 {
- return 0, true
+ return compareEqual, true
}
if int8obj1 < int8obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int16:
@@ -40,13 +48,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int16obj1 := obj1.(int16)
int16obj2 := obj2.(int16)
if int16obj1 > int16obj2 {
- return -1, true
+ return compareGreater, true
}
if int16obj1 == int16obj2 {
- return 0, true
+ return compareEqual, true
}
if int16obj1 < int16obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int32:
@@ -54,13 +62,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int32obj1 := obj1.(int32)
int32obj2 := obj2.(int32)
if int32obj1 > int32obj2 {
- return -1, true
+ return compareGreater, true
}
if int32obj1 == int32obj2 {
- return 0, true
+ return compareEqual, true
}
if int32obj1 < int32obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int64:
@@ -68,13 +76,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int64obj1 := obj1.(int64)
int64obj2 := obj2.(int64)
if int64obj1 > int64obj2 {
- return -1, true
+ return compareGreater, true
}
if int64obj1 == int64obj2 {
- return 0, true
+ return compareEqual, true
}
if int64obj1 < int64obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint:
@@ -82,13 +90,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uintobj1 := obj1.(uint)
uintobj2 := obj2.(uint)
if uintobj1 > uintobj2 {
- return -1, true
+ return compareGreater, true
}
if uintobj1 == uintobj2 {
- return 0, true
+ return compareEqual, true
}
if uintobj1 < uintobj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint8:
@@ -96,13 +104,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint8obj1 := obj1.(uint8)
uint8obj2 := obj2.(uint8)
if uint8obj1 > uint8obj2 {
- return -1, true
+ return compareGreater, true
}
if uint8obj1 == uint8obj2 {
- return 0, true
+ return compareEqual, true
}
if uint8obj1 < uint8obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint16:
@@ -110,13 +118,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint16obj1 := obj1.(uint16)
uint16obj2 := obj2.(uint16)
if uint16obj1 > uint16obj2 {
- return -1, true
+ return compareGreater, true
}
if uint16obj1 == uint16obj2 {
- return 0, true
+ return compareEqual, true
}
if uint16obj1 < uint16obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint32:
@@ -124,13 +132,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint32obj1 := obj1.(uint32)
uint32obj2 := obj2.(uint32)
if uint32obj1 > uint32obj2 {
- return -1, true
+ return compareGreater, true
}
if uint32obj1 == uint32obj2 {
- return 0, true
+ return compareEqual, true
}
if uint32obj1 < uint32obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint64:
@@ -138,13 +146,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint64obj1 := obj1.(uint64)
uint64obj2 := obj2.(uint64)
if uint64obj1 > uint64obj2 {
- return -1, true
+ return compareGreater, true
}
if uint64obj1 == uint64obj2 {
- return 0, true
+ return compareEqual, true
}
if uint64obj1 < uint64obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Float32:
@@ -152,13 +160,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
float32obj1 := obj1.(float32)
float32obj2 := obj2.(float32)
if float32obj1 > float32obj2 {
- return -1, true
+ return compareGreater, true
}
if float32obj1 == float32obj2 {
- return 0, true
+ return compareEqual, true
}
if float32obj1 < float32obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Float64:
@@ -166,13 +174,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
float64obj1 := obj1.(float64)
float64obj2 := obj2.(float64)
if float64obj1 > float64obj2 {
- return -1, true
+ return compareGreater, true
}
if float64obj1 == float64obj2 {
- return 0, true
+ return compareEqual, true
}
if float64obj1 < float64obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.String:
@@ -180,18 +188,18 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
stringobj1 := obj1.(string)
stringobj2 := obj2.(string)
if stringobj1 > stringobj2 {
- return -1, true
+ return compareGreater, true
}
if stringobj1 == stringobj2 {
- return 0, true
+ return compareEqual, true
}
if stringobj1 < stringobj2 {
- return 1, true
+ return compareLess, true
}
}
}
- return 0, false
+ return compareEqual, false
}
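
The `iota - 1` trick in the new constant block is worth a note: it anchors the sequence so the three results line up with the conventional -1/0/+1 comparison encoding. A standalone reproduction of just that block:

```go
package main

import "fmt"

// CompareType reproduces the constant block introduced above. Starting the
// sequence at iota - 1 yields compareLess = -1, compareEqual = 0,
// compareGreater = 1.
type CompareType int

const (
	compareLess CompareType = iota - 1
	compareEqual
	compareGreater
)

func main() {
	fmt.Println(compareLess, compareEqual, compareGreater) // -1 0 1
}
```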
// Greater asserts that the first element is greater than the second
@@ -200,26 +208,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != -1 {
- return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -229,26 +218,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != -1 && res != 0 {
- return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
}
// Less asserts that the first element is less than the second
@@ -257,26 +227,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != 1 {
- return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -286,6 +237,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+}
+
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -296,14 +251,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
- res, isComparable := compare(e1, e2, e1Kind)
+ compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
- if res != 1 && res != 0 {
- return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
}
return true
}
+
+func containsValue(values []CompareType, value CompareType) bool {
+ for _, v := range values {
+ if v == value {
+ return true
+ }
+ }
+
+ return false
+}
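
Call-site behavior is unchanged by this consolidation; only the dispatch through `compareTwoValues` with an allowed-results slice is new. A minimal test sketch exercising the four ordering assertions (both operands must still be the same type, or the assertion fails with "Elements should be the same type"):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, 1.0, 2.0)
	assert.LessOrEqual(t, uint8(1), uint8(1))
}
```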
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index bf89ecd21..b4c46042b 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -6,6 +6,7 @@
package assert
import (
+ io "io"
http "net/http"
url "net/url"
time "time"
@@ -93,7 +94,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
// EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal.
//
-// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -127,7 +128,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -173,7 +174,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -201,11 +202,11 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+ return HTTPBodyContains(t, handler, method, url, values, body, str, append([]interface{}{msg}, args...)...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
@@ -214,18 +215,18 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+ return HTTPBodyNotContains(t, handler, method, url, values, body, str, append([]interface{}{msg}, args...)...)
}
// HTTPErrorf asserts that a specified handler returns an error status code.
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -237,7 +238,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -245,6 +246,18 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
+}
+
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
@@ -259,7 +272,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implementsf asserts that an object is implemented by the specified interface.
//
-// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -341,7 +354,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -454,6 +467,16 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
}
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@@ -476,7 +499,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -552,7 +575,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
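
Two API changes in this file matter to callers: the HTTP body assertions gain an explicit request body parameter (`io.Reader`, `nil` when the request has none), and `HTTPStatusCodef` is new. A minimal usage sketch under those signatures; `echoHandler` is an illustrative handler, not part of testify:

```go
package example

import (
	"net/http"
	"net/url"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// echoHandler writes back the "name" query parameter.
func echoHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello " + r.URL.Query().Get("name")))
}

func TestHTTPHelpers(t *testing.T) {
	// New signature: the io.Reader body comes before the expected string.
	assert.HTTPBodyContains(t, echoHandler, "GET", "/greet",
		url.Values{"name": []string{"world"}}, nil, "hello world")
	assert.HTTPBodyContainsf(t, echoHandler, "GET", "/greet",
		url.Values{"name": []string{"world"}}, strings.NewReader(""), "hello world",
		"body mismatch for %s", "world")
	// New assertion: check for an exact status code.
	assert.HTTPStatusCode(t, echoHandler, "GET", "/greet", nil, 200)
}
```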
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 75ecdcaa2..9bea8d189 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -6,6 +6,7 @@
package assert
import (
+ io "io"
http "net/http"
url "net/url"
time "time"
@@ -169,7 +170,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertable to the same types
// and equal.
//
-// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123))
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -251,7 +252,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type.
//
-// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123))
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -370,7 +371,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1))
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -385,11 +386,11 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args .
// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+ return HTTPBodyContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
@@ -398,11 +399,11 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+ return HTTPBodyContainsf(a.t, handler, method, url, values, body, str, msg, args...)
}
// HTTPBodyNotContains asserts that a specified handler returns a
@@ -411,11 +412,11 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string,
// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+ return HTTPBodyNotContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
@@ -424,11 +425,11 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, body, str, msg, args...)
}
// HTTPError asserts that a specified handler returns an error status code.
@@ -447,7 +448,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
//
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -471,7 +472,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
//
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -479,6 +480,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -515,7 +540,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface.
//
-// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -706,7 +731,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1, "error message %s", "formatted"), float64(2))
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -884,6 +909,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@@ -950,7 +995,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1102,7 +1147,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string.
//
-// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
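The hunks above add HTTPStatusCode/HTTPStatusCodef to the Assertions wrapper and repair long-garbled godoc examples, where the format arguments had been spliced into the middle of calls. A minimal sketch of how the new assertion could be used from a test; the handler name is hypothetical:

package example_test

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// notImplemented is a stand-in handler used only for illustration.
func notImplemented(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNotImplemented)
}

func TestNotImplemented(t *testing.T) {
	// Unlike HTTPSuccess/HTTPRedirect/HTTPError, which match a class of
	// codes, HTTPStatusCode asserts one exact status code.
	assert.HTTPStatusCode(t, notImplemented, "GET", "/notImplemented", nil, 501)
}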
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bdd81389a..914a10d83 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -19,7 +19,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- yaml "gopkg.in/yaml.v2"
+ yaml "gopkg.in/yaml.v3"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -45,7 +45,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
-// Comparison a custom function that returns true on success and false on failure
+// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
/*
@@ -104,11 +104,11 @@ the problem actually occurred in calling code.*/
// failed.
func CallerInfo() []string {
- pc := uintptr(0)
- file := ""
- line := 0
- ok := false
- name := ""
+ var pc uintptr
+ var ok bool
+ var file string
+ var line int
+ var name string
callers := []string{}
for i := 0; ; i++ {
@@ -429,14 +429,27 @@ func samePointers(first, second interface{}) bool {
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
- return fmt.Sprintf("%T(%#v)", expected, expected),
- fmt.Sprintf("%T(%#v)", actual, actual)
+ return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
+ fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
}
switch expected.(type) {
case time.Duration:
return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
}
- return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
+ return truncatingFormat(expected), truncatingFormat(actual)
+}
+
+// truncatingFormat formats the data and truncates it if it's too long.
+//
+// This helps keep formatted error message lines from exceeding the
+// bufio.MaxScanTokenSize max line length that the Go testing framework imposes.
+func truncatingFormat(data interface{}) string {
+ value := fmt.Sprintf("%#v", data)
+ max := bufio.MaxScanTokenSize - 100 // Give us some space for the type info too if needed.
+ if len(value) > max {
+ value = value[0:max] + "<... truncated>"
+ }
+ return value
}
// EqualValues asserts that two objects are equal or convertible to the same types
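The new truncatingFormat helper caps formatted values just under bufio.MaxScanTokenSize (64 KiB), the longest line the go test output scanner will accept, so a huge expected/actual value can no longer swallow the rest of a failure message. A standalone sketch of the same idea:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// truncate mirrors the vendored helper: render with %#v, then cap the
// result below bufio.MaxScanTokenSize, leaving headroom for type info.
func truncate(data interface{}) string {
	value := fmt.Sprintf("%#v", data)
	max := bufio.MaxScanTokenSize - 100
	if len(value) > max {
		value = value[:max] + "<... truncated>"
	}
	return value
}

func main() {
	huge := strings.Repeat("x", bufio.MaxScanTokenSize)
	fmt.Println(len(truncate(huge)) < bufio.MaxScanTokenSize) // true: capped
}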
@@ -483,12 +496,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
//
// assert.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if !isNil(object) {
return true
}
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
@@ -529,12 +542,12 @@ func isNil(object interface{}) bool {
//
// assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if isNil(object) {
return true
}
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
}
@@ -571,12 +584,11 @@ func isEmpty(object interface{}) bool {
//
// assert.Empty(t, obj)
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
pass := isEmpty(object)
if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
}
@@ -591,12 +603,11 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// assert.Equal(t, "two", obj[1])
// }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
pass := !isEmpty(object)
if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
}
@@ -639,16 +650,10 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
//
// assert.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if h, ok := t.(interface {
- Helper()
- }); ok {
- h.Helper()
- }
-
- if value != true {
+ if !value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Should be true", msgAndArgs...)
}
@@ -660,11 +665,10 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
//
// assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if value != false {
+ if value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Should be false", msgAndArgs...)
}
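True and False here, like Nil/NotNil/Empty/NotEmpty above and NoError/Error below, all get the same treatment: the passing path now returns immediately, and the tHelper type assertion plus Helper() call happen only when the assertion is about to fail (True also loses a duplicated Helper block and the `value != true` comparison). A self-contained sketch of the pattern, with testify's interfaces trimmed to the minimum needed here:

package main

import "fmt"

// TestingT and tHelper are reduced stand-ins for testify's interfaces.
type TestingT interface {
	Errorf(format string, args ...interface{})
}

type tHelper interface {
	Helper()
}

// True follows the reordered pattern: no interface type assertion on the
// happy path; Helper() is looked up only when a failure will be reported.
func True(t TestingT, value bool) bool {
	if !value {
		if h, ok := t.(tHelper); ok {
			h.Helper()
		}
		t.Errorf("Should be true")
		return false
	}
	return true
}

type fakeT struct{}

func (fakeT) Errorf(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

func main() {
	fmt.Println(True(fakeT{}, true))  // true, nothing logged
	fmt.Println(True(fakeT{}, false)) // logs "Should be true", returns false
}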
@@ -695,6 +699,21 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+}
+
// containsElement tries to loop over the list, checking if the list includes the element.
// return (false, false) if impossible.
// return (true, false) if element was not found.
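NotEqualValues is the negation of the existing EqualValues: it converts both operands to a common type before comparing, whereas NotEqual compares type and value as-is. A short sketch of the difference:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotEqualValues(t *testing.T) {
	// Different types, same value: NotEqual passes, NotEqualValues would not.
	assert.NotEqual(t, int32(5), int64(5))
	// NotEqualValues only passes when the values differ after conversion.
	assert.NotEqualValues(t, int32(5), int64(7))
}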
@@ -747,10 +766,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
ok, found := includeElement(s, contains)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
}
return true
@@ -881,27 +900,39 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
return true
}
- aKind := reflect.TypeOf(listA).Kind()
- bKind := reflect.TypeOf(listB).Kind()
+ if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
+ return false
+ }
- if aKind != reflect.Array && aKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+ extraA, extraB := diffLists(listA, listB)
+
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return true
}
- if bKind != reflect.Array && bKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+ return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
+}
+
+// isList checks that the provided value is an array or slice.
+func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
+ kind := reflect.TypeOf(list).Kind()
+ if kind != reflect.Array && kind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
+ msgAndArgs...)
}
+ return true
+}
+// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
+// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
+// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
aValue := reflect.ValueOf(listA)
bValue := reflect.ValueOf(listB)
aLen := aValue.Len()
bLen := bValue.Len()
- if aLen != bLen {
- return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
- }
-
// Mark indexes in bValue that we already used
visited := make([]bool, bLen)
for i := 0; i < aLen; i++ {
@@ -918,11 +949,38 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
}
}
if !found {
- return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+ extraA = append(extraA, element)
}
}
- return true
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ extraB = append(extraB, bValue.Index(j).Interface())
+ }
+
+ return
+}
+
+func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
+ var msg bytes.Buffer
+
+ msg.WriteString("elements differ")
+ if len(extraA) > 0 {
+ msg.WriteString("\n\nextra elements in list A:\n")
+ msg.WriteString(spewConfig.Sdump(extraA))
+ }
+ if len(extraB) > 0 {
+ msg.WriteString("\n\nextra elements in list B:\n")
+ msg.WriteString(spewConfig.Sdump(extraB))
+ }
+ msg.WriteString("\n\nlistA:\n")
+ msg.WriteString(spewConfig.Sdump(listA))
+ msg.WriteString("\n\nlistB:\n")
+ msg.WriteString(spewConfig.Sdump(listB))
+
+ return msg.String()
}
// Condition uses a Comparison to assert a complex condition.
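ElementsMatch previously bailed out on the first problem it found (type check, length mismatch, first unmatched element), each with a terse message. It is now split into isList, diffLists and formatListDiff, so a failure reports every extra element on both sides along with spew dumps of both lists. A sketch of the behaviour:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestElementsMatch(t *testing.T) {
	// Order is ignored and duplicates are matched pairwise; this passes.
	assert.ElementsMatch(t, []int{1, 2, 3}, []int{3, 1, 2})
	// A mismatch such as ([]int{1, 2, 2}, []int{2, 1}) would now fail with
	// "elements differ" plus "extra elements in list A: ... 2", instead of
	// the old "lengths don't match: 3 != 2".
}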
@@ -1058,6 +1116,8 @@ func toFloat(x interface{}) (float64, bool) {
xok := true
switch xn := x.(type) {
+ case uint:
+ xf = float64(xn)
case uint8:
xf = float64(xn)
case uint16:
@@ -1079,7 +1139,7 @@ func toFloat(x interface{}) (float64, bool) {
case float32:
xf = float64(xn)
case float64:
- xf = float64(xn)
+ xf = xn
case time.Duration:
xf = float64(xn)
default:
@@ -1193,6 +1253,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !aok {
return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
}
+ if math.IsNaN(af) {
+ return 0, errors.New("expected value must not be NaN")
+ }
if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
}
@@ -1200,6 +1263,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !bok {
return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
}
+ if math.IsNaN(bf) {
+ return 0, errors.New("actual value must not be NaN")
+ }
return math.Abs(af-bf) / math.Abs(af), nil
}
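The NaN guards close a silent-pass hole: every comparison involving NaN is false, so a NaN relative error could never exceed epsilon and InEpsilon would "succeed" without checking anything. A tiny demonstration of the underlying arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	// All false: NaN compares as neither greater, smaller, nor equal.
	fmt.Println(nan > 0.01, nan < 0.01, nan == nan)
	// This is the relative error InEpsilon computes; with a NaN operand it
	// is NaN itself, which the new guards now reject explicitly.
	fmt.Println(math.Abs(nan-1.0) / math.Abs(nan))
}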
@@ -1209,6 +1275,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if math.IsNaN(epsilon) {
+ return Fail(t, "epsilon must not be NaN")
+ }
actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
@@ -1256,10 +1325,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
// assert.Equal(t, expectedObj, actualObj)
// }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if err != nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
}
@@ -1273,11 +1342,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// assert.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
if err == nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "An error is expected but got nil.", msgAndArgs...)
}
@@ -1553,6 +1621,7 @@ var spewConfig = spew.ConfigState{
DisablePointerAddresses: true,
DisableCapacities: true,
SortKeys: true,
+ DisableMethods: true,
}
type tHelper interface {
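Setting DisableMethods on the shared spewConfig makes failure dumps print a value's raw fields rather than whatever its Stringer or error method returns, which matters when those methods hide the state under test. A small sketch of the difference; the status type is invented for illustration:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type status int

func (s status) String() string { return "OK" }

func main() {
	raw := spew.ConfigState{Indent: " ", DisableMethods: true}
	fmt.Print(raw.Sdump(status(1)))  // prints the underlying value 1
	fmt.Print(spew.Sdump(status(1))) // default config consults String(): "OK"
}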
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index df46fa777..30ef7cc06 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -2,6 +2,7 @@ package assert
import (
"fmt"
+ "io"
"net/http"
"net/http/httptest"
"net/url"
@@ -33,7 +34,6 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
@@ -56,7 +56,6 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
@@ -79,7 +78,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isErrorCode := code >= http.StatusBadRequest
@@ -90,11 +88,37 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
return isErrorCode
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ successful := code == statuscode
+ if !successful {
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ }
+
+ return successful
+}
+
// HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails.
-func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values, body io.Reader) string {
w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+
+ if values != nil {
+ url = url + "?" + values.Encode()
+ }
+ req, err := http.NewRequest(method, url, body)
if err != nil {
return ""
}
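HTTPBody and the body-content assertions gain an io.Reader parameter, so a request payload can finally be supplied (pass nil to keep the old behaviour); the query string is also only appended when values is non-nil. A sketch with a hypothetical echo handler:

package example_test

import (
	"io"
	"net/http"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// echo is a stand-in handler that writes the request body back.
func echo(w http.ResponseWriter, r *http.Request) {
	io.Copy(w, r.Body)
}

func TestEcho(t *testing.T) {
	body := strings.NewReader(`{"ping":true}`)
	assert.HTTPBodyContains(t, echo, "POST", "/echo", nil, body, "ping")
}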
@@ -108,13 +132,13 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- body := HTTPBody(handler, method, url, values)
+ httpBody := HTTPBody(handler, method, url, values, body)
- contains := strings.Contains(body, fmt.Sprint(str))
+ contains := strings.Contains(httpBody, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
@@ -128,13 +152,13 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- body := HTTPBody(handler, method, url, values)
+ httpBody := HTTPBody(handler, method, url, values, body)
- contains := strings.Contains(body, fmt.Sprint(str))
+ contains := strings.Contains(httpBody, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index cf6c7b566..693648f8a 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -7,6 +7,7 @@ package require
import (
assert "github.com/stretchr/testify/assert"
+ io "io"
http "net/http"
url "net/url"
time "time"
@@ -212,7 +213,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -315,7 +316,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ..
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -470,7 +471,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -488,11 +489,11 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) {
+ if assert.HTTPBodyContains(t, handler, method, url, values, body, str, msgAndArgs...) {
return
}
t.FailNow()
@@ -504,11 +505,11 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) {
+ if assert.HTTPBodyContainsf(t, handler, method, url, values, body, str, msg, args...) {
return
}
t.FailNow()
@@ -520,11 +521,11 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) {
+ if assert.HTTPBodyNotContains(t, handler, method, url, values, body, str, msgAndArgs...) {
return
}
t.FailNow()
@@ -536,11 +537,11 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) {
+ if assert.HTTPBodyNotContainsf(t, handler, method, url, values, body, str, msg, args...) {
return
}
t.FailNow()
@@ -565,7 +566,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string,
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -595,7 +596,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -606,6 +607,36 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
t.FailNow()
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.HTTPStatusCode(t, handler, method, url, values, statuscode, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.HTTPStatusCodef(t, handler, method, url, values, statuscode, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
@@ -651,7 +682,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
// Implementsf asserts that an object is implemented by the specified interface.
//
-// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -902,7 +933,7 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1128,6 +1159,32 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs .
t.FailNow()
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotEqualValues(t, expected, actual, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotEqualValuesf(t, expected, actual, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
@@ -1212,7 +1269,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1406,7 +1463,7 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 5aac226df..84fc1c88d 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -7,6 +7,7 @@ package require
import (
assert "github.com/stretchr/testify/assert"
+ io "io"
http "net/http"
url "net/url"
time "time"
@@ -170,7 +171,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123))
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -252,7 +253,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type.
//
-// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123))
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -371,7 +372,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1))
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -386,11 +387,11 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args .
// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+ HTTPBodyContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
@@ -399,11 +400,11 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+ HTTPBodyContainsf(a.t, handler, method, url, values, body, str, msg, args...)
}
// HTTPBodyNotContains asserts that a specified handler returns a
@@ -412,11 +413,11 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string,
// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+ HTTPBodyNotContains(a.t, handler, method, url, values, body, str, msgAndArgs...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
@@ -425,11 +426,11 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
- HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+ HTTPBodyNotContainsf(a.t, handler, method, url, values, body, str, msg, args...)
}
// HTTPError asserts that a specified handler returns an error status code.
@@ -448,7 +449,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
//
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -472,7 +473,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
//
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -480,6 +481,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -516,7 +541,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface.
//
-// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -707,7 +732,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1, "error message %s", "formatted"), float64(2))
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -885,6 +910,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@@ -951,7 +996,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@@ -1103,7 +1148,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string.
//
-// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar.go b/vendor/github.com/vbauerster/mpb/v5/bar.go
index 13bda2247..9c28a07a8 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar.go
@@ -6,28 +6,15 @@ import (
"fmt"
"io"
"log"
+ "runtime/debug"
"strings"
"time"
- "unicode/utf8"
+ "github.com/acarl005/stripansi"
+ "github.com/mattn/go-runewidth"
"github.com/vbauerster/mpb/v5/decor"
)
-// BarFiller interface.
-// Bar renders itself by calling BarFiller's Fill method. You can
-// literally have any bar kind, by implementing this interface and
-// passing it to the *Progress.Add(...) *Bar method.
-type BarFiller interface {
- Fill(w io.Writer, width int, stat *decor.Statistics)
-}
-
-// BarFillerFunc is function type adapter to convert function into Filler.
-type BarFillerFunc func(w io.Writer, width int, stat *decor.Statistics)
-
-func (f BarFillerFunc) Fill(w io.Writer, width int, stat *decor.Statistics) {
- f(w, width, stat)
-}
-
// Bar represents a progress Bar.
type Bar struct {
priority int // used by heap
@@ -55,21 +42,22 @@ type Bar struct {
recoveredPanic interface{}
}
-type extFunc func(in io.Reader, tw int, st *decor.Statistics) (out io.Reader, lines int)
+type extFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int)
type bState struct {
- baseF BarFiller
- filler BarFiller
id int
- width int
+ priority int
+ reqWidth int
total int64
current int64
+ refill int64
lastN int64
iterated bool
trimSpace bool
toComplete bool
completeFlushed bool
ignoreComplete bool
+ dropOnComplete bool
noPop bool
aDecorators []decor.Decorator
pDecorators []decor.Decorator
@@ -77,12 +65,10 @@ type bState struct {
ewmaDecorators []decor.EwmaDecorator
shutdownListeners []decor.ShutdownListener
bufP, bufB, bufA *bytes.Buffer
+ filler BarFiller
+ middleware func(BarFiller) BarFiller
extender extFunc
- // priority overrides *Bar's priority, if set
- priority int
- // dropOnComplete propagates to *Bar
- dropOnComplete bool
// runningBar is a key for *pState.parkedBars
runningBar *Bar
@@ -146,13 +132,8 @@ func (b *Bar) Current() int64 {
// Given default bar style is "[=>-]<+", refill rune is '+'.
// To set bar style use mpb.BarStyle(string) BarOption.
func (b *Bar) SetRefill(amount int64) {
- type refiller interface {
- SetRefill(int64)
- }
b.operateState <- func(s *bState) {
- if f, ok := s.baseF.(refiller); ok {
- f.SetRefill(amount)
- }
+ s.refill = amount
}
}
@@ -318,44 +299,40 @@ func (b *Bar) serve(ctx context.Context, s *bState) {
}
func (b *Bar) render(tw int) {
- if b.recoveredPanic != nil {
- b.toShutdown = false
- b.frameCh <- b.panicToFrame(tw)
- return
- }
select {
case b.operateState <- func(s *bState) {
+ stat := newStatistics(tw, s)
defer func() {
// recovering if user defined decorator panics for example
if p := recover(); p != nil {
- b.dlogger.Println(p)
+ s.extender = makePanicExtender(p)
+ frame, lines := s.extender(nil, s.reqWidth, stat)
+ b.extendedLines = lines
+ b.toShutdown = !b.toShutdown
b.recoveredPanic = p
- b.toShutdown = !s.completeFlushed
- b.frameCh <- b.panicToFrame(tw)
+ b.frameCh <- frame
+ b.dlogger.Println(p)
}
+ s.completeFlushed = s.toComplete
}()
-
- st := newStatistics(s)
- frame := s.draw(tw, st)
- frame, b.extendedLines = s.extender(frame, tw, st)
-
+ frame, lines := s.extender(s.draw(stat), s.reqWidth, stat)
+ b.extendedLines = lines
b.toShutdown = s.toComplete && !s.completeFlushed
- s.completeFlushed = s.toComplete
b.frameCh <- frame
}:
case <-b.done:
s := b.cacheState
- st := newStatistics(s)
- frame := s.draw(tw, st)
- frame, b.extendedLines = s.extender(frame, tw, st)
+ stat := newStatistics(tw, s)
+ var r io.Reader
+ if b.recoveredPanic == nil {
+ r = s.draw(stat)
+ }
+ frame, lines := s.extender(r, s.reqWidth, stat)
+ b.extendedLines = lines
b.frameCh <- frame
}
}
-func (b *Bar) panicToFrame(termWidth int) io.Reader {
- return strings.NewReader(fmt.Sprintf(fmt.Sprintf("%%.%dv\n", termWidth), b.recoveredPanic))
-}
-
func (b *Bar) subscribeDecorators() {
var averageDecorators []decor.AverageDecorator
var ewmaDecorators []decor.EwmaDecorator
@@ -398,34 +375,41 @@ func (b *Bar) wSyncTable() [][]chan int {
}
}
-func (s *bState) draw(termWidth int, stat *decor.Statistics) io.Reader {
+func (s *bState) draw(stat decor.Statistics) io.Reader {
+ if !s.trimSpace {
+ stat.AvailableWidth -= 2
+ s.bufB.WriteByte(' ')
+ defer s.bufB.WriteByte(' ')
+ }
+
+ nlr := strings.NewReader("\n")
+ tw := stat.AvailableWidth
for _, d := range s.pDecorators {
- s.bufP.WriteString(d.Decor(stat))
+ str := d.Decor(stat)
+ stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
+ s.bufP.WriteString(str)
+ }
+ if stat.AvailableWidth <= 0 {
+ trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…"))
+ s.bufP.Reset()
+ return io.MultiReader(trunc, s.bufB, nlr)
}
+ tw = stat.AvailableWidth
for _, d := range s.aDecorators {
- s.bufA.WriteString(d.Decor(stat))
+ str := d.Decor(stat)
+ stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
+ s.bufA.WriteString(str)
}
-
- s.bufA.WriteByte('\n')
-
- prependCount := utf8.RuneCount(s.bufP.Bytes())
- appendCount := utf8.RuneCount(s.bufA.Bytes()) - 1
-
- if fitWidth := s.width; termWidth > 1 {
- if !s.trimSpace {
- // reserve space for edge spaces
- termWidth -= 2
- s.bufB.WriteByte(' ')
- defer s.bufB.WriteByte(' ')
- }
- if prependCount+s.width+appendCount > termWidth {
- fitWidth = termWidth - prependCount - appendCount
- }
- s.filler.Fill(s.bufB, fitWidth, stat)
+ if stat.AvailableWidth <= 0 {
+ trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…"))
+ s.bufA.Reset()
+ return io.MultiReader(s.bufP, s.bufB, trunc, nlr)
}
- return io.MultiReader(s.bufP, s.bufB, s.bufA)
+ s.filler.Fill(s.bufB, s.reqWidth, stat)
+
+ return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr)
}
func (s *bState) wSyncTable() [][]chan int {
@@ -450,12 +434,14 @@ func (s *bState) wSyncTable() [][]chan int {
return table
}
-func newStatistics(s *bState) *decor.Statistics {
- return &decor.Statistics{
- ID: s.id,
- Completed: s.completeFlushed,
- Total: s.total,
- Current: s.current,
+func newStatistics(tw int, s *bState) decor.Statistics {
+ return decor.Statistics{
+ ID: s.id,
+ AvailableWidth: tw,
+ Total: s.total,
+ Current: s.current,
+ Refill: s.refill,
+ Completed: s.completeFlushed,
}
}
@@ -476,3 +462,17 @@ func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) {
d.EwmaUpdate(s.lastN, dur)
}
}
+
+func makePanicExtender(p interface{}) extFunc {
+ pstr := fmt.Sprint(p)
+ stack := debug.Stack()
+ stackLines := bytes.Count(stack, []byte("\n"))
+ return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) {
+ mr := io.MultiReader(
+ strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")),
+ strings.NewReader(fmt.Sprintf("\n%#v\n", st)),
+ bytes.NewReader(stack),
+ )
+ return mr, stackLines + 1
+ }
+}
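The rewritten draw path passes decor.Statistics by value with a new AvailableWidth field, measures every decorator's output in display cells (after stripping ANSI escapes) via go-runewidth, and truncates overlong prefix/suffix decorators with an ellipsis; a panic in a decorator is now turned into an extender that prints the message plus a stack trace rather than a single truncated frame. A sketch of the width arithmetic the new code relies on:

package main

import (
	"fmt"

	"github.com/acarl005/stripansi"
	"github.com/mattn/go-runewidth"
)

func main() {
	// A decorator may emit colour codes and wide runes; byte length lies.
	s := "\x1b[32m进度\x1b[0m" // green CJK text, 4 cells on screen
	fmt.Println(len(s), runewidth.StringWidth(stripansi.Strip(s)))
	// Overflowing decorator output is now cut to the available width:
	fmt.Println(runewidth.Truncate("transferring layer 3/7", 12, "…"))
}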
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go b/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
index 33dbf191d..07148bffb 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar_filler.go
@@ -2,137 +2,29 @@ package mpb
import (
"io"
- "unicode/utf8"
"github.com/vbauerster/mpb/v5/decor"
- "github.com/vbauerster/mpb/v5/internal"
)
-const (
- rLeft = iota
- rFill
- rTip
- rEmpty
- rRight
- rRevTip
- rRefill
-)
-
-// DefaultBarStyle is a string containing 7 runes.
-// Each rune is a building block of a progress bar.
-//
-// '1st rune' stands for left boundary rune
-//
-// '2nd rune' stands for fill rune
+// BarFiller interface.
+// Bar (without decorators) renders itself by calling BarFiller's Fill method.
//
-// '3rd rune' stands for tip rune
+// `reqWidth` is requested width, which is set via:
+// func WithWidth(width int) ContainerOption
+// func BarWidth(width int) BarOption
//
-// '4th rune' stands for empty rune
+// Default implementations can be obtained via:
//
-// '5th rune' stands for right boundary rune
+// func NewBarFiller(style string, reverse bool) BarFiller
+// func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller
//
-// '6th rune' stands for reverse tip rune
-//
-// '7th rune' stands for refill rune
-//
-const DefaultBarStyle string = "[=>-]<+"
-
-type barFiller struct {
- format [][]byte
- tip []byte
- refill int64
- reverse bool
- flush func(w io.Writer, bb [][]byte)
-}
-
-// NewBarFiller constucts mpb.BarFiller, to be used with *Progress.Add(...) *Bar method.
-func NewBarFiller(style string, reverse bool) BarFiller {
- if style == "" {
- style = DefaultBarStyle
- }
- bf := &barFiller{
- format: make([][]byte, utf8.RuneCountInString(style)),
- reverse: reverse,
- }
- bf.SetStyle(style)
- return bf
-}
-
-func (s *barFiller) SetStyle(style string) {
- if !utf8.ValidString(style) {
- return
- }
- src := make([][]byte, 0, utf8.RuneCountInString(style))
- for _, r := range style {
- src = append(src, []byte(string(r)))
- }
- copy(s.format, src)
- s.SetReverse(s.reverse)
-}
-
-func (s *barFiller) SetReverse(reverse bool) {
- if reverse {
- s.tip = s.format[rRevTip]
- s.flush = reverseFlush
- } else {
- s.tip = s.format[rTip]
- s.flush = regularFlush
- }
- s.reverse = reverse
+type BarFiller interface {
+ Fill(w io.Writer, reqWidth int, stat decor.Statistics)
}
-func (s *barFiller) SetRefill(amount int64) {
- s.refill = amount
-}
-
-func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
- // don't count rLeft and rRight as progress
- width -= 2
- if width < 2 {
- return
- }
- w.Write(s.format[rLeft])
- defer w.Write(s.format[rRight])
-
- bb := make([][]byte, width)
-
- cwidth := int(internal.PercentageRound(stat.Total, stat.Current, width))
-
- for i := 0; i < cwidth; i++ {
- bb[i] = s.format[rFill]
- }
-
- if s.refill > 0 {
- var rwidth int
- if s.refill > stat.Current {
- rwidth = cwidth
- } else {
- rwidth = int(internal.PercentageRound(stat.Total, int64(s.refill), width))
- }
- for i := 0; i < rwidth; i++ {
- bb[i] = s.format[rRefill]
- }
- }
-
- if cwidth > 0 && cwidth < width {
- bb[cwidth-1] = s.tip
- }
-
- for i := cwidth; i < width; i++ {
- bb[i] = s.format[rEmpty]
- }
-
- s.flush(w, bb)
-}
-
-func regularFlush(w io.Writer, bb [][]byte) {
- for i := 0; i < len(bb); i++ {
- w.Write(bb[i])
- }
-}
+// BarFillerFunc is a function type adapter to convert a function into BarFiller.
+type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics)
-func reverseFlush(w io.Writer, bb [][]byte) {
- for i := len(bb) - 1; i >= 0; i-- {
- w.Write(bb[i])
- }
+func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
+ f(w, reqWidth, stat)
}
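BarFiller moves here from bar.go with a changed contract: Fill now receives the requested width (from WithWidth or BarWidth) plus a decor.Statistics value carrying AvailableWidth, instead of a precomputed width and a *Statistics pointer. A minimal sketch of a custom filler under the new signature:

package main

import (
	"fmt"
	"io"

	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	p := mpb.New(mpb.WithWidth(30))
	// BarFillerFunc adapts a plain function to the new interface.
	counter := mpb.BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
		fmt.Fprintf(w, "%d/%d", st.Current, st.Total)
	})
	bar := p.Add(10, counter)
	for i := 0; i < 10; i++ {
		bar.Increment()
	}
	p.Wait()
}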
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go
new file mode 100644
index 000000000..637bd88ca
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v5/bar_filler_bar.go
@@ -0,0 +1,173 @@
+package mpb
+
+import (
+ "bytes"
+ "io"
+ "unicode/utf8"
+
+ "github.com/mattn/go-runewidth"
+ "github.com/vbauerster/mpb/v5/decor"
+ "github.com/vbauerster/mpb/v5/internal"
+)
+
+const (
+ rLeft = iota
+ rFill
+ rTip
+ rSpace
+ rRight
+ rRevTip
+ rRefill
+)
+
+// DefaultBarStyle is a string containing 7 runes.
+// Each rune is a building block of a progress bar.
+//
+// '1st rune' stands for left boundary rune
+//
+// '2nd rune' stands for fill rune
+//
+// '3rd rune' stands for tip rune
+//
+// '4th rune' stands for space rune
+//
+// '5th rune' stands for right boundary rune
+//
+// '6th rune' stands for reverse tip rune
+//
+// '7th rune' stands for refill rune
+//
+const DefaultBarStyle string = "[=>-]<+"
+
+type barFiller struct {
+ format [][]byte
+ rwidth []int
+ tip []byte
+ refill int64
+ reverse bool
+ flush func(io.Writer, *space, [][]byte)
+}
+
+type space struct {
+ space []byte
+ rwidth int
+ count int
+}
+
+// NewBarFiller constructs mpb.BarFiller, to be used with the *Progress.Add(...) *Bar method.
+func NewBarFiller(style string, reverse bool) BarFiller {
+ bf := &barFiller{
+ format: make([][]byte, len(DefaultBarStyle)),
+ rwidth: make([]int, len(DefaultBarStyle)),
+ reverse: reverse,
+ }
+ bf.SetStyle(style)
+ return bf
+}
+
+func (s *barFiller) SetStyle(style string) {
+ if !utf8.ValidString(style) {
+ panic("invalid bar style")
+ }
+ if style == "" {
+ style = DefaultBarStyle
+ }
+ src := make([][]byte, utf8.RuneCountInString(style))
+ i := 0
+ for _, r := range style {
+ s.rwidth[i] = runewidth.RuneWidth(r)
+ src[i] = []byte(string(r))
+ i++
+ }
+ copy(s.format, src)
+ s.SetReverse(s.reverse)
+}
+
+func (s *barFiller) SetReverse(reverse bool) {
+ if reverse {
+ s.tip = s.format[rRevTip]
+ s.flush = reverseFlush
+ } else {
+ s.tip = s.format[rTip]
+ s.flush = regularFlush
+ }
+ s.reverse = reverse
+}
+
+func (s *barFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
+ width := internal.WidthForBarFiller(reqWidth, stat.AvailableWidth)
+
+ if brackets := s.rwidth[rLeft] + s.rwidth[rRight]; width < brackets {
+ return
+ } else {
+ // don't count brackets as progress
+ width -= brackets
+ }
+ w.Write(s.format[rLeft])
+ defer w.Write(s.format[rRight])
+
+ cwidth := int(internal.PercentageRound(stat.Total, stat.Current, width))
+ space := &space{
+ space: s.format[rSpace],
+ rwidth: s.rwidth[rSpace],
+ count: width - cwidth,
+ }
+
+ index, refill := 0, 0
+ bb := make([][]byte, cwidth)
+
+ if cwidth > 0 && cwidth != width {
+ bb[index] = s.tip
+ cwidth -= s.rwidth[rTip]
+ index++
+ }
+
+ if stat.Refill > 0 {
+ refill = int(internal.PercentageRound(stat.Total, int64(stat.Refill), width))
+ if refill > cwidth {
+ refill = cwidth
+ }
+ cwidth -= refill
+ }
+
+ for cwidth > 0 {
+ bb[index] = s.format[rFill]
+ cwidth -= s.rwidth[rFill]
+ index++
+ }
+
+ for refill > 0 {
+ bb[index] = s.format[rRefill]
+ refill -= s.rwidth[rRefill]
+ index++
+ }
+
+ if cwidth+refill < 0 || space.rwidth > 1 {
+ buf := new(bytes.Buffer)
+ s.flush(buf, space, bb[:index])
+ io.WriteString(w, runewidth.Truncate(buf.String(), width, "…"))
+ return
+ }
+
+ s.flush(w, space, bb)
+}
+
+func regularFlush(w io.Writer, space *space, bb [][]byte) {
+ for i := len(bb) - 1; i >= 0; i-- {
+ w.Write(bb[i])
+ }
+ for space.count > 0 {
+ w.Write(space.space)
+ space.count -= space.rwidth
+ }
+}
+
+func reverseFlush(w io.Writer, space *space, bb [][]byte) {
+ for space.count > 0 {
+ w.Write(space.space)
+ space.count -= space.rwidth
+ }
+ for i := 0; i < len(bb); i++ {
+ w.Write(bb[i])
+ }
+}
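The bar filler, now in its own file, tracks the display width of every style rune, moves refill state into decor.Statistics (bar.SetRefill writes s.refill instead of type-asserting the filler), and falls back to rune-width-aware truncation when double-width runes would overflow. Note the behaviour change: SetStyle now panics on invalid UTF-8 rather than silently keeping the old style. Construction is unchanged from a caller's point of view; a sketch:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
)

func main() {
	p := mpb.New(mpb.WithWidth(40))
	// The style string still follows the 7-rune layout documented above:
	// left, fill, tip, space, right, reverse tip, refill.
	bar := p.AddBar(100, mpb.BarStyle("[=>-]<+"))
	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(2 * time.Millisecond)
	}
	p.Wait()
}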
diff --git a/vendor/github.com/vbauerster/mpb/v5/spinner_filler.go b/vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go
index 517725fbf..d2cb2b726 100644
--- a/vendor/github.com/vbauerster/mpb/v5/spinner_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar_filler_spinner.go
@@ -6,6 +6,7 @@ import (
"unicode/utf8"
"github.com/vbauerster/mpb/v5/decor"
+ "github.com/vbauerster/mpb/v5/internal"
)
// SpinnerAlignment enum.
@@ -39,7 +40,8 @@ func NewSpinnerFiller(style []string, alignment SpinnerAlignment) BarFiller {
return filler
}
-func (s *spinnerFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
+func (s *spinnerFiller) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
+ width := internal.WidthForBarFiller(reqWidth, stat.AvailableWidth)
frame := s.frames[s.count%uint(len(s.frames))]
frameWidth := utf8.RuneCountInString(frame)
diff --git a/vendor/github.com/vbauerster/mpb/v5/bar_option.go b/vendor/github.com/vbauerster/mpb/v5/bar_option.go
index 76f2050f1..31b7939b0 100644
--- a/vendor/github.com/vbauerster/mpb/v5/bar_option.go
+++ b/vendor/github.com/vbauerster/mpb/v5/bar_option.go
@@ -46,7 +46,7 @@ func BarID(id int) BarOption {
// BarWidth sets bar width independent of the container.
func BarWidth(width int) BarOption {
return func(s *bState) {
- s.width = width
+ s.reqWidth = width
}
}
@@ -77,19 +77,22 @@ func BarFillerClearOnComplete() BarOption {
// BarFillerOnComplete replaces bar's filler with message, on complete event.
func BarFillerOnComplete(message string) BarOption {
- return func(s *bState) {
- s.filler = makeBarFillerOnComplete(s.baseF, message)
- }
+ return BarFillerMiddleware(func(base BarFiller) BarFiller {
+ return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
+ if st.Completed {
+ io.WriteString(w, message)
+ } else {
+ base.Fill(w, reqWidth, st)
+ }
+ })
+ })
}
-func makeBarFillerOnComplete(filler BarFiller, message string) BarFiller {
- return BarFillerFunc(func(w io.Writer, width int, st *decor.Statistics) {
- if st.Completed {
- io.WriteString(w, message)
- } else {
- filler.Fill(w, width, st)
- }
- })
+// BarFillerMiddleware provides a way to augment the default BarFiller.
+func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption {
+ return func(s *bState) {
+ s.middleware = middle
+ }
}
// BarPriority sets bar's priority. Zero is highest priority, i.e. bar
@@ -103,21 +106,20 @@ func BarPriority(priority int) BarOption {
// BarExtender is an option to extend bar to the next new line, with
// arbitrary output.
-func BarExtender(extender BarFiller) BarOption {
- if extender == nil {
+func BarExtender(filler BarFiller) BarOption {
+ if filler == nil {
return nil
}
return func(s *bState) {
- s.extender = makeExtFunc(extender)
+ s.extender = makeExtFunc(filler)
}
}
-func makeExtFunc(extender BarFiller) extFunc {
+func makeExtFunc(filler BarFiller) extFunc {
buf := new(bytes.Buffer)
- nl := []byte("\n")
- return func(r io.Reader, tw int, st *decor.Statistics) (io.Reader, int) {
- extender.Fill(buf, tw, st)
- return io.MultiReader(r, buf), bytes.Count(buf.Bytes(), nl)
+ return func(r io.Reader, reqWidth int, st decor.Statistics) (io.Reader, int) {
+ filler.Fill(buf, reqWidth, st)
+ return io.MultiReader(r, buf), bytes.Count(buf.Bytes(), []byte("\n"))
}
}
@@ -139,7 +141,7 @@ func BarStyle(style string) BarOption {
SetStyle(string)
}
return func(s *bState) {
- if t, ok := s.baseF.(styleSetter); ok {
+ if t, ok := s.filler.(styleSetter); ok {
t.SetStyle(style)
}
}
@@ -159,7 +161,7 @@ func BarReverse() BarOption {
SetReverse(bool)
}
return func(s *bState) {
- if t, ok := s.baseF.(revSetter); ok {
+ if t, ok := s.filler.(revSetter); ok {
t.SetReverse(true)
}
}
@@ -189,7 +191,7 @@ func MakeFillerTypeSpecificBarOption(
cb func(interface{}),
) BarOption {
return func(s *bState) {
- if t, ok := typeChecker(s.baseF); ok {
+ if t, ok := typeChecker(s.filler); ok {
cb(t)
}
}
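The BarFillerMiddleware hook above replaces the old extractBaseFiller plumbing: callers now wrap the base filler themselves instead of the library unwrapping it. A minimal sketch of how a caller might use the hook, assuming only the mpb/v5 API visible in this diff:

```Go
package main

import (
	"io"

	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(100,
		// Wrap the default filler: once the bar completes, render a
		// static message; otherwise delegate to the wrapped filler.
		mpb.BarFillerMiddleware(func(base mpb.BarFiller) mpb.BarFiller {
			return mpb.BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
				if st.Completed {
					io.WriteString(w, "done")
				} else {
					base.Fill(w, reqWidth, st)
				}
			})
		}),
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}
```

This is essentially what the rewritten BarFillerOnComplete does internally, expressed as user code.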
diff --git a/vendor/github.com/vbauerster/mpb/v5/options.go b/vendor/github.com/vbauerster/mpb/v5/container_option.go
index 048870284..fac59e436 100644
--- a/vendor/github.com/vbauerster/mpb/v5/options.go
+++ b/vendor/github.com/vbauerster/mpb/v5/container_option.go
@@ -21,14 +21,11 @@ func WithWaitGroup(wg *sync.WaitGroup) ContainerOption {
}
}
-// WithWidth sets container width. Default is 80. Bars inherit this
-// width, as long as no BarWidth is applied.
-func WithWidth(w int) ContainerOption {
+// WithWidth sets container width. If not set, underlying bars will
+// occupy the whole terminal width.
+func WithWidth(width int) ContainerOption {
return func(s *pState) {
- if w < 0 {
- return
- }
- s.width = w
+ s.reqWidth = width
}
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
index 9ec1ec66b..bb503360d 100644
--- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer.go
@@ -7,7 +7,7 @@ import (
"io"
"os"
- "golang.org/x/crypto/ssh/terminal"
+ "github.com/mattn/go-isatty"
)
// NotATTY not a TeleTYpewriter error.
@@ -30,13 +30,14 @@ func New(out io.Writer) *Writer {
w := &Writer{out: out}
if f, ok := out.(*os.File); ok {
w.fd = f.Fd()
- w.isTerminal = terminal.IsTerminal(int(w.fd))
+ w.isTerminal = isatty.IsTerminal(w.fd)
}
return w
}
// Flush flushes the underlying buffer.
func (w *Writer) Flush(lineCount int) (err error) {
+ // some terminals interpret clear 0 lines as clear 1
if w.lineCount > 0 {
w.clearLines()
}
@@ -63,9 +64,9 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
// GetWidth returns width of underlying terminal.
func (w *Writer) GetWidth() (int, error) {
- if w.isTerminal {
- tw, _, err := terminal.GetSize(int(w.fd))
- return tw, err
+ if !w.isTerminal {
+ return -1, NotATTY
}
- return -1, NotATTY
+ tw, _, err := GetSize(w.fd)
+ return tw, err
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
index 3fb8b7d75..e836cec3a 100644
--- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_posix.go
@@ -2,8 +2,21 @@
package cwriter
-import "fmt"
+import (
+ "fmt"
+
+ "golang.org/x/sys/unix"
+)
func (w *Writer) clearLines() {
fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd uintptr) (width, height int, err error) {
+ ws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+ if err != nil {
+ return -1, -1, err
+ }
+ return int(ws.Col), int(ws.Row), nil
+}
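For context, the new POSIX GetSize is a thin wrapper over the TIOCGWINSZ ioctl from golang.org/x/sys/unix, replacing the old golang.org/x/crypto/ssh/terminal dependency. A standalone sketch of the same query against stdout (assumes a POSIX terminal):

```Go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Same approach as cwriter.GetSize: ask the kernel for the window
	// size of the terminal attached to the file descriptor.
	ws, err := unix.IoctlGetWinsize(int(os.Stdout.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	fmt.Printf("%d columns x %d rows\n", ws.Col, ws.Row)
}
```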
diff --git a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
index 712528900..7a3ed5bcc 100644
--- a/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
+++ b/vendor/github.com/vbauerster/mpb/v5/cwriter/writer_windows.go
@@ -14,7 +14,6 @@ var (
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
- procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
)
type coord struct {
@@ -41,8 +40,9 @@ func (w *Writer) clearLines() {
if !w.isTerminal {
fmt.Fprintf(w.out, cuuAndEd, w.lineCount)
}
- var info consoleScreenBufferInfo
- procGetConsoleScreenBufferInfo.Call(w.fd, uintptr(unsafe.Pointer(&info)))
+
+ info := new(consoleScreenBufferInfo)
+ procGetConsoleScreenBufferInfo.Call(w.fd, uintptr(unsafe.Pointer(info)))
info.cursorPosition.y -= int16(w.lineCount)
if info.cursorPosition.y < 0 {
@@ -51,10 +51,19 @@ func (w *Writer) clearLines() {
procSetConsoleCursorPosition.Call(w.fd, uintptr(uint32(uint16(info.cursorPosition.y))<<16|uint32(uint16(info.cursorPosition.x))))
// clear the lines
- cursor := coord{
+ cursor := &coord{
x: info.window.left,
y: info.cursorPosition.y,
}
count := uint32(info.size.x) * uint32(w.lineCount)
- procFillConsoleOutputCharacter.Call(w.fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(new(uint32))))
+ procFillConsoleOutputCharacter.Call(w.fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(cursor)), uintptr(unsafe.Pointer(new(uint32))))
+}
+
+// GetSize returns the visible dimensions of the given terminal.
+//
+// These dimensions don't include any scrollback buffer height.
+func GetSize(fd uintptr) (width, height int, err error) {
+ info := new(consoleScreenBufferInfo)
+ procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(info)))
+ return int(info.window.right - info.window.left), int(info.window.bottom - info.window.top), nil
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/any.go b/vendor/github.com/vbauerster/mpb/v5/decor/any.go
index bf9cf51a5..39518f594 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/any.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/any.go
@@ -1,21 +1,21 @@
package decor
// Any decorator displays text that can be changed during the decorator's
-// lifetime via provided func call back.
+// lifetime via provided DecorFunc.
//
-// `f` call back which provides string to display
+// `fn` DecorFunc callback
//
// `wcc` optional WC config
//
-func Any(f func(*Statistics) string, wcc ...WC) Decorator {
- return &any{initWC(wcc...), f}
+func Any(fn DecorFunc, wcc ...WC) Decorator {
+ return &any{initWC(wcc...), fn}
}
type any struct {
WC
- f func(*Statistics) string
+ fn DecorFunc
}
-func (d *any) Decor(s *Statistics) string {
- return d.FormatMsg(d.f(s))
+func (d *any) Decor(s Statistics) string {
+ return d.FormatMsg(d.fn(s))
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/counters.go b/vendor/github.com/vbauerster/mpb/v5/decor/counters.go
index 297bf937b..010ec371a 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/counters.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/counters.go
@@ -46,21 +46,21 @@ func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
return Any(chooseSizeProducer(unit, pairFmt), wcc...)
}
-func chooseSizeProducer(unit int, format string) func(*Statistics) string {
+func chooseSizeProducer(unit int, format string) DecorFunc {
if format == "" {
format = "%d / %d"
}
switch unit {
case UnitKiB:
- return func(s *Statistics) string {
+ return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1024(s.Current), SizeB1024(s.Total))
}
case UnitKB:
- return func(s *Statistics) string {
+ return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1000(s.Current), SizeB1000(s.Total))
}
default:
- return func(s *Statistics) string {
+ return func(s Statistics) string {
return fmt.Sprintf(format, s.Current, s.Total)
}
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v5/decor/decorator.go
index 5bca63d52..e81fae367 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/decorator.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/decorator.go
@@ -3,9 +3,9 @@ package decor
import (
"fmt"
"time"
- "unicode/utf8"
"github.com/acarl005/stripansi"
+ "github.com/mattn/go-runewidth"
)
const (
@@ -47,22 +47,32 @@ const (
// Statistics consists of progress related statistics, that Decorator
// may need.
type Statistics struct {
- ID int
- Completed bool
- Total int64
- Current int64
+ ID int
+ AvailableWidth int
+ Total int64
+ Current int64
+ Refill int64
+ Completed bool
}
// Decorator interface.
-// Implementors should embed WC type, that way only single method
-// Decor(*Statistics) needs to be implemented, the rest will be handled
-// by WC type.
+// Most of the time there is no need to implement this interface
+// manually, as the decor package already provides a wide range of
+// decorators which implement it. If, however, the built-in decorators
+// don't meet your needs, you're free to implement your own by satisfying
+// this interface. The easiest way is to convert a `DecorFunc` into a
+// `Decorator` with the provided `func Any(DecorFunc, ...WC) Decorator`.
type Decorator interface {
Configurator
Synchronizer
- Decor(*Statistics) string
+ Decor(Statistics) string
}
+// DecorFunc is a function type for building decorators.
+// To be used with `func Any(DecorFunc, ...WC) Decorator`.
+type DecorFunc func(Statistics) string
+
// Synchronizer interface.
// All decorators implement this interface implicitly. Its Sync
// method exposes width sync channel, if DSyncWidth bit is set.
@@ -117,38 +127,35 @@ var (
// W represents width and C represents bit set of width related config.
// A decorator should embed WC, to enable width synchronization.
type WC struct {
- W int
- C int
- dynFormat string
- wsync chan int
+ W int
+ C int
+ fill func(s string, w int) string
+ wsync chan int
}
// FormatMsg formats final message according to WC.W and WC.C.
// Should be called by any Decorator implementation.
func (wc *WC) FormatMsg(msg string) string {
- var format string
- runeCount := utf8.RuneCountInString(stripansi.Strip(msg))
- ansiCount := utf8.RuneCountInString(msg) - runeCount
+ pureWidth := runewidth.StringWidth(msg)
+ stripWidth := runewidth.StringWidth(stripansi.Strip(msg))
+ maxCell := wc.W
if (wc.C & DSyncWidth) != 0 {
+ cellCount := stripWidth
if (wc.C & DextraSpace) != 0 {
- runeCount++
+ cellCount++
}
- wc.wsync <- runeCount
- max := <-wc.wsync
- format = fmt.Sprintf(wc.dynFormat, ansiCount+max)
- } else {
- format = fmt.Sprintf(wc.dynFormat, ansiCount+wc.W)
+ wc.wsync <- cellCount
+ maxCell = <-wc.wsync
}
- return fmt.Sprintf(format, msg)
+ return wc.fill(msg, maxCell+(pureWidth-stripWidth))
}
// Init initializes width related config.
func (wc *WC) Init() WC {
- wc.dynFormat = "%%"
+ wc.fill = runewidth.FillLeft
if (wc.C & DidentRight) != 0 {
- wc.dynFormat += "-"
+ wc.fill = runewidth.FillRight
}
- wc.dynFormat += "%ds"
if (wc.C & DSyncWidth) != 0 {
// it's a deliberate choice to override wsync on each Init() call,
// this way globals like WCSyncSpace can be reused
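The switch from unicode/utf8 rune counting to go-runewidth cell counting is what keeps decorator alignment correct for double-width characters. A quick illustration of the difference:

```Go
package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "进度" // two runes, but four terminal cells
	fmt.Println(utf8.RuneCountInString(s)) // 2: what the old code measured
	fmt.Println(runewidth.StringWidth(s))  // 4: what the terminal actually renders
}
```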
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go
index c9999a3b5..e389f1581 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/elapsed.go
@@ -25,11 +25,11 @@ func Elapsed(style TimeStyle, wcc ...WC) Decorator {
func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator {
var msg string
producer := chooseTimeProducer(style)
- f := func(s *Statistics) string {
+ fn := func(s Statistics) string {
if !s.Completed {
msg = producer(time.Since(startTime))
}
return msg
}
- return Any(f, wcc...)
+ return Any(fn, wcc...)
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/eta.go b/vendor/github.com/vbauerster/mpb/v5/decor/eta.go
index 6cb27a247..d03caa735 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/eta.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/eta.go
@@ -63,7 +63,7 @@ type movingAverageETA struct {
producer func(time.Duration) string
}
-func (d *movingAverageETA) Decor(s *Statistics) string {
+func (d *movingAverageETA) Decor(s Statistics) string {
v := math.Round(d.average.Value())
remaining := time.Duration((s.Total - s.Current) * int64(v))
if d.normalizer != nil {
@@ -117,7 +117,7 @@ type averageETA struct {
producer func(time.Duration) string
}
-func (d *averageETA) Decor(s *Statistics) string {
+func (d *averageETA) Decor(s Statistics) string {
var remaining time.Duration
if s.Current != 0 {
durPerItem := float64(time.Since(d.startTime)) / float64(s.Current)
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/merge.go b/vendor/github.com/vbauerster/mpb/v5/decor/merge.go
index 520f13a7f..e41406a64 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/merge.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/merge.go
@@ -1,9 +1,10 @@
package decor
import (
- "fmt"
"strings"
- "unicode/utf8"
+
+ "github.com/acarl005/stripansi"
+ "github.com/mattn/go-runewidth"
)
// Merge wraps its decorator argument with intention to sync width
@@ -64,18 +65,18 @@ func (d *mergeDecorator) Base() Decorator {
return d.Decorator
}
-func (d *mergeDecorator) Decor(s *Statistics) string {
+func (d *mergeDecorator) Decor(s Statistics) string {
msg := d.Decorator.Decor(s)
- msgLen := utf8.RuneCountInString(msg)
+ pureWidth := runewidth.StringWidth(msg)
+ stripWidth := runewidth.StringWidth(stripansi.Strip(msg))
+ cellCount := stripWidth
if (d.wc.C & DextraSpace) != 0 {
- msgLen++
+ cellCount++
}
- var total int
- max := utf8.RuneCountInString(d.placeHolders[0].FormatMsg(""))
- total += max
- pw := (msgLen - max) / len(d.placeHolders)
- rem := (msgLen - max) % len(d.placeHolders)
+ total := runewidth.StringWidth(d.placeHolders[0].FormatMsg(""))
+ pw := (cellCount - total) / len(d.placeHolders)
+ rem := (cellCount - total) % len(d.placeHolders)
var diff int
for i := 1; i < len(d.placeHolders); i++ {
@@ -87,20 +88,20 @@ func (d *mergeDecorator) Decor(s *Statistics) string {
width = 0
}
}
- max = utf8.RuneCountInString(ph.FormatMsg(strings.Repeat(" ", width)))
+ max := runewidth.StringWidth(ph.FormatMsg(strings.Repeat(" ", width)))
total += max
diff = max - pw
}
d.wc.wsync <- pw + rem
- max = <-d.wc.wsync
- return fmt.Sprintf(fmt.Sprintf(d.wc.dynFormat, max+total), msg)
+ max := <-d.wc.wsync
+ return d.wc.fill(msg, max+total+(pureWidth-stripWidth))
}
type placeHolderDecorator struct {
WC
}
-func (d *placeHolderDecorator) Decor(*Statistics) string {
+func (d *placeHolderDecorator) Decor(Statistics) string {
return ""
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/name.go b/vendor/github.com/vbauerster/mpb/v5/decor/name.go
index a7d477e07..3af311254 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/name.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/name.go
@@ -8,5 +8,5 @@ package decor
// `wcc` optional WC config
//
func Name(str string, wcc ...WC) Decorator {
- return Any(func(*Statistics) string { return str }, wcc...)
+ return Any(func(Statistics) string { return str }, wcc...)
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go
index 0a1526bf5..f46b19aba 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/on_complete.go
@@ -24,7 +24,7 @@ type onCompleteWrapper struct {
msg string
}
-func (d *onCompleteWrapper) Decor(s *Statistics) string {
+func (d *onCompleteWrapper) Decor(s Statistics) string {
if s.Completed {
wc := d.GetConf()
return wc.FormatMsg(d.msg)
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v5/decor/percentage.go
index 65ca7d318..d6314a619 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/percentage.go
@@ -50,7 +50,7 @@ func NewPercentage(format string, wcc ...WC) Decorator {
if format == "" {
format = "% d"
}
- f := func(s *Statistics) string {
+ f := func(s Statistics) string {
p := internal.Percentage(s.Total, s.Current, 100)
return fmt.Sprintf(format, percentageType(p))
}
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/speed.go b/vendor/github.com/vbauerster/mpb/v5/decor/speed.go
index 8a48e3f52..634edabfd 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/speed.go
@@ -78,7 +78,7 @@ type movingAverageSpeed struct {
msg string
}
-func (d *movingAverageSpeed) Decor(s *Statistics) string {
+func (d *movingAverageSpeed) Decor(s Statistics) string {
if !s.Completed {
var speed float64
if v := d.average.Value(); v > 0 {
@@ -140,7 +140,7 @@ type averageSpeed struct {
msg string
}
-func (d *averageSpeed) Decor(s *Statistics) string {
+func (d *averageSpeed) Decor(s Statistics) string {
if !s.Completed {
speed := float64(s.Current) / float64(time.Since(d.startTime))
d.msg = d.producer(speed * 1e9)
diff --git a/vendor/github.com/vbauerster/mpb/v5/decor/spinner.go b/vendor/github.com/vbauerster/mpb/v5/decor/spinner.go
index abfb2f76c..6871639db 100644
--- a/vendor/github.com/vbauerster/mpb/v5/decor/spinner.go
+++ b/vendor/github.com/vbauerster/mpb/v5/decor/spinner.go
@@ -12,7 +12,7 @@ func Spinner(frames []string, wcc ...WC) Decorator {
frames = defaultSpinnerStyle
}
var count uint
- f := func(s *Statistics) string {
+ f := func(s Statistics) string {
frame := frames[count%uint(len(frames))]
count++
return frame
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.mod b/vendor/github.com/vbauerster/mpb/v5/go.mod
index 1d8d52934..389a19d54 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v5/go.mod
@@ -3,8 +3,9 @@ module github.com/vbauerster/mpb/v5
require (
github.com/VividCortex/ewma v1.1.1
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
- golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
- golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f // indirect
+ github.com/mattn/go-isatty v0.0.12
+ github.com/mattn/go-runewidth v0.0.9
+ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v5/go.sum b/vendor/github.com/vbauerster/mpb/v5/go.sum
index 99ca1bf67..dcaa8c553 100644
--- a/vendor/github.com/vbauerster/mpb/v5/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v5/go.sum
@@ -2,12 +2,10 @@ github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdc
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/vbauerster/mpb/v5/internal/width.go b/vendor/github.com/vbauerster/mpb/v5/internal/width.go
new file mode 100644
index 000000000..35d528983
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v5/internal/width.go
@@ -0,0 +1,8 @@
+package internal
+
+func WidthForBarFiller(reqWidth, available int) int {
+ if reqWidth <= 0 || reqWidth >= available {
+ return available
+ }
+ return reqWidth
+}
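The new helper is small enough to restate: a non-positive or oversized width request falls back to whatever space is available. A self-contained copy for illustration (the real function lives in mpb's internal package, so it cannot be imported directly):

```Go
package main

import "fmt"

// widthForBarFiller mirrors mpb's internal.WidthForBarFiller:
// honor the request only when it is positive and fits the terminal.
func widthForBarFiller(reqWidth, available int) int {
	if reqWidth <= 0 || reqWidth >= available {
		return available
	}
	return reqWidth
}

func main() {
	fmt.Println(widthForBarFiller(0, 80))   // 80: no explicit width requested
	fmt.Println(widthForBarFiller(100, 80)) // 80: request clamped to the terminal
	fmt.Println(widthForBarFiller(40, 80))  // 40: request fits, so it is honored
}
```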
diff --git a/vendor/github.com/vbauerster/mpb/v5/progress.go b/vendor/github.com/vbauerster/mpb/v5/progress.go
index a366b9295..ac1ce50ab 100644
--- a/vendor/github.com/vbauerster/mpb/v5/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v5/progress.go
@@ -19,8 +19,6 @@ import (
const (
// default RefreshRate
prr = 120 * time.Millisecond
- // default width
- pwidth = 80
)
// Progress represents the container that renders Progress bars
@@ -46,7 +44,7 @@ type pState struct {
// following are provided/overrided by user
idCount int
- width int
+ reqWidth int
popCompleted bool
rr time.Duration
uwg *sync.WaitGroup
@@ -70,7 +68,6 @@ func New(options ...ContainerOption) *Progress {
func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
s := &pState{
bHeap: priorityQueue{},
- width: pwidth,
rr: prr,
parkedBars: make(map[*Bar]*Bar),
output: os.Stdout,
@@ -113,7 +110,7 @@ func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options .
// Panics if *Progress instance is done, i.e. called after *Progress.Wait().
func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar {
if filler == nil {
- filler = NewBarFiller(DefaultBarStyle, false)
+ filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {})
}
p.bwg.Add(1)
result := make(chan *Bar)
@@ -215,14 +212,46 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
op(s)
case <-p.refreshCh:
if err := s.render(cw); err != nil {
- go p.dlogger.Println(err)
+ p.dlogger.Println(err)
}
case <-s.shutdownNotifier:
+ if s.heapUpdated {
+ if err := s.render(cw); err != nil {
+ p.dlogger.Println(err)
+ }
+ }
return
}
}
}
+func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
+ ch := make(chan time.Time)
+ if s.shutdownNotifier == nil {
+ s.shutdownNotifier = make(chan struct{})
+ }
+ go func() {
+ if s.renderDelay != nil {
+ <-s.renderDelay
+ }
+ if s.refreshSrc == nil {
+ ticker := time.NewTicker(s.rr)
+ defer ticker.Stop()
+ s.refreshSrc = ticker.C
+ }
+ for {
+ select {
+ case tick := <-s.refreshSrc:
+ ch <- tick
+ case <-done:
+ close(s.shutdownNotifier)
+ return
+ }
+ }
+ }()
+ return ch
+}
+
func (s *pState) render(cw *cwriter.Writer) error {
if s.heapUpdated {
s.updateSyncMatrix()
@@ -233,7 +262,7 @@ func (s *pState) render(cw *cwriter.Writer) error {
tw, err := cw.GetWidth()
if err != nil {
- tw = s.width
+ tw = s.reqWidth
}
for i := 0; i < s.bHeap.Len(); i++ {
bar := s.bHeap[i]
@@ -250,11 +279,16 @@ func (s *pState) flush(cw *cwriter.Writer) error {
b := heap.Pop(&s.bHeap).(*Bar)
cw.ReadFrom(<-b.frameCh)
if b.toShutdown {
- // shutdown at next flush
- // this ensures no bar ends up with less than 100% rendered
- defer func() {
+ if b.recoveredPanic != nil {
s.barShutdownQueue = append(s.barShutdownQueue, b)
- }()
+ b.toShutdown = false
+ } else {
+ // shutdown at next flush
+ // this ensures no bar ends up with less than 100% rendered
+ defer func() {
+ s.barShutdownQueue = append(s.barShutdownQueue, b)
+ }()
+ }
}
lineCount += b.extendedLines + 1
bm[b] = struct{}{}
@@ -295,33 +329,6 @@ func (s *pState) flush(cw *cwriter.Writer) error {
return cw.Flush(lineCount)
}
-func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
- ch := make(chan time.Time)
- if s.shutdownNotifier == nil {
- s.shutdownNotifier = make(chan struct{})
- }
- go func() {
- if s.renderDelay != nil {
- <-s.renderDelay
- }
- if s.refreshSrc == nil {
- ticker := time.NewTicker(s.rr)
- defer ticker.Stop()
- s.refreshSrc = ticker.C
- }
- for {
- select {
- case tick := <-s.refreshSrc:
- ch <- tick
- case <-done:
- close(s.shutdownNotifier)
- return
- }
- }
- }()
- return ch
-}
-
func (s *pState) updateSyncMatrix() {
s.pMatrix = make(map[int][]chan int)
s.aMatrix = make(map[int][]chan int)
@@ -342,16 +349,13 @@ func (s *pState) updateSyncMatrix() {
func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState {
bs := &bState{
+ id: s.idCount,
+ priority: s.idCount,
+ reqWidth: s.reqWidth,
total: total,
- baseF: extractBaseFiller(filler),
filler: filler,
- priority: s.idCount,
- id: s.idCount,
- width: s.width,
+ extender: func(r io.Reader, _ int, _ decor.Statistics) (io.Reader, int) { return r, 0 },
debugOut: s.debugOut,
- extender: func(r io.Reader, _ int, _ *decor.Statistics) (io.Reader, int) {
- return r, 0
- },
}
for _, opt := range options {
@@ -360,13 +364,18 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
}
}
+ if bs.middleware != nil {
+ bs.filler = bs.middleware(filler)
+ bs.middleware = nil
+ }
+
if s.popCompleted && !bs.noPop {
bs.priority = -1
}
- bs.bufP = bytes.NewBuffer(make([]byte, 0, bs.width))
- bs.bufB = bytes.NewBuffer(make([]byte, 0, bs.width))
- bs.bufA = bytes.NewBuffer(make([]byte, 0, bs.width))
+ bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
+ bs.bufB = bytes.NewBuffer(make([]byte, 0, 256))
+ bs.bufA = bytes.NewBuffer(make([]byte, 0, 128))
return bs
}
@@ -387,13 +396,3 @@ func syncWidth(matrix map[int][]chan int) {
}()
}
}
-
-func extractBaseFiller(f BarFiller) BarFiller {
- type wrapper interface {
- Base() BarFiller
- }
- if f, ok := f.(wrapper); ok {
- return extractBaseFiller(f.Base())
- }
- return f
-}
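With the pwidth constant removed, a Progress container no longer defaults to 80 columns; bars stretch to the full terminal width unless WithWidth or BarWidth is supplied. A minimal usage sketch under that assumption:

```Go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	// No WithWidth: the bar spans the whole terminal.
	// Pass mpb.WithWidth(64) to restore a fixed-width layout.
	p := mpb.New()
	bar := p.AddBar(50,
		mpb.PrependDecorators(decor.Name("work ")),
		mpb.AppendDecorators(decor.Percentage()),
	)
	for i := 0; i < 50; i++ {
		bar.Increment()
		time.Sleep(20 * time.Millisecond)
	}
	p.Wait()
}
```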
diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml
new file mode 100644
index 000000000..04d4dae09
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v3
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 000000000..2683e4bb1
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 000000000..866d74a7a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 000000000..08eb1babd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+ decoded into a typed bool value. Otherwise they behave as a string. Booleans
+ in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+ as specified in YAML 1.2, because most parsers still use the old format.
+ Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+ actually never supported by this package as it's clearly a poor choice.
+
+Multi-document unmarshalling is not yet implemented.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
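The Compatibility notes above are easiest to see with the YAML 1.1 bool rule. A short sketch of the behavior the README describes — `on` decodes to true into a typed bool, but stays a string into an untyped map:

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

type Flags struct {
	Verbose bool `yaml:"verbose"`
}

func main() {
	data := []byte("verbose: on\n")

	// Into a typed bool, the YAML 1.1 literal "on" decodes as true.
	var f Flags
	if err := yaml.Unmarshal(data, &f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Verbose) // true

	// Into interface{}, the same literal stays a plain string.
	var m map[string]interface{}
	if err := yaml.Unmarshal(data, &m); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T %v\n", m["verbose"], m["verbose"]) // string on
}
```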
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
new file mode 100644
index 000000000..65846e674
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -0,0 +1,746 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 000000000..be63169b7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,931 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
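+
+// Illustrative sketch (not upstream code): expect and peek together form the
+// parser's event pump. A typical consumption loop, as used by sequence()
+// further down, looks like:
+//
+//    for p.peek() != yaml_SEQUENCE_END_EVENT {
+//        p.parseChild(n) // each child ends by calling p.expect(...)
+//    }
+//    p.expect(yaml_SEQUENCE_END_EVENT)
+//
+// peek fills p.event without consuming it; expect validates the event type
+// and releases the event so the next peek advances the stream.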
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ return &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ Line: p.event.start_mark.line + 1,
+ Column: p.event.start_mark.column + 1,
+ HeadComment: string(p.event.head_comment),
+ LineComment: string(p.event.line_comment),
+ FootComment: string(p.event.foot_comment),
+ }
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
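+
+// Illustrative worked example (not upstream code): for a document halfway
+// through the range, say decodeCount == 2200000, the formula above gives
+//
+//    ratio := 0.99 - 0.89*(float64(2200000-400000)/alias_ratio_range) // == 0.545
+//
+// so at that point just over half of all decode operations may still come
+// from alias expansion before "excessive aliasing" is reported.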
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind)))
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
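+
+// Illustrative sketch (not upstream code): the two compatibility paths above
+// can be exercised with plain typed targets:
+//
+//    var cfg struct {
+//        Timeout time.Duration `yaml:"timeout"`
+//        Enabled bool          `yaml:"enabled"`
+//    }
+//    _ = Unmarshal([]byte("timeout: 1m30s\nenabled: yes\n"), &cfg)
+//    // cfg.Timeout == 90*time.Second, cfg.Enabled == true
+//
+// "1m30s" resolves as a string but is accepted for a time.Duration target,
+// and the 1.1-style "yes" is accepted only because the target is a typed bool.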
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
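+
+// Illustrative sketch (not upstream code): the Slice/Array split above
+// behaves as follows:
+//
+//    var a [2]int
+//    err := Unmarshal([]byte("[1, 2, 3]"), &a)
+//    // err: yaml: invalid array: want 2 elements but got 3
+//
+//    var s []int
+//    _ = Unmarshal([]byte("[1, 2, 3]"), &s) // s == []int{1, 2, 3}
+//
+// while an interface{} target receives a freshly allocated []interface{}.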
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
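+
+// Illustrative sketch (not upstream code): isStringMap below decides which
+// generic map type an interface{} target receives:
+//
+//    var v interface{}
+//    _ = Unmarshal([]byte("a: 1\nb: 2\n"), &v)
+//    // v is map[string]interface{} (every key resolved as a string)
+//    _ = Unmarshal([]byte("1: one\n"), &v)
+//    // v is map[interface{}]interface{} (a non-string key is present)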
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ if n.Content[i].ShortTag() != strTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *Node, out reflect.Value) {
+ switch n.Kind {
+ case MappingNode:
+ d.unmarshal(n, out)
+ case AliasNode:
+ if n.Alias != nil && n.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case SequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.Content) - 1; i >= 0; i-- {
+ ni := n.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
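+
+// Illustrative sketch (not upstream code): isMerge and d.merge together
+// implement YAML 1.1 merge keys. Given a document such as
+//
+//    base: &base {a: 1, b: 2}
+//    child:
+//      <<: *base
+//      b: 3
+//
+// decoding "child" yields {a: 1, b: 3}: the "<<" entry is unmarshalled first
+// and the explicit "b: 3" then overwrites the merged value. For a sequence of
+// maps, merge walks the list backwards so that earlier maps in the sequence
+// take precedence over later ones.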
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 000000000..ab2a06619
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,1992 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
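+
+// Illustrative sketch (not upstream code): the look-ahead above exists so
+// checks such as yaml_emitter_check_empty_sequence (defined further down)
+// can see a collection's end before its start is written. Emitting the
+// empty flow sequence [] produces the event window
+//
+//    SEQUENCE-START, SEQUENCE-END
+//
+// and only once both events are buffered (hence "2 events for
+// SEQUENCE-START") can the emitter commit to the compact "[]" form.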
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ // [Go] If inside a block sequence item, discount the space taken by the indicator.
+ if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ emitter.indent -= 2
+ }
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
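+
+// Illustrative note (not upstream documentation): with an untouched emitter
+// the clamps above land on the effective defaults:
+//
+//    best_indent: 0   -> 2     (valid range is 2..9)
+//    best_width:  0   -> 80    (negative means effectively unlimited)
+//    line_break:  ANY -> '\n'  (and a BOM is written only for non-UTF-8)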
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ // [Go] The original logic here would not indent the sequence when inside a mapping.
+ // In Go we always indent it, but take the sequence indicator out of the indentation.
+ indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention)
+ original := emitter.indent
+ if !yaml_emitter_increase_indent(emitter, false, indentless) {
+ return false
+ }
+ if emitter.indent > original+2 {
+ emitter.indent -= 2
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
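+
+// Illustrative sketch (not upstream code): the 128-byte cap above means a
+// key only stays in the compact "key: value" form while its rendered width
+// fits; a longer or multiline key falls back to the explicit block form:
+//
+//    ? a very long or multiline key ...
+//    : value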
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
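+
+// Illustrative sketch (not upstream code): the chain above only degrades a
+// requested style as far as needed. A plain scalar with the value "a: b" is
+// not plain-allowed (": " looks like a mapping), so it falls back to single
+// quotes; a value containing non-printable characters is not
+// single-quote-allowed either and continues on to double quotes, the only
+// style that can escape them.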
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.line_comment) == 0 {
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+		// [Go]: Why 'z'? It couldn't be the end of the string, as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
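The analysis above decides which styles a value may legally use; the public Marshal entry point then quotes anything whose content rules out the plain style. A small sketch (the exact quoting choices are the emitter's, so the expected results are indicative only):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	samples := []string{
		"plain",     // plain style allowed
		" leading",  // leading space forbids plain styles
		"trailing ", // trailing space also forbids block styles
		"--- doc",   // looks like a document start indicator
		"a: b",      // ": " looks like a mapping key
		"#comment",  // leading '#' looks like a comment
	}
	for _, s := range samples {
		out, err := yaml.Marshal(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-12q -> %s", s, out)
	}
}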
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
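The escape branch above emits each byte of a disallowed character as '%' plus two uppercase hex digits. A standalone sketch of that hex logic (percentEncode is a made-up helper, not part of the library):

package main

import "fmt"

// percentEncode mirrors the fallback branch of yaml_emitter_write_tag_content:
// one '%XX' triple per byte, using uppercase hex digits.
func percentEncode(b byte) string {
	const hexDigits = "0123456789ABCDEF"
	return string([]byte{'%', hexDigits[b>>4], hexDigits[b&0x0F]})
}

func main() {
	fmt.Println(percentEncode(' '))  // %20
	fmt.Println(percentEncode(0xE2)) // %E2, first byte of a UTF-8 sequence
}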
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
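Characters that fail the printability check take the escape table above: a short named escape where one exists, and the \xXX, \uXXXX or \UXXXXXXXX form otherwise. A minimal sketch (the expected output is indicative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Tab and BEL have named escapes (\t, \a); U+0001 has none and falls back
	// to the \xXX form.
	node := &yaml.Node{Kind: yaml.ScalarNode, Style: yaml.DoubleQuotedStyle, Value: "tab\tbell\a\u0001"}
	out, err := yaml.Marshal(node)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // roughly: "tab\tbell\a\x01"
}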
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
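The chomping hint chosen above is visible from the public API: a literal scalar with no final line break gets '|-', exactly one gets '|', and extra trailing breaks get '|+'. A minimal sketch (this encoder defaults to an indent of 4):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	for _, v := range []string{"a\nb", "a\nb\n", "a\nb\n\n"} {
		node := &yaml.Node{Kind: yaml.ScalarNode, Style: yaml.LiteralStyle, Value: v}
		out, err := yaml.Marshal(node)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q:\n%s\n", v, out) // expect |-, | and |+ in turn
	}
}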
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 000000000..1f37271ce
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,561 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values should still be marshalled
+// quoted for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is a boolean in the notation defined by YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings, so that the marshalled output remains valid for
+// YAML 1.1 parsers.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
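Together, isBase60Float and isOldBool above decide which otherwise-plain strings must be quoted for YAML 1.1 compatibility. A small sketch (the expected results are indicative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// "yes" and "Off" are YAML 1.1 booleans; "1:30" matches the base 60
	// notation; "plain" should survive unquoted.
	for _, s := range []string{"yes", "Off", "1:30", "plain"} {
		out, err := yaml.Marshal(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-6s -> %s", s, out)
	}
}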
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var rtag string
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ = resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ }
+}
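The comment plumbing above is what makes node-level comments round-trip through encoding. A minimal sketch of driving it from the public API (the output shape is indicative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := &yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "name", HeadComment: "who we greet"},
			{Kind: yaml.ScalarNode, Value: "world", LineComment: "inline note"},
		},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Roughly:
	//   # who we greet
	//   name: world # inline note
}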
diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
new file mode 100644
index 000000000..f407ea321
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v3"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 000000000..aea9050b8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1229 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
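In terms of the stream production above, a stream may hold one implicit document followed by any number of explicit ones; the public Decoder surfaces each as a separate Decode call. A minimal sketch:

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	input := "first: 1\n---\nsecond: 2\n"
	dec := yaml.NewDecoder(strings.NewReader(input))
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(doc)
	}
}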
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+				// No heads on ends, so keep comment.head for a follow-up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
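The comment slices filled in above are what the decoder later attaches to yaml.Node values, so comments survive a decode/encode round trip. A minimal sketch (the exact comment placement is up to the emitter):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	input := "# head\nkey: value # line\n"
	var node yaml.Node
	if err := yaml.Unmarshal([]byte(input), &node); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(&node)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}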
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, split
+			// the comment so the part before the last empty line goes into the document
+			// header, while the rest goes into a follow-up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head := len(parser.head_comment)
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ // [Go] It's a sequence under a sequence entry, so the former head comment
+ // is for the list itself, not the first list item under it.
+ parser.stem_comment = parser.head_comment[:prior_head]
+ if len(parser.head_comment) == prior_head {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...)
+ }
+
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+	// [Go] A tail comment was left over from the prior mapping value processed. Emit an
+	// event for it, as it belongs with that value and not with the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
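
The [Go] comment plumbing added above (parser.stem_comment, head_comment, tail_comment, and yaml_TAIL_COMMENT_EVENT) is what ultimately populates the comment fields on yaml.Node in the public v3 API. A minimal sketch of how that surfaces to callers, assuming only the documented yaml.Node type and its Content/Value/HeadComment fields:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("# shopping list\nfruits:\n  - apple\n  - pear\n")

	// Unmarshal into a yaml.Node to keep structure and comments intact.
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}

	// doc is the document node; Content[0] is the top-level mapping, whose
	// Content alternates key, value, key, value, ...
	mapping := doc.Content[0]
	key := mapping.Content[0]

	fmt.Println(key.Value)             // fruits
	fmt.Println(key.HeadComment != "") // true: the head comment survived parsing
}

Decoding into yaml.Node rather than a plain map is what keeps comments (and document order) available to round-trip tooling.
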
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 000000000..b7de0a89c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above
+	// implies that this is the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length would panic (in Go) or access invalid memory (in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length would panic (in Go) or access invalid memory (in C).
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
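
Because the reader above transcodes UTF-16 input to UTF-8 based on the detected BOM, the same document can be handed to the public API in either encoding. A small sketch; the utf16le helper is ad hoc, for illustration only, and handles ASCII input only:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// utf16le encodes an ASCII string as UTF-16LE, prefixed with that BOM.
func utf16le(s string) []byte {
	out := []byte{0xff, 0xfe} // bom_UTF16LE
	for i := 0; i < len(s); i++ {
		out = append(out, s[i], 0x00)
	}
	return out
}

func main() {
	var a, b map[string]int
	if err := yaml.Unmarshal([]byte("answer: 42\n"), &a); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal(utf16le("answer: 42\n"), &b); err != nil {
		panic(err)
	}
	fmt.Println(a["answer"], b["answer"]) // 42 42
}
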
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 000000000..64ae88805
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+	// the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
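
encodeBase64 is reached when a string value is not valid UTF-8 and so cannot be emitted as YAML text; the encoder (in encode.go, not part of this diff) then tags the value !!binary and writes the base64 form. A round-trip sketch of that behavior as the code here implies it, using only the public yaml.v3 entry points:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// "\xff\xfe\xfd" is not valid UTF-8, so it cannot be emitted as text.
	out, err := yaml.Marshal(map[string]string{"blob": "\xff\xfe\xfd"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // blob: !!binary //79

	var back map[string]string
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", back["blob"]) // fffefd (decoded back to the raw bytes)
}
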
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
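
Taken together, resolve and parseTimestamp decide what an unquoted scalar means; quoting suppresses resolution entirely. A sketch of the observable behavior through yaml.Unmarshal into interface{}, with the dynamic types the rules above imply noted in comments:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte(`
flag: true
hex: 0x1F
octal: 0o777
nan: .NaN
stamp: 2001-12-14
word: y
quoted: "true"
`)
	var doc map[string]interface{}
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	for _, k := range []string{"flag", "hex", "octal", "nan", "stamp", "word", "quoted"} {
		fmt.Printf("%-7s %T\n", k, doc[k])
	}
	// Expected, per the resolver above:
	//   flag    bool       (resolveMap)
	//   hex     int        (strconv.ParseInt with base 0)
	//   octal   int        (the 0o handling above)
	//   nan     float64    (resolveMap)
	//   stamp   time.Time  (parseTimestamp)
	//   word    string     ("y" is no longer a bool, unlike YAML 1.1)
	//   quoted  string     (quoting suppresses resolution)
}
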
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 000000000..57e954ca5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3025 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, by contrast,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// There are really only two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
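
trace is a development aid with no call sites in this file: it prints a "+++" line immediately and returns a closure that prints the matching "---" line. A hypothetical use pairs it with defer so entry and exit lines bracket the traced call:

// Hypothetical sketch only; nothing in the vendored code calls trace.
func yaml_parser_fetch_next_token_traced(parser *yaml_parser_t) bool {
	defer trace("fetch_next_token", parser.mark.line, parser.mark.column)()
	return yaml_parser_fetch_next_token(parser)
}
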
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments can be associated with the tokens parsed
+		// before them, and so that line comments can be transformed into
+		// head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+	// Eat whitespace and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+	// indentation levels observed after scan_mark into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we could not determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
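+
+// Illustrative consequence of the rule above: in
+//
+//     ? |
+//       first
+//       second
+//     : value
+//
+// the explicit '?' key form is required, because an implicit key may
+// neither span lines nor place its ':' more than 1024 characters past the
+// key start; either condition invalidates the pending simple key here.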
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
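+
+// Worked example (illustrative): while scanning
+//
+//     a:
+//       b: 1
+//
+// the column of "b" (2) exceeds the current indent (0), so 0 is pushed
+// onto the indents stack, the indent becomes 2, and a BLOCK-MAPPING-START
+// token is inserted into the queue just before the KEY token for "b".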
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column. For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
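+
+// Worked example (illustrative): continuing the document above,
+//
+//     a:
+//       b: 1
+//     c: 2
+//
+// reaching "c" at column 0 pops the indent level 2 introduced for "b" and
+// appends one BLOCK-END token, closing the inner mapping before the KEY
+// token for "c" is produced.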
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
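+
+// Illustrative token stream (informal notation): the block sequence
+//
+//     - one
+//     - two
+//
+// scans to
+//
+//     STREAM-START
+//     BLOCK-SEQUENCE-START
+//     BLOCK-ENTRY  SCALAR("one",plain)
+//     BLOCK-ENTRY  SCALAR("two",plain)
+//     BLOCK-END
+//     STREAM-END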
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
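+
+// Worked example for the simple-key branch above (illustrative): scanning
+//
+//     a: 1
+//
+// first emits the plain scalar "a" while saving it as a potential simple
+// key. When this ':' is reached, a KEY token is inserted retroactively at
+// the saved token number, so the parser sees
+//
+//     BLOCK-MAPPING-START  KEY  SCALAR("a")  VALUE  SCALAR("1")  BLOCK-END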
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
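+
+// Illustrative directive inputs and the tokens they produce (values shown
+// informally):
+//
+//     %YAML 1.2                            VERSION-DIRECTIVE(major=1,minor=2)
+//     %TAG !e! tag:example.com,2000:app/   TAG-DIRECTIVE("!e!","tag:example.com,2000:app/")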
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ // Check if length of the anchor is greater than 0 and it is followed by
+ // a whitespace character or one of the indicators:
+ //
+ //     '?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
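+
+// Illustrative tag forms and the (handle, suffix) pairs produced above:
+//
+//     !<tag:yaml.org,2002:str>   handle=""     suffix="tag:yaml.org,2002:str"
+//     !!str                      handle="!!"   suffix="str"
+//     !local                     handle="!"    suffix="local"
+//     !                          handle=""     suffix="!"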
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
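+
+// Worked example (illustrative): the escaped sequence "%C3%A9" in a tag
+// URI decodes to the two octets 0xC3 0xA9, the UTF-8 encoding of U+00E9
+// ('é'). The leading octet sets w to 2, so exactly one trailing octet of
+// the form 10xxxxxx must follow before the loop above terminates.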
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // TODO Test this and then re-enable it.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
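+
+// Illustrative block scalar headers handled above (a chomping indicator
+// and an optional indentation indicator, in either order):
+//
+//     |     literal, clip: a single trailing line break is kept
+//     |-    literal, strip: all trailing line breaks are dropped
+//     |+    literal, keep: all trailing line breaks are kept
+//     >2    folded, clip, content indented 2 columns past the parent indent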
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
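+
+// Worked example for the escape handling above (illustrative): the
+// double-quoted scalar
+//
+//     "a\tb\u00E9"
+//
+// produces the bytes 'a', 0x09, 'b', 0xC3, 0xA9: the \u escape is
+// range-checked and re-encoded as UTF-8 (0x00E9 fits the two-byte form).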
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else {
+ if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = append(text, parser.buffer[parser.buffer_pos])
+ }
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line-parser.newlines+1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') {
+ // Got line break or terminator.
+ if !recent_empty {
+ if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ if len(text) > 0 {
+ if start_mark.column-1 < parser.indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && column < parser.indent+1 && column != start_mark.column {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else {
+ if parser.mark.index >= seen {
+ text = append(text, parser.buffer[parser.buffer_pos])
+ }
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 000000000..9210ece7e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
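+
+// Illustrative note (not part of the upstream source): keyList sorts map
+// keys in a "natural" order, so digit runs compare numerically rather than
+// lexically. For example, the keys "a2", "a10", "b1" encode in that order,
+// whereas a plain lexical sort would put "a10" before "a2".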
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess reports whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 000000000..b8a116bf9
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 000000000..b5d35a50d
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,662 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
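+
+// Example (a minimal usage sketch, not part of the upstream documentation):
+// a mapping can also be decoded without declaring a struct:
+//
+//     var m map[string]interface{}
+//     if err := yaml.Unmarshal([]byte("a: 1\nb: [x, y]"), &m); err != nil {
+//         log.Fatal(err)
+//     }
+//     // m["a"] == 1 and m["b"] == []interface{}{"x", "y"}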
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
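+
+// Example (a minimal sketch, assuming a multi-document stream): Decode is
+// called once per document and returns io.EOF when the stream is exhausted:
+//
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//     dec.KnownFields(true) // reject keys with no matching struct field
+//     for {
+//         var doc struct {
+//             A int `yaml:"a"`
+//         }
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             log.Fatal(err)
+//         }
+//     }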
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
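+
+// Example (an illustrative sketch): a document may be decoded into a Node
+// first, inspected, and decoded into a concrete type later:
+//
+//     var n yaml.Node
+//     _ = yaml.Unmarshal([]byte("port: 8080"), &n)
+//     var cfg struct {
+//         Port int `yaml:"port"`
+//     }
+//     _ = n.Decode(&cfg) // cfg.Port == 8080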
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be included if that method returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
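+
+// Example (a small sketch of the flags above; the expected output is shown
+// as comments and assumes this yaml.v3 behavior):
+//
+//     type S struct {
+//         Name  string            `yaml:"name"`
+//         Tags  []string          `yaml:"tags,flow,omitempty"`
+//         Extra map[string]string `yaml:",inline"`
+//     }
+//     out, _ := yaml.Marshal(S{
+//         Name:  "x",
+//         Tags:  []string{"a", "b"},
+//         Extra: map[string]string{"k": "v"},
+//     })
+//     // name: x
+//     // tags: [a, b]
+//     // k: v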
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
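+
+// Example (a minimal sketch): writing two values to one stream yields two
+// documents separated by "---":
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.SetIndent(2)
+//     _ = enc.Encode(map[string]int{"a": 1})
+//     _ = enc.Encode(map[string]int{"b": 2})
+//     _ = enc.Close()
+//     // buf.String() == "a: 1\n---\nb: 2\n"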
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
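+
+// Example (illustrative): a mismatched field is reported while the rest of
+// the document still decodes:
+//
+//     var v struct {
+//         A int
+//         B string
+//     }
+//     err := yaml.Unmarshal([]byte("a: nope\nb: ok"), &v)
+//     if terr, ok := err.(*yaml.TypeError); ok {
+//         fmt.Println(terr.Errors) // v.B is still "ok" despite the error
+//     }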
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line on which the node appears.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
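+
+// Note (illustrative, based on the Content field above): for a MappingNode
+// the Content slice alternates key and value nodes, so the pairs can be
+// walked as:
+//
+//     for i := 0; i < len(n.Content); i += 2 {
+//         key, value := n.Content[i], n.Content[i+1]
+//         fmt.Println(key.Value, value.Value)
+//     }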
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
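+
+// Example (a small sketch): SetString chooses the style from the content:
+//
+//     var n yaml.Node
+//     n.SetString("line1\nline2")
+//     // n.Kind == ScalarNode, n.Tag == "!!str", n.Style == LiteralStyle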
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
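+
+// Example (illustrative): a type may control omitempty itself by
+// implementing IsZeroer, which isZero consults before its built-in checks:
+//
+//     type Window struct{ From, To int }
+//
+//     func (w Window) IsZero() bool { return w.From == 0 && w.To == 0 }
+//
+//     type Conf struct {
+//         W Window `yaml:"w,omitempty"` // omitted whenever w.IsZero() is true
+//     }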
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 000000000..2719cfbb0
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,805 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT // A TAIL-COMMENT event (not in original libyaml).
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
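+
+// Illustrative sketch (not upstream code): in this Go port the handler just
+// fills the buffer and reports the byte count; a reader-backed handler
+// could be as simple as:
+//
+//     handler := func(parser *yaml_parser_t, buffer []byte) (int, error) {
+//         return parser.input_reader.Read(buffer)
+//     }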
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to an application data specified by
+// yaml_emitter_set_output().
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
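+
+// Illustrative sketch (not upstream code): a handler that collects emitted
+// bytes into the emitter's in-memory output buffer:
+//
+//     handler := func(emitter *yaml_emitter_t, buffer []byte) error {
+//         *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+//         return nil
+//     }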
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // Was the last character whitespace?
+ indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
+ open_ended bool // Is an explicit document end required?
+
+ space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // Dumper stuff
+
+ opened bool // Was the stream already opened?
+ closed bool // Was the stream already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
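+
+// Everything above is internal plumbing; user code reaches it through the
+// package's public entry points. A minimal sketch, assuming only the
+// public yaml.v3 API and the standard os package:
+//
+//	enc := yaml.NewEncoder(os.Stdout)
+//	if err := enc.Encode(map[string]int{"answer": 42}); err != nil {
+//		panic(err)
+//	}
+//	if err := enc.Close(); err != nil {
+//		panic(err)
+//	}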
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 000000000..e88f9c54a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
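+
+// The is_/as_ pairs compose when scanning numeric escapes. A sketch of a
+// hypothetical helper (not part of this file) that decodes the two hex
+// digits of a \xNN escape:
+//
+//	func as_hex_byte(b []byte, i int) (byte, bool) {
+//		if !is_hex(b, i) || !is_hex(b, i+1) {
+//			return 0, false
+//		}
+//		return byte(as_hex(b, i)<<4 | as_hex(b, i+1)), true
+//	}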
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
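+
+// A sketch of exercising is_printable, assuming only the standard
+// unicode/utf8 and fmt packages: #xFEFF (the byte order mark) encodes as
+// 0xEF 0xBB 0xBF and is explicitly excluded above.
+//
+//	buf := make([]byte, 4)
+//	utf8.EncodeRune(buf, '\uFEFF')
+//	fmt.Println(is_printable(buf, 0)) // false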
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the character at the specified position is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
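+
+// A sketch of walking a UTF-8 buffer character by character with width(),
+// much as the reader advances its position (assuming only the standard
+// fmt package); a zero width flags an invalid leading byte:
+//
+//	b := []byte("a£€")
+//	for i := 0; i < len(b); {
+//		w := width(b[i])
+//		if w == 0 {
+//			break // invalid UTF-8 leading byte
+//		}
+//		fmt.Printf("%q starts at %d (width %d)\n", b[i:i+w], i, w)
+//		i += w
+//	}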
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 34c8bc94f..f0e279be4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -66,7 +66,7 @@ github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/pkg/utils/sysctl
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9
+# github.com/containers/buildah v1.14.9-0.20200523094741-de0f541d9224
github.com/containers/buildah
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
@@ -84,7 +84,7 @@ github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/pkg/umask
github.com/containers/buildah/util
-# github.com/containers/common v0.11.4
+# github.com/containers/common v0.12.0
github.com/containers/common/pkg/apparmor
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
@@ -93,7 +93,7 @@ github.com/containers/common/pkg/config
github.com/containers/common/pkg/sysinfo
# github.com/containers/conmon v2.0.16+incompatible
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.4.4
+# github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -324,7 +324,7 @@ github.com/inconshreveable/mousetrap
github.com/ishidawataru/sctp
# github.com/json-iterator/go v1.1.9
github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.5
+# github.com/klauspost/compress v1.10.6
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -335,6 +335,10 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/pgzip
# github.com/konsorten/go-windows-terminal-sequences v1.0.3
github.com/konsorten/go-windows-terminal-sequences
+# github.com/mattn/go-isatty v0.0.12
+github.com/mattn/go-isatty
+# github.com/mattn/go-runewidth v0.0.9
+github.com/mattn/go-runewidth
# github.com/mattn/go-shellwords v1.0.10
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.1
@@ -421,12 +425,10 @@ github.com/opencontainers/runtime-tools/generate
github.com/opencontainers/runtime-tools/generate/seccomp
github.com/opencontainers/runtime-tools/specerror
github.com/opencontainers/runtime-tools/validate
-# github.com/opencontainers/selinux v1.5.1
+# github.com/opencontainers/selinux v1.5.2
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
-# github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316
-github.com/openshift/api/config/v1
# github.com/openshift/imagebuilder v1.1.4
github.com/openshift/imagebuilder
github.com/openshift/imagebuilder/dockerfile/command
@@ -488,7 +490,7 @@ github.com/sirupsen/logrus/hooks/syslog
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
-# github.com/stretchr/testify v1.5.1
+# github.com/stretchr/testify v1.6.0
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
@@ -529,7 +531,7 @@ github.com/varlink/go/varlink/idl
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v5 v5.0.4
+# github.com/vbauerster/mpb/v5 v5.2.2
github.com/vbauerster/mpb/v5
github.com/vbauerster/mpb/v5/cwriter
github.com/vbauerster/mpb/v5/decor
@@ -682,6 +684,8 @@ gopkg.in/square/go-jose.v2/json
gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2
+# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
+gopkg.in/yaml.v3
# k8s.io/api v0.18.3
k8s.io/api/core/v1
# k8s.io/apimachinery v0.18.3