-rw-r--r--  .cirrus.yml | 85
-rw-r--r--  .gitignore | 2
-rw-r--r--  Makefile | 72
-rw-r--r--  README.md | 4
-rw-r--r--  RELEASE_NOTES.md | 29
-rw-r--r--  cmd/podman/attach.go | 3
-rw-r--r--  cmd/podman/build.go | 12
-rw-r--r--  cmd/podman/cliconfig/config.go | 19
-rw-r--r--  cmd/podman/common.go | 10
-rw-r--r--  cmd/podman/cp.go | 11
-rw-r--r--  cmd/podman/create.go | 7
-rw-r--r--  cmd/podman/exec.go | 3
-rw-r--r--  cmd/podman/images.go | 4
-rw-r--r--  cmd/podman/login.go | 4
-rw-r--r--  cmd/podman/logout.go | 11
-rw-r--r--  cmd/podman/logs.go | 2
-rw-r--r--  cmd/podman/main_local.go | 11
-rw-r--r--  cmd/podman/play_kube.go | 12
-rw-r--r--  cmd/podman/pull.go | 10
-rw-r--r--  cmd/podman/push.go | 10
-rw-r--r--  cmd/podman/restore.go | 1
-rw-r--r--  cmd/podman/rm.go | 2
-rw-r--r--  cmd/podman/run.go | 7
-rw-r--r--  cmd/podman/runlabel.go | 9
-rw-r--r--  cmd/podman/search.go | 11
-rw-r--r--  cmd/podman/shared/create.go | 194
-rw-r--r--  cmd/podman/shared/funcs.go | 21
-rw-r--r--  cmd/podman/start.go | 2
-rw-r--r--  cmd/podman/utils.go | 13
-rw-r--r--  cni/87-podman-bridge.conflist | 3
-rw-r--r--  completions/bash/podman | 1
-rw-r--r--  contrib/cirrus/README.md | 2
-rw-r--r--  contrib/spec/podman.spec.in | 2
-rw-r--r--  docs/Makefile | 23
-rw-r--r--  docs/Readme.md | 28
-rw-r--r--  docs/make.bat (renamed from docs/rtd/make.bat) | 0
-rwxr-xr-x  docs/remote-docs.sh (renamed from docs/podman-remote.sh) | 55
-rw-r--r--  docs/requirements.txt (renamed from docs/rtd/requirements.txt) | 0
-rw-r--r--  docs/rtd/Makefile | 123
-rw-r--r--  docs/rtd/source/Commands.rst | 107
-rw-r--r--  docs/rtd/source/man/generate.rst | 6
-rw-r--r--  docs/rtd/source/man/healthcheck.rst | 4
-rw-r--r--  docs/rtd/source/man/image.rst | 35
-rw-r--r--  docs/rtd/source/man/managecontainers.rst | 64
-rw-r--r--  docs/rtd/source/man/network.rst | 10
-rw-r--r--  docs/rtd/source/man/play.rst | 4
-rw-r--r--  docs/rtd/source/man/pod.rst | 30
-rw-r--r--  docs/rtd/source/man/system.rst | 12
-rw-r--r--  docs/rtd/source/man/volume.rst | 11
-rw-r--r--  docs/source/Commands.rst | 107
-rw-r--r--  docs/source/Introduction.rst (renamed from docs/rtd/source/Introduction.rst) | 0
-rw-r--r--  docs/source/Reference.rst (renamed from docs/rtd/source/Reference.rst) | 0
-rw-r--r--  docs/source/Tutorials.rst (renamed from docs/rtd/source/Tutorials.rst) | 0
-rw-r--r--  docs/source/conf.py (renamed from docs/rtd/source/conf.py) | 0
-rw-r--r--  docs/source/generate.rst | 6
-rw-r--r--  docs/source/healthcheck.rst | 4
-rw-r--r--  docs/source/image.rst | 35
-rw-r--r--  docs/source/index.rst (renamed from docs/rtd/source/index.rst) | 0
-rw-r--r--  docs/source/managecontainers.rst | 64
-rw-r--r--  docs/source/markdown/containers-mounts.conf.5.md (renamed from docs/containers-mounts.conf.5.md) | 0
-rw-r--r--  docs/source/markdown/libpod.conf.5.md (renamed from docs/libpod.conf.5.md) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-attach.1 (renamed from docs/links/podman-container-attach.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-commit.1 (renamed from docs/links/podman-container-commit.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-cp.1 (renamed from docs/links/podman-container-cp.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-create.1 (renamed from docs/links/podman-container-create.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-diff.1 (renamed from docs/links/podman-container-diff.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-exec.1 (renamed from docs/links/podman-container-exec.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-export.1 (renamed from docs/links/podman-container-export.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-init.1 (renamed from docs/links/podman-container-init.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-inspect.1 (renamed from docs/links/podman-container-inspect.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-kill.1 (renamed from docs/links/podman-container-kill.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-list.1 (renamed from docs/links/podman-container-list.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-logs.1 (renamed from docs/links/podman-container-logs.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-ls.1 (renamed from docs/links/podman-container-ls.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-mount.1 (renamed from docs/links/podman-container-mount.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-pause.1 (renamed from docs/links/podman-container-pause.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-port.1 (renamed from docs/links/podman-container-port.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-ps.1 (renamed from docs/links/podman-container-ps.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-restart.1 (renamed from docs/links/podman-container-restart.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-rm.1 (renamed from docs/links/podman-container-rm.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-run.1 (renamed from docs/links/podman-container-run.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-start.1 (renamed from docs/links/podman-container-start.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-stats.1 (renamed from docs/links/podman-container-stats.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-stop.1 (renamed from docs/links/podman-container-stop.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-top.1 (renamed from docs/links/podman-container-top.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-umount.1 (renamed from docs/links/podman-container-umount.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-unmount.1 (renamed from docs/links/podman-container-unmount.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-unpause.1 (renamed from docs/links/podman-container-unpause.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-container-wait.1 (renamed from docs/links/podman-container-wait.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-help.1 (renamed from docs/links/podman-help.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-build.1 (renamed from docs/links/podman-image-build.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-history.1 (renamed from docs/links/podman-image-history.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-import.1 (renamed from docs/links/podman-image-import.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-inspect.1 (renamed from docs/links/podman-image-inspect.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-list.1 (renamed from docs/links/podman-image-list.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-load.1 (renamed from docs/links/podman-image-load.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-ls.1 (renamed from docs/links/podman-image-ls.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-pull.1 (renamed from docs/links/podman-image-pull.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-push.1 (renamed from docs/links/podman-image-push.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-rm.1 (renamed from docs/links/podman-image-rm.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-save.1 (renamed from docs/links/podman-image-save.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-image-tag.1 (renamed from docs/links/podman-image-tag.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-list.1 (renamed from docs/links/podman-list.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-ls.1 (renamed from docs/links/podman-ls.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-system-info.1 (renamed from docs/links/podman-system-info.1) | 0
-rw-r--r--  docs/source/markdown/links/podman-unmount.1 (renamed from docs/links/podman-unmount.1) | 0
-rw-r--r--  docs/source/markdown/podman-attach.1.md (renamed from docs/podman-attach.1.md) | 4
-rw-r--r--  docs/source/markdown/podman-build.1.md (renamed from docs/podman-build.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-commit.1.md (renamed from docs/podman-commit.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-container-checkpoint.1.md (renamed from docs/podman-container-checkpoint.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-container-cleanup.1.md (renamed from docs/podman-container-cleanup.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-container-exists.1.md (renamed from docs/podman-container-exists.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-container-prune.1.md (renamed from docs/podman-container-prune.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-container-restore.1.md (renamed from docs/podman-container-restore.1.md) | 9
-rw-r--r--  docs/source/markdown/podman-container-runlabel.1.md (renamed from docs/podman-container-runlabel.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-container.1.md (renamed from docs/podman-container.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-cp.1.md (renamed from docs/podman-cp.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-create.1.md (renamed from docs/podman-create.1.md) | 12
-rw-r--r--  docs/source/markdown/podman-diff.1.md (renamed from docs/podman-diff.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-events.1.md (renamed from docs/podman-events.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-exec.1.md (renamed from docs/podman-exec.1.md) | 2
-rw-r--r--  docs/source/markdown/podman-export.1.md (renamed from docs/podman-export.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-generate-kube.1.md (renamed from docs/podman-generate-kube.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-generate-systemd.1.md (renamed from docs/podman-generate-systemd.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-generate.1.md (renamed from docs/podman-generate.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-healthcheck-run.1.md (renamed from docs/podman-healthcheck-run.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-healthcheck.1.md (renamed from docs/podman-healthcheck.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-history.1.md (renamed from docs/podman-history.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-image-exists.1.md (renamed from docs/podman-image-exists.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-image-prune.1.md (renamed from docs/podman-image-prune.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-image-sign.1.md (renamed from docs/podman-image-sign.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-image-tree.1.md (renamed from docs/podman-image-tree.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-image-trust.1.md (renamed from docs/podman-image-trust.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-image.1.md (renamed from docs/podman-image.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-images.1.md (renamed from docs/podman-images.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-import.1.md (renamed from docs/podman-import.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-info.1.md (renamed from docs/podman-info.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-init.1.md (renamed from docs/podman-init.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-inspect.1.md (renamed from docs/podman-inspect.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-kill.1.md (renamed from docs/podman-kill.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-load.1.md (renamed from docs/podman-load.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-login.1.md (renamed from docs/podman-login.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-logout.1.md (renamed from docs/podman-logout.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-logs.1.md (renamed from docs/podman-logs.1.md) | 2
-rw-r--r--  docs/source/markdown/podman-mount.1.md (renamed from docs/podman-mount.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-network-create.1.md (renamed from docs/podman-network-create.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-network-inspect.1.md (renamed from docs/podman-network-inspect.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-network-ls.1.md (renamed from docs/podman-network-ls.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-network-rm.1.md (renamed from docs/podman-network-rm.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-network.1.md (renamed from docs/podman-network.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pause.1.md (renamed from docs/podman-pause.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-play-kube.1.md (renamed from docs/podman-play-kube.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-play.1.md (renamed from docs/podman-play.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-create.1.md (renamed from docs/podman-pod-create.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-exists.1.md (renamed from docs/podman-pod-exists.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-inspect.1.md (renamed from docs/podman-pod-inspect.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-kill.1.md (renamed from docs/podman-pod-kill.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-pause.1.md (renamed from docs/podman-pod-pause.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-prune.1.md (renamed from docs/podman-pod-prune.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-ps.1.md (renamed from docs/podman-pod-ps.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-restart.1.md (renamed from docs/podman-pod-restart.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-rm.1.md (renamed from docs/podman-pod-rm.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-start.1.md (renamed from docs/podman-pod-start.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-stats.1.md (renamed from docs/podman-pod-stats.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-stop.1.md (renamed from docs/podman-pod-stop.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-top.1.md (renamed from docs/podman-pod-top.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod-unpause.1.md (renamed from docs/podman-pod-unpause.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pod.1.md (renamed from docs/podman-pod.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-port.1.md (renamed from docs/podman-port.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-ps.1.md (renamed from docs/podman-ps.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-pull.1.md (renamed from docs/podman-pull.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-push.1.md (renamed from docs/podman-push.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-remote.1.md (renamed from docs/podman-remote.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-remote.conf.5.md (renamed from docs/podman-remote.conf.5.md) | 0
-rw-r--r--  docs/source/markdown/podman-restart.1.md (renamed from docs/podman-restart.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-rm.1.md (renamed from docs/podman-rm.1.md) | 3
-rw-r--r--  docs/source/markdown/podman-rmi.1.md (renamed from docs/podman-rmi.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-run.1.md (renamed from docs/podman-run.1.md) | 12
-rw-r--r--  docs/source/markdown/podman-save.1.md (renamed from docs/podman-save.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-search.1.md (renamed from docs/podman-search.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-start.1.md (renamed from docs/podman-start.1.md) | 4
-rw-r--r--  docs/source/markdown/podman-stats.1.md (renamed from docs/podman-stats.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-stop.1.md (renamed from docs/podman-stop.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-system-df.1.md (renamed from docs/podman-system-df.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-system-migrate.1.md (renamed from docs/podman-system-migrate.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-system-prune.1.md (renamed from docs/podman-system-prune.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-system-renumber.1.md (renamed from docs/podman-system-renumber.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-system.1.md (renamed from docs/podman-system.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-tag.1.md (renamed from docs/podman-tag.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-top.1.md (renamed from docs/podman-top.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-umount.1.md (renamed from docs/podman-umount.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-unpause.1.md (renamed from docs/podman-unpause.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-unshare.1.md (renamed from docs/podman-unshare.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-varlink.1.md (renamed from docs/podman-varlink.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-version.1.md (renamed from docs/podman-version.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-volume-create.1.md (renamed from docs/podman-volume-create.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-volume-inspect.1.md (renamed from docs/podman-volume-inspect.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-volume-ls.1.md (renamed from docs/podman-volume-ls.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-volume-prune.1.md (renamed from docs/podman-volume-prune.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-volume-rm.1.md (renamed from docs/podman-volume-rm.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-volume.1.md (renamed from docs/podman-volume.1.md) | 0
-rw-r--r--  docs/source/markdown/podman-wait.1.md (renamed from docs/podman-wait.1.md) | 0
-rw-r--r--  docs/source/markdown/podman.1.md (renamed from docs/podman.1.md) | 0
-rw-r--r--  docs/source/network.rst | 10
-rw-r--r--  docs/source/play.rst | 4
-rw-r--r--  docs/source/pod.rst | 30
-rw-r--r--  docs/source/system.rst | 12
-rw-r--r--  docs/source/volume.rst | 11
-rw-r--r--  go.mod | 17
-rw-r--r--  go.sum | 34
-rwxr-xr-x  hack/man-page-checker | 2
-rwxr-xr-x  hack/podman-commands.sh | 6
-rw-r--r--  install.md | 2
-rw-r--r--  libpod.conf | 32
-rw-r--r--  libpod/boltdb_state.go | 5
-rw-r--r--  libpod/common_test.go | 3
-rw-r--r--  libpod/config/config.go | 549
-rw-r--r--  libpod/config/config_test.go | 64
-rw-r--r--  libpod/config/default.go | 152
-rw-r--r--  libpod/config/merge.go | 183
-rw-r--r--  libpod/config/merge_test.go | 157
-rw-r--r--  libpod/config/testdata/empty.conf | 0
l---------  libpod/config/testdata/libpod.conf | 1
-rw-r--r--  libpod/container.go | 12
-rw-r--r--  libpod/container_api.go | 8
-rw-r--r--  libpod/container_inspect.go | 4
-rw-r--r--  libpod/container_internal_linux.go | 56
-rw-r--r--  libpod/container_log_linux.go | 9
-rw-r--r--  libpod/define/config.go | 18
-rw-r--r--  libpod/define/runtime.go | 37
-rw-r--r--  libpod/events/journal_linux.go | 2
-rw-r--r--  libpod/events/logfile.go | 2
-rw-r--r--  libpod/healthcheck.go | 4
-rw-r--r--  libpod/image/pull.go | 8
-rw-r--r--  libpod/in_memory_state.go | 5
-rw-r--r--  libpod/kube.go | 9
-rw-r--r--  libpod/logs/log.go | 6
-rw-r--r--  libpod/networking_linux.go | 51
-rw-r--r--  libpod/oci_attach_linux.go | 2
-rw-r--r--  libpod/oci_conmon_linux.go | 13
-rw-r--r--  libpod/oci_conmon_unsupported.go | 3
-rw-r--r--  libpod/oci_util.go | 26
-rw-r--r--  libpod/options.go | 49
-rw-r--r--  libpod/pod_internal.go | 4
-rw-r--r--  libpod/runtime.go | 828
-rw-r--r--  libpod/runtime_ctr.go | 6
-rw-r--r--  libpod/runtime_pod_linux.go | 10
-rw-r--r--  libpod/state.go | 13
-rw-r--r--  libpod/state_test.go | 3
-rw-r--r--  libpod/stats.go | 24
-rw-r--r--  libpod/util.go | 20
-rw-r--r--  logo/podman-logo-source.svg | 10
-rw-r--r--  logo/podman-logo.png | bin 37056 -> 37991 bytes
-rw-r--r--  pkg/adapter/containers.go | 63
-rw-r--r--  pkg/adapter/pods.go | 108
-rw-r--r--  pkg/adapter/runtime.go | 2
-rw-r--r--  pkg/adapter/terminal_linux.go | 3
-rw-r--r--  pkg/cgroups/cpu.go | 4
-rw-r--r--  pkg/namespaces/namespaces.go | 5
-rw-r--r--  pkg/rootless/rootless_linux.c | 14
-rw-r--r--  pkg/spec/config_linux_cgo.go | 2
-rw-r--r--  pkg/spec/config_linux_nocgo.go | 2
-rw-r--r--  pkg/spec/config_unsupported.go | 2
-rw-r--r--  pkg/spec/createconfig.go | 412
-rw-r--r--  pkg/spec/namespaces.go | 433
-rw-r--r--  pkg/spec/security.go | 172
-rw-r--r--  pkg/spec/spec.go | 310
-rw-r--r--  pkg/spec/spec_test.go | 6
-rw-r--r--  pkg/spec/storage.go | 12
-rw-r--r--  pkg/util/mountOpts.go | 31
-rw-r--r--  pkg/util/utils.go | 14
-rw-r--r--  pkg/varlinkapi/attach.go | 2
-rw-r--r--  pkg/varlinkapi/containers.go | 8
-rw-r--r--  rootless.md | 15
-rw-r--r--  test/e2e/checkpoint_test.go | 11
-rw-r--r--  test/e2e/create_staticmac_test.go | 46
-rw-r--r--  test/e2e/create_test.go | 7
-rw-r--r--  test/e2e/generate_kube_test.go | 33
-rw-r--r--  test/e2e/login_logout_test.go | 10
-rw-r--r--  test/e2e/logs_test.go | 12
-rw-r--r--  test/e2e/play_kube_test.go | 10
-rw-r--r--  test/e2e/pull_test.go | 7
-rw-r--r--  test/e2e/run_cgroup_parent_test.go | 6
-rw-r--r--  test/e2e/run_selinux_test.go | 2
-rw-r--r--  test/e2e/run_test.go | 7
-rw-r--r--  test/e2e/run_volume_test.go | 22
-rw-r--r--  test/e2e/runlabel_test.go | 15
-rw-r--r--  test/e2e/search_test.go | 8
-rw-r--r--  test/e2e/start_test.go | 21
-rw-r--r--  test/e2e/test.yaml | 37
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go | 943
-rw-r--r--  vendor/github.com/containernetworking/cni/libcni/api.go | 230
-rw-r--r--  vendor/github.com/containernetworking/cni/libcni/conf.go | 4
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/invoke/args.go | 2
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/args.go | 2
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/types/types.go | 25
-rw-r--r--  vendor/github.com/containernetworking/cni/pkg/utils/utils.go | 51
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 30
-rw-r--r--  vendor/github.com/containers/buildah/.golangci.yml | 1
-rw-r--r--  vendor/github.com/containers/buildah/.papr.sh | 1
-rw-r--r--  vendor/github.com/containers/buildah/.papr.yml | 1
-rw-r--r--  vendor/github.com/containers/buildah/.travis.yml | 16
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md | 22
-rw-r--r--  vendor/github.com/containers/buildah/Makefile | 7
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 2
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 2
-rw-r--r--  vendor/github.com/containers/buildah/install.md | 15
-rw-r--r--  vendor/github.com/containers/buildah/ostree_tag.sh | 6
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 5
-rw-r--r--  vendor/github.com/containers/buildah/pkg/secrets/secrets.go | 10
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/troubleshooting.md | 6
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 28
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/example_changes.go | 97
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go | 411
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go | 32
-rw-r--r--  vendor/github.com/docker/docker/pkg/archive/example_changes.go | 97
-rw-r--r--  vendor/github.com/json-iterator/go/iter.go | 27
-rw-r--r--  vendor/github.com/json-iterator/go/iter_array.go | 10
-rw-r--r--  vendor/github.com/json-iterator/go/iter_object.go | 24
-rw-r--r--  vendor/github.com/json-iterator/go/iter_skip_sloppy.go | 19
-rw-r--r--  vendor/github.com/json-iterator/go/reflect.go | 5
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_extension.go | 2
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_map.go | 4
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_marshaler.go | 12
-rw-r--r--  vendor/github.com/json-iterator/go/reflect_struct_decoder.go | 44
-rw-r--r--  vendor/github.com/klauspost/compress/flate/gen.go | 265
-rw-r--r--  vendor/github.com/klauspost/cpuid/private-gen.go | 476
-rw-r--r--  vendor/github.com/onsi/ginkgo/.travis.yml | 3
-rw-r--r--  vendor/github.com/onsi/ginkgo/CHANGELOG.md | 22
-rw-r--r--  vendor/github.com/onsi/ginkgo/config/config.go | 9
-rw-r--r--  vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go | 13
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo/run_command.go | 17
-rw-r--r--  vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 5
-rw-r--r--  vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go | 21
-rw-r--r--  vendor/github.com/onsi/gomega/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/onsi/gomega/go.mod | 3
-rw-r--r--  vendor/github.com/onsi/gomega/go.sum | 4
-rw-r--r--  vendor/github.com/onsi/gomega/gomega_dsl.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/.travis.yml | 9
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/CHANGELOG.md | 39
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/Gopkg.lock | 125
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/Gopkg.toml | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/Makefile | 3
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/README.md | 23
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config.go | 4
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/config/config_env.go | 24
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/constants.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/metrics.go | 20
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/propagation.go | 8
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/reporter.go | 2
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler.go | 446
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_remote.go | 334
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go (renamed from vendor/github.com/uber/jaeger-client-go/sampler_options.go) | 71
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/sampler_v2.go | 93
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/span.go | 144
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/span_context.go (renamed from vendor/github.com/uber/jaeger-client-go/context.go) | 161
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/tracer.go | 127
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go | 93
-rw-r--r--  vendor/github.com/uber/jaeger-client-go/zipkin.go | 5
-rw-r--r--  vendor/github.com/ulikunitz/xz/example.go | 40
-rw-r--r--  vendor/go.uber.org/atomic/.codecov.yml | 15
-rw-r--r--  vendor/go.uber.org/atomic/.gitignore | 11
-rw-r--r--  vendor/go.uber.org/atomic/.travis.yml | 27
-rw-r--r--  vendor/go.uber.org/atomic/LICENSE.txt | 19
-rw-r--r--  vendor/go.uber.org/atomic/Makefile | 51
-rw-r--r--  vendor/go.uber.org/atomic/README.md | 36
-rw-r--r--  vendor/go.uber.org/atomic/atomic.go | 351
-rw-r--r--  vendor/go.uber.org/atomic/error.go | 55
-rw-r--r--  vendor/go.uber.org/atomic/glide.lock | 17
-rw-r--r--  vendor/go.uber.org/atomic/glide.yaml | 6
-rw-r--r--  vendor/go.uber.org/atomic/string.go | 49
-rw-r--r--  vendor/golang.org/x/net/html/atom/gen.go | 712
-rw-r--r--  vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61
-rw-r--r--  vendor/golang.org/x/sys/unix/mkpost.go | 122
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall.go | 407
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go | 415
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go | 614
-rw-r--r--  vendor/golang.org/x/sys/unix/mksyscall_solaris.go | 335
-rw-r--r--  vendor/golang.org/x/sys/unix/mksysctl_openbsd.go | 355
-rw-r--r--  vendor/golang.org/x/sys/unix/mksysnum.go | 190
-rw-r--r--  vendor/golang.org/x/sys/unix/types_aix.go | 237
-rw-r--r--  vendor/golang.org/x/sys/unix/types_darwin.go | 283
-rw-r--r--  vendor/golang.org/x/sys/unix/types_dragonfly.go | 263
-rw-r--r--  vendor/golang.org/x/sys/unix/types_freebsd.go | 400
-rw-r--r--  vendor/golang.org/x/sys/unix/types_netbsd.go | 290
-rw-r--r--  vendor/golang.org/x/sys/unix/types_openbsd.go | 283
-rw-r--r--  vendor/golang.org/x/sys/unix/types_solaris.go | 266
-rw-r--r--  vendor/golang.org/x/text/encoding/charmap/maketables.go | 556
-rw-r--r--  vendor/golang.org/x/text/encoding/htmlindex/gen.go | 173
-rw-r--r--  vendor/golang.org/x/text/encoding/internal/identifier/gen.go | 142
-rw-r--r--  vendor/golang.org/x/text/encoding/japanese/maketables.go | 161
-rw-r--r--  vendor/golang.org/x/text/encoding/korean/maketables.go | 143
-rw-r--r--  vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go | 161
-rw-r--r--  vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go | 140
-rw-r--r--  vendor/golang.org/x/text/internal/language/compact/gen.go | 64
-rw-r--r--  vendor/golang.org/x/text/internal/language/compact/gen_index.go | 113
-rw-r--r--  vendor/golang.org/x/text/internal/language/compact/gen_parents.go | 54
-rw-r--r--  vendor/golang.org/x/text/internal/language/gen.go | 1520
-rw-r--r--  vendor/golang.org/x/text/internal/language/gen_common.go | 20
-rw-r--r--  vendor/golang.org/x/text/language/gen.go | 305
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/gen.go | 133
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/gen_ranges.go | 57
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/gen_trieval.go | 64
-rw-r--r--  vendor/golang.org/x/text/unicode/norm/maketables.go | 986
-rw-r--r--  vendor/golang.org/x/text/unicode/norm/triegen.go | 117
-rw-r--r--  vendor/gopkg.in/yaml.v2/decode.go | 14
-rw-r--r--  vendor/modules.txt | 423
-rw-r--r--  version/version.go | 2
411 files changed, 6529 insertions, 15263 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 0713f6dda..c27b76110 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -32,7 +32,7 @@ env:
###
_BUILT_IMAGE_SUFFIX: "libpod-5642998972416000"
FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
- PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-29-${_BUILT_IMAGE_SUFFIX}"
+ PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
SPECIAL_FEDORA_CACHE_IMAGE_NAME: "xfedora-30-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"
@@ -136,7 +136,9 @@ gating_task:
# in sync at all times.
vendor_task:
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
depends_on:
- "gating"
@@ -148,15 +150,15 @@ vendor_task:
# Runs within Cirrus's "community cluster"
container:
- image: "quay.io/libpod/gate:latest"
+ image: docker.io/library/golang:1.13
cpu: 4
memory: 12
timeout_in: 30m
vendor_script:
- - '/usr/local/bin/entrypoint.sh vendor |& ${TIMESTAMP}'
- - 'cd ${GOSRC} && ./hack/tree_status.sh |& ${TIMESTAMP}'
+ - 'cd ${CIRRUS_WORKING_DIR} && make vendor'
+ - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh'
on_failure:
failed_branch_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_branch_failure.sh |& ${TIMESTAMP}'
@@ -166,7 +168,9 @@ vendor_task:
# whether the git tree is clean.
varlink_api_task:
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
depends_on:
- "gating"
@@ -201,8 +205,10 @@ build_each_commit_task:
- "vendor"
- "varlink_api"
- only_if: $CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_BRANCH != $DEST_BRANCH &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
gce_instance:
image_project: "libpod-218412"
@@ -232,8 +238,10 @@ build_without_cgo_task:
- "vendor"
- "varlink_api"
- only_if: $CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_BRANCH != $DEST_BRANCH &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
gce_instance:
image_project: "libpod-218412"
@@ -326,12 +334,14 @@ testing_task:
- "build_without_cgo"
# Only test build cache-images, if that's what's requested
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
gce_instance:
matrix:
# Images are generated separately, from build_images_task (below)
- image_name: "${FEDORA_CACHE_IMAGE_NAME}"
+ #image_name: "${FEDORA_CACHE_IMAGE_NAME}"
image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
# Multiple test failures on Ubuntu 19 - Fixes TBD in future PR
# TODO: image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
@@ -381,7 +391,9 @@ special_testing_rootless_task:
- "build_each_commit"
- "build_without_cgo"
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
env:
ADD_SECOND_PARTITION: true
@@ -413,7 +425,9 @@ special_testing_in_podman_task:
- "build_each_commit"
- "build_without_cgo"
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
env:
ADD_SECOND_PARTITION: true
@@ -439,7 +453,9 @@ special_testing_cross_task:
- "varlink_api"
- "vendor"
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
env:
matrix:
@@ -473,7 +489,9 @@ special_testing_cgroupv2_task:
- "varlink_api"
- "vendor"
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
gce_instance:
image_name: "${SPECIAL_FEDORA_CACHE_IMAGE_NAME}"
@@ -503,7 +521,9 @@ special_testing_endpoint_task:
- "varlink_api"
- "vendor"
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
env:
SPECIALMODE: 'endpoint' # See docs
@@ -525,7 +545,9 @@ test_building_snap_task:
depends_on:
- "gating"
- only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ only_if: >-
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:IMG.*' &&
+ $CIRRUS_CHANGE_MESSAGE !=~ '.*CI:DOCS.*'
container:
image: yakshaveinc/snapcraft:core18
@@ -539,7 +561,7 @@ test_build_cache_images_task:
only_if: >-
$CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*'
depends_on:
- "gating"
@@ -571,7 +593,7 @@ verify_test_built_images_task:
only_if: >-
$CIRRUS_BRANCH != $DEST_BRANCH &&
- $CIRRUS_CHANGE_MESSAGE =~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+ $CIRRUS_CHANGE_MESSAGE =~ '.*CI:IMG.*'
depends_on:
@@ -589,8 +611,8 @@ verify_test_built_images_task:
TEST_REMOTE_CLIENT: false
matrix:
# Required env. var. by check_image_script
- PACKER_BUILDER_NAME: "fedora-29"
PACKER_BUILDER_NAME: "fedora-30"
+ #PACKER_BUILDER_NAME: "fedora-31"
PACKER_BUILDER_NAME: "xfedora-30"
PACKER_BUILDER_NAME: "ubuntu-18"
# TODO support $UBUNTU_CACHE_IMAGE_NAME: PACKER_BUILDER_NAME: "ubuntu-19"
@@ -638,6 +660,26 @@ upload_snap_task:
- 'cd contrib/snapcraft && snapcraft && snapcraft push *.snap --release edge'
+test_docs_task:
+
+ only_if: >-
+ $CIRRUS_BRANCH != $DEST_BRANCH &&
+ $CIRRUS_CHANGE_MESSAGE =~ '.*CI:DOCS.*'
+ depends_on:
+ - "gating"
+ - "vendor"
+ #- "test_docs"
+ #- "varlink_api"
+ #- "build_each_commit"
+ stub_script: '/bin/true'
+
+
+#publish_docs_task:
+#
+# only_if: $CIRRUS_BRANCH == $DEST_BRANCH &&
+# $CIRRUS_TAG =~ '^v[0-9]\.[0-9]\.[0-9]$'
+
+
# Post message to IRC if everything passed PR testing
success_task:
@@ -664,6 +706,7 @@ success_task:
- "test_building_snap"
- "upload_snap"
- "verify_test_built_images"
+ - "test_docs"
env:
CIRRUS_WORKING_DIR: "/usr/src/libpod"
diff --git a/.gitignore b/.gitignore
index 598384582..54b63518f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
/docs/*.[158]
/docs/*.[158].gz
/docs/remote
+/docs/build/
*.o
*.orig
/pause/pause.o
@@ -25,3 +26,4 @@ release.txt
podman-remote*.zip
podman*.tar.gz
.idea*
+.vscode*
diff --git a/Makefile b/Makefile
index d4d123d69..2c1ded9d2 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@ export GOPROXY=https://proxy.golang.org
GO ?= go
DESTDIR ?=
-EPOCH_TEST_COMMIT ?= 2b0892e757c878cdb087dd22b8986bccef0276ed
+EPOCH_TEST_COMMIT ?= ac73fd3fe5dcbf2647d589f9c9f37fe9531ed663
HEAD ?= HEAD
CHANGELOG_BASE ?= HEAD~
CHANGELOG_TARGET ?= HEAD
@@ -73,8 +73,8 @@ ASMFLAGS ?= all=-trimpath=${PWD}
LDFLAGS_PODMAN ?= $(LDFLAGS) \
-X $(LIBPOD)/define.gitCommit=$(GIT_COMMIT) \
-X $(LIBPOD)/define.buildInfo=$(BUILD_INFO) \
- -X $(LIBPOD).installPrefix=$(PREFIX) \
- -X $(LIBPOD).etcDir=$(ETCDIR)
+ -X $(LIBPOD)/config._installPrefix=$(PREFIX) \
+ -X $(LIBPOD)/config._etcDir=$(ETCDIR)
#Update to LIBSECCOMP_COMMIT should reflect in Dockerfile too.
LIBSECCOMP_COMMIT := release-2.3
# Rarely if ever should integration tests take more than 50min,
@@ -167,13 +167,11 @@ podman-remote: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on
$(GO_BUILD) $(BUILDFLAGS) -gcflags '$(GCFLAGS)' -asmflags '$(ASMFLAGS)' -ldflags '$(LDFLAGS_PODMAN)' -tags "$(BUILDTAGS) remoteclient" -o bin/$@ $(PROJECT)/cmd/podman
.PHONY: podman.msi
-podman.msi: podman-remote podman-remote-windows docs ## Will always rebuild exe as there is no podman-remote-windows.exe target to verify timestamp
- rm -rf bin/windows
- mkdir -p bin/windows
- docs/podman-remote.sh windows bin/windows docs
- find bin/windows -print \
- |wixl-heat --var var.ManSourceDir --component-group ManFiles --directory-ref INSTALLDIR --prefix bin/windows/ >bin/windows/pages.wsx
- wixl -D VERSION=$(RELEASE_NUMBER) -D ManSourceDir=bin/windows -o podman-v$(RELEASE_NUMBER).msi contrib/msi/podman.wxs bin/windows/pages.wsx
+podman.msi: podman-remote podman-remote-windows install-podman-remote-windows-docs ## Will always rebuild exe as there is no podman-remote-windows.exe target to verify timestamp
+ $(eval DOCFILE := docs/build/remote/windows)
+ find $(DOCFILE) -print \
+ |wixl-heat --var var.ManSourceDir --component-group ManFiles --directory-ref INSTALLDIR --prefix $(DOCFILE)/ >$(DOCFILE)/pages.wsx
+ wixl -D VERSION=$(RELEASE_NUMBER) -D ManSourceDir=$(DOCFILE) -o podman-v$(RELEASE_NUMBER).msi contrib/msi/podman.wxs $(DOCFILE)/pages.wsx
podman-remote-%: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build podman for a specific GOOS
$(eval BINSFX := $(shell test "$*" != "windows" || echo ".exe"))
@@ -196,7 +194,6 @@ clean: ## Clean artifacts
$(wildcard podman*.tar.gz) \
bin \
build \
- docs/remote \
test/checkseccomp/checkseccomp \
test/goecho/goecho \
test/testdata/redis-image \
@@ -205,9 +202,7 @@ clean: ## Clean artifacts
libpod/pod_ffjson.go \
libpod/container_easyjson.go \
libpod/pod_easyjson.go \
- $(MANPAGES) ||:
- find . -name \*~ -delete
- find . -name \#\* -delete
+ docs/build
libpodimage: ## Build the libpod image
${CONTAINER_RUNTIME} build -t ${LIBPOD_IMAGE} .
@@ -312,20 +307,26 @@ install.catatonit:
test-binaries: test/checkseccomp/checkseccomp test/goecho/goecho install.catatonit
-MANPAGES_MD ?= $(wildcard docs/*.md pkg/*/docs/*.md)
+MANPAGES_MD ?= $(wildcard docs/source/markdown/*.md pkg/*/docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
+MANPAGES_DEST ?= $(subst markdown,man, $(subst source,build,$(MANPAGES)))
$(MANPAGES): %: %.md .gopathok
- @sed -e 's/\((podman.*\.md)\)//' -e 's/\[\(podman.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
+ @sed -e 's/\((podman.*\.md)\)//' -e 's/\[\(podman.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $(subst source/markdown,build/man,$@)
-docs: $(MANPAGES) ## Generate documentation
+docdir:
+ mkdir -p docs/build/man
-install-podman-remote-docs: podman-remote docs
- rm -rf docs/remote
- docs/podman-remote.sh darwin docs/remote docs
+docs: docdir $(MANPAGES) ## Generate documentation
+
+install-podman-remote-%-docs: podman-remote docs $(MANPAGES)
+ rm -rf docs/build/remote
+ mkdir -p docs/build/remote
+ ln -sf $(shell pwd)/docs/source/markdown/links docs/build/man/
+ docs/remote-docs.sh $* docs/build/remote/$* $(if $(findstring windows,$*),docs/source/markdown,docs/build/man)
man-page-check:
- ./hack/man-page-checker
+ hack/man-page-checker
# When publishing releases include critical build-time details
.PHONY: release.txt
@@ -349,7 +350,7 @@ podman-v$(RELEASE_NUMBER).tar.gz: binaries docs release.txt
# Must call make in-line: Dependency-spec. w/ wild-card also consumes variable value.
podman-remote-v$(RELEASE_NUMBER)-%.zip:
- $(MAKE) podman-remote-$* install-podman-remote-docs release.txt \
+ $(MAKE) podman-remote-$* install-podman-remote-$*-docs release.txt \
RELEASE_BASENAME=$(shell hack/get_release_info.sh REMOTENAME) \
RELEASE_DIST=$* RELEASE_DIST_VER="-"
$(eval TMPDIR := $(shell mktemp -d -p '' $podman_remote_XXXX))
@@ -359,13 +360,7 @@ podman-remote-v$(RELEASE_NUMBER)-%.zip:
# release.txt location and content depended upon by automated tooling
cp release.txt "$(TMPDIR)/"
cp ./bin/podman-remote-$*$(BINSFX) "$(TMPDIR)/$(SUBDIR)/podman$(BINSFX)"
- cp -r ./docs/remote "$(TMPDIR)/$(SUBDIR)/docs/"
- $(eval DOCFILE := $(TMPDIR)/$(SUBDIR)/docs/podman.1)
- cp docs/podman-remote.1 "$(DOCFILE)"
- sed -i 's/podman\\*-remote/podman/g' "$(DOCFILE)"
- sed -i 's/Podman\\*-remote/Podman\ for\ $*/g' "$(DOCFILE)"
- sed -i 's/podman\.conf/podman\-remote\.conf/g' "$(DOCFILE)"
- sed -i 's/A\ remote\ CLI\ for\ Podman\:\ //g' "$(DOCFILE)"
+ cp -r ./docs/build/remote/$* "$(TMPDIR)/$(SUBDIR)/docs/"
cd "$(TMPDIR)" && \
zip --recurse-paths "$(CURDIR)/$@" "./release.txt" "./"
-rm -rf "$(TMPDIR)"
@@ -381,7 +376,7 @@ podman-remote-%-release:
$(MAKE) podman-remote-v$(RELEASE_NUMBER)-$*.zip
docker-docs: docs
- (cd docs; ./dckrman.sh *.1)
+ (cd docs; ./dckrman.sh ./build/man/*.1)
changelog: ## Generate changelog
@echo "Creating changelog from $(CHANGELOG_BASE) to $(CHANGELOG_TARGET)"
@@ -408,9 +403,9 @@ install.bin: podman
install.man: docs
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(MANDIR)/man1
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(MANDIR)/man5
- install ${SELINUXOPT} -m 644 $(filter %.1,$(MANPAGES)) -t $(DESTDIR)$(MANDIR)/man1
- install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES)) -t $(DESTDIR)$(MANDIR)/man5
- install ${SELINUXOPT} -m 644 docs/links/*1 -t $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 $(filter %.1,$(MANPAGES_DEST)) -t $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES_DEST)) -t $(DESTDIR)$(MANDIR)/man5
+ install ${SELINUXOPT} -m 644 docs/source/markdown/links/*1 -t $(DESTDIR)$(MANDIR)/man1
install.config:
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(SHAREDIR_CONTAINERS)
@@ -430,7 +425,7 @@ install.cni:
install.docker: docker-docs
install ${SELINUXOPT} -d -m 755 $(DESTDIR)$(BINDIR) $(DESTDIR)$(MANDIR)/man1
install ${SELINUXOPT} -m 755 docker $(DESTDIR)$(BINDIR)/docker
- install ${SELINUXOPT} -m 644 docs/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
+ install ${SELINUXOPT} -m 644 docs/build/man/docker*.1 -t $(DESTDIR)$(MANDIR)/man1
install.systemd:
install ${SELINUXOPT} -m 755 -d ${DESTDIR}${SYSTEMDDIR} ${DESTDIR}${USERSYSTEMDDIR} ${DESTDIR}${TMPFILESDIR}
@@ -444,16 +439,16 @@ install.systemd:
install ${SELINUXOPT} -m 644 contrib/varlink/podman.conf ${DESTDIR}${TMPFILESDIR}/podman.conf
uninstall:
- for i in $(filter %.1,$(MANPAGES)); do \
+ for i in $(filter %.1,$(MANPAGES_DEST)); do \
rm -f $(DESTDIR)$(MANDIR)/man1/$$(basename $${i}); \
done; \
- for i in $(filter %.5,$(MANPAGES)); do \
+ for i in $(filter %.5,$(MANPAGES_DEST)); do \
rm -f $(DESTDIR)$(MANDIR)/man5/$$(basename $${i}); \
done
.PHONY: .gitvalidation
.gitvalidation: .gopathok
- GIT_CHECK_EXCLUDE="./vendor:docs/rtd/make.bat" $(GOBIN)/git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..$(HEAD)
+ GIT_CHECK_EXCLUDE="./vendor:docs/make.bat" $(GOBIN)/git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..$(HEAD)
.PHONY: install.tools
install.tools: .install.gitvalidation .install.gometalinter .install.md2man .install.ginkgo .install.golangci-lint ## Install needed tools
@@ -527,6 +522,9 @@ vendor:
$(GO) mod vendor && \
$(GO) mod verify
+vendor-in-container:
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor
+
.PHONY: \
.gopathok \
binaries \
diff --git a/README.md b/README.md
index 166a4bebe..8f7e7facd 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
Libpod provides a library for applications looking to use the Container Pod concept,
popularized by Kubernetes. Libpod also contains the Pod Manager tool `(Podman)`. Podman manages pods, containers, container images, and container volumes.
-* [Latest Version: 1.6.2](https://github.com/containers/libpod/releases/latest)
+* [Latest Version: 1.6.3](https://github.com/containers/libpod/releases/latest)
* [Continuous Integration:](contrib/cirrus/README.md) [![Build Status](https://api.cirrus-ci.com/github/containers/libpod.svg)](https://cirrus-ci.com/github/containers/libpod/master)
* [GoDoc: ![GoDoc](https://godoc.org/github.com/containers/libpod/libpod?status.svg)](https://godoc.org/github.com/containers/libpod/libpod)
* Automated continuous release downloads (including remote-client):
@@ -106,7 +106,7 @@ Information on how Podman configures [OCI Hooks][spec-hooks] to run when launchi
**[Podman API](API.md)**
Documentation on the Podman API using [Varlink](https://www.varlink.org/).
-**[Podman Commands](commands.md)**
+**[Podman Commands](https://podman.readthedocs.io/en/latest/Commands.html)**
A list of the Podman commands with links to their man pages and in many cases videos
showing the commands in use.
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 235871273..cefad25f4 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,34 @@
# Release Notes
+## 1.6.3
+### Features
+- Handling of the `libpod.conf` configuration file has seen major changes. Most significantly, rootless users will no longer automatically receive a complete configuration file when they first use Podman, and will instead only receive differences from the global configuration.
+- Initial support for the CNI DNS plugin, which allows containers to resolve the IPs of other containers via DNS name, has been added
+- Podman now supports anonymous named volumes, created by specifying only a destination to the `-v` flag to the `podman create` and `podman run` commands
+- Named volumes now support `uid` and `gid` options in `--opt o=...` to set UID and GID of the created volume
+
+### Bugfixes
+- Fixed a bug where the `podman start` command would print container ID, instead of name, when starting containers given their name
+- Fixed a bug where named volumes with options did not properly detect issues with mounting the volume, leading to an inconsistent state ([#4303](https://github.com/containers/libpod/issues/4303))
+- Fixed a bug where incorrect Seccomp profiles were used in containers generated by `podman play kube`
+- Fixed a bug where processes started by `podman exec` would have the wrong SELinux label in some circumstances ([#4361](https://github.com/containers/libpod/issues/4361))
+- Fixed a bug where error messages from `slirp4netns` would be lost
+- Fixed a bug where `podman run --network=$NAME` would not throw an error in rootless Podman, where CNI networks are not supported
+- Fixed a bug where `podman network create` would throw confusing errors when trying to create a volume with a name that already exists
+- Fixed a bug where Podman would not error if the `systemd` CGroup manager was specified, but systemd could not be contacted over DBus
+- Fixed a bug where image volumes were mounted `noexec` ([#4318](https://github.com/containers/libpod/issues/4318))
+- Fixed a bug where the `podman stats` command required the name of a container to be given, instead of showing all containers when no container was specified ([#4274](https://github.com/containers/libpod/issues/4274))
+- Fixed a bug where the `podman volume inspect` command would not show the options that named volumes were created with
+- Fixed a bug where custom storage configuration was not written to `storage.conf` at time of first creation for rootless Podman ([#2659](https://github.com/containers/libpod/issues/2659))
+- Fixed a bug where remote Podman did not support shell redirection of container output
+
+### Misc
+- Updated vendored containers/image library to v5.0
+- Initial support for images using manifest lists has been added, though commands for directly interacting with manifests are still missing
+- Support for pushing to and pulling from OSTree has been removed due to deprecation in the containers/image library
+- Rootless Podman no longer enables linger on systems with systemd as init by default. As such, containers will now be killed when the user who ran them logs out, unless linger is explicitly enabled using [loginctl](https://www.freedesktop.org/software/systemd/man/loginctl.html)
+- Podman will now check the version of `conmon` that is in use to ensure it is sufficient
+
## 1.6.2
### Features
- Added a `--runtime` flag to `podman system migrate` to allow the OCI runtime for all containers to be reset, to ease transition to the `crun` runtime on CGroups V2 systems until `runc` gains full support
diff --git a/cmd/podman/attach.go b/cmd/podman/attach.go
index b78633ed6..b03673f29 100644
--- a/cmd/podman/attach.go
+++ b/cmd/podman/attach.go
@@ -2,6 +2,7 @@ package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -31,7 +32,7 @@ func init() {
attachCommand.SetHelpTemplate(HelpTemplate())
attachCommand.SetUsageTemplate(UsageTemplate())
flags := attachCommand.Flags()
- flags.StringVar(&attachCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
+ flags.StringVar(&attachCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
flags.BoolVar(&attachCommand.NoStdin, "no-stdin", false, "Do not attach STDIN. The default is false")
flags.BoolVar(&attachCommand.SigProxy, "sig-proxy", true, "Proxy received signals to the process")
flags.BoolVarP(&attachCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
diff --git a/cmd/podman/build.go b/cmd/podman/build.go
index e9ebc50aa..fbf85fc97 100644
--- a/cmd/podman/build.go
+++ b/cmd/podman/build.go
@@ -11,7 +11,7 @@ import (
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/docker/go-units"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -155,6 +155,11 @@ func buildCmd(c *cliconfig.BuildValues) error {
tags = tags[1:]
}
}
+ if c.BudResults.Authfile != "" {
+ if _, err := os.Stat(c.BudResults.Authfile); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.BudResults.Authfile)
+ }
+ }
pullPolicy := imagebuildah.PullNever
if c.Pull {
@@ -238,6 +243,9 @@ func buildCmd(c *cliconfig.BuildValues) error {
if contextDir == "" {
return errors.Errorf("no context directory specified, and no containerfile specified")
}
+ if !fileIsDir(contextDir) {
+ return errors.Errorf("context must be a directory: %v", contextDir)
+ }
if len(containerfiles) == 0 {
if checkIfFileExists(filepath.Join(contextDir, "Containerfile")) {
containerfiles = append(containerfiles, filepath.Join(contextDir, "Containerfile"))
@@ -260,7 +268,7 @@ func buildCmd(c *cliconfig.BuildValues) error {
if err != nil {
return err
}
- if conf != nil && conf.CgroupManager == libpod.SystemdCgroupsManager {
+ if conf != nil && conf.CgroupManager == define.SystemdCgroupsManager {
runtimeFlags = append(runtimeFlags, "--systemd-cgroup")
}
// end from buildah
diff --git a/cmd/podman/cliconfig/config.go b/cmd/podman/cliconfig/config.go
index 1bb5fa30c..780b68333 100644
--- a/cmd/podman/cliconfig/config.go
+++ b/cmd/podman/cliconfig/config.go
@@ -251,7 +251,7 @@ type LogsValues struct {
Details bool
Follow bool
Since string
- Tail uint64
+ Tail int64
Timestamps bool
Latest bool
}
@@ -467,14 +467,15 @@ type RestartValues struct {
type RestoreValues struct {
PodmanCommand
- All bool
- Keep bool
- Latest bool
- TcpEstablished bool
- Import string
- Name string
- IgnoreRootfs bool
- IgnoreStaticIP bool
+ All bool
+ Keep bool
+ Latest bool
+ TcpEstablished bool
+ Import string
+ Name string
+ IgnoreRootfs bool
+ IgnoreStaticIP bool
+ IgnoreStaticMAC bool
}
type RmValues struct {
diff --git a/cmd/podman/common.go b/cmd/podman/common.go
index 33a848553..4db043f31 100644
--- a/cmd/podman/common.go
+++ b/cmd/podman/common.go
@@ -7,8 +7,8 @@ import (
"strings"
"github.com/containers/buildah"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/sysinfo"
@@ -112,7 +112,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
"Attach to STDIN, STDOUT or STDERR (default [])",
)
createFlags.String(
- "authfile", shared.GetAuthFile(""),
+ "authfile", buildahcli.GetDefaultAuthFile(),
"Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override",
)
createFlags.String(
@@ -132,7 +132,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
"Drop capabilities from the container",
)
createFlags.String(
- "cgroupns", "host",
+ "cgroupns", "",
"cgroup namespace to use",
)
createFlags.String(
@@ -188,7 +188,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
"Run container in background and print container ID",
)
createFlags.String(
- "detach-keys", "",
+ "detach-keys", define.DefaultDetachKeys,
"Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`",
)
createFlags.StringSlice(
@@ -328,7 +328,7 @@ func getCreateFlags(c *cliconfig.PodmanCommand) {
)
createFlags.String(
"mac-address", "",
- "Container MAC address (e.g. 92:d0:c6:0a:29:33), not currently supported",
+ "Container MAC address (e.g. 92:d0:c6:0a:29:33)",
)
createFlags.StringP(
"memory", "m", "",
diff --git a/cmd/podman/cp.go b/cmd/podman/cp.go
index 75a23afd6..c53a97df3 100644
--- a/cmd/podman/cp.go
+++ b/cmd/podman/cp.go
@@ -257,8 +257,15 @@ func parsePath(runtime *libpod.Runtime, path string) (*libpod.Container, string)
return nil, path
}
+func evalSymlinks(path string) (string, error) {
+ if path == os.Stdin.Name() {
+ return path, nil
+ }
+ return filepath.EvalSymlinks(path)
+}
+
func getPathInfo(path string) (string, os.FileInfo, error) {
- path, err := filepath.EvalSymlinks(path)
+ path, err := evalSymlinks(path)
if err != nil {
return "", nil, errors.Wrapf(err, "error evaluating symlinks %q", path)
}
@@ -270,7 +277,7 @@ func getPathInfo(path string) (string, os.FileInfo, error) {
}
func copy(src, destPath, dest string, idMappingOpts storage.IDMappingOptions, chownOpts *idtools.IDPair, extract, isFromHostToCtr bool) error {
- srcPath, err := filepath.EvalSymlinks(src)
+ srcPath, err := evalSymlinks(src)
if err != nil {
return errors.Wrapf(err, "error evaluating symlinks %q", srcPath)
}
diff --git a/cmd/podman/create.go b/cmd/podman/create.go
index 3c24729c5..73fba5a8c 100644
--- a/cmd/podman/create.go
+++ b/cmd/podman/create.go
@@ -2,6 +2,7 @@ package main
import (
"fmt"
+ "os"
"strings"
"github.com/containers/libpod/cmd/podman/cliconfig"
@@ -50,6 +51,12 @@ func createCmd(c *cliconfig.CreateValues) error {
defer span.Finish()
}
+ if c.String("authfile") != "" {
+ if _, err := os.Stat(c.String("authfile")); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.String("authfile"))
+ }
+ }
+
if err := createInit(&c.PodmanCommand); err != nil {
return err
}
diff --git a/cmd/podman/exec.go b/cmd/podman/exec.go
index 649a7b0db..afa701897 100644
--- a/cmd/podman/exec.go
+++ b/cmd/podman/exec.go
@@ -2,6 +2,7 @@ package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -34,7 +35,7 @@ func init() {
execCommand.SetUsageTemplate(UsageTemplate())
flags := execCommand.Flags()
flags.SetInterspersed(false)
- flags.StringVar(&execCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
+ flags.StringVar(&execCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
flags.StringArrayVarP(&execCommand.Env, "env", "e", []string{}, "Set environment variables")
flags.BoolVarP(&execCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
flags.BoolVarP(&execCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
diff --git a/cmd/podman/images.go b/cmd/podman/images.go
index 6157fda2a..7d498517c 100644
--- a/cmd/podman/images.go
+++ b/cmd/podman/images.go
@@ -291,6 +291,10 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma
if len(tag) == 71 && strings.HasPrefix(tag, "sha256:") {
imageDigest = digest.Digest(tag)
tag = ""
+ } else {
+ if img.Digest() != "" {
+ imageDigest = img.Digest()
+ }
}
params := imagesTemplateParams{
Repository: repo,
diff --git a/cmd/podman/login.go b/cmd/podman/login.go
index f91366eac..369e0da16 100644
--- a/cmd/podman/login.go
+++ b/cmd/podman/login.go
@@ -6,11 +6,11 @@ import (
"os"
"strings"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/image"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/pkg/errors"
@@ -54,7 +54,7 @@ func init() {
flags.BoolVar(&loginCommand.StdinPassword, "password-stdin", false, "Take the password from stdin")
// Disabled flags for the remote client
if !remote {
- flags.StringVar(&loginCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&loginCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&loginCommand.CertDir, "cert-dir", "", "Pathname of a directory containing TLS certificates and keys used to connect to the registry")
flags.BoolVar(&loginCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
}
diff --git a/cmd/podman/logout.go b/cmd/podman/logout.go
index ef3452afe..4a113b1d0 100644
--- a/cmd/podman/logout.go
+++ b/cmd/podman/logout.go
@@ -3,11 +3,11 @@ package main
import (
"fmt"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
- "github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -40,7 +40,7 @@ func init() {
logoutCommand.SetUsageTemplate(UsageTemplate())
flags := logoutCommand.Flags()
flags.BoolVarP(&logoutCommand.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file")
- flags.StringVar(&logoutCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&logoutCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
markFlagHiddenForRemoteClient("authfile", flags)
}
@@ -59,7 +59,10 @@ func logoutCmd(c *cliconfig.LogoutValues) error {
server = scrubServer(args[0])
}
- sc := image.GetSystemContext("", c.Authfile, false)
+ sc, err := shared.GetSystemContext(c.Authfile)
+ if err != nil {
+ return err
+ }
if c.All {
if err := config.RemoveAllAuthentication(sc); err != nil {
@@ -69,7 +72,7 @@ func logoutCmd(c *cliconfig.LogoutValues) error {
return nil
}
- err := config.RemoveAuthentication(sc, server)
+ err = config.RemoveAuthentication(sc, server)
switch errors.Cause(err) {
case nil:
fmt.Printf("Removed login credentials for %s\n", server)
diff --git a/cmd/podman/logs.go b/cmd/podman/logs.go
index 32605389e..a2594b5bf 100644
--- a/cmd/podman/logs.go
+++ b/cmd/podman/logs.go
@@ -52,7 +52,7 @@ func init() {
flags.BoolVarP(&logsCommand.Follow, "follow", "f", false, "Follow log output. The default is false")
flags.BoolVarP(&logsCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.StringVar(&logsCommand.Since, "since", "", "Show logs since TIMESTAMP")
- flags.Uint64Var(&logsCommand.Tail, "tail", 0, "Output the specified number of LINES at the end of the logs. Defaults to 0, which prints all lines")
+ flags.Int64Var(&logsCommand.Tail, "tail", -1, "Output the specified number of LINES at the end of the logs. Defaults to -1, which prints all lines")
flags.BoolVarP(&logsCommand.Timestamps, "timestamps", "t", false, "Output the timestamps in the log")
markFlagHidden(flags, "details")
flags.SetInterspersed(false)
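
Switching --tail from Uint64 to Int64 lets the new default of -1 mean "all lines", so 0 can mean "no lines" instead of doubling as the print-everything sentinel. An illustrative helper (not from this patch) showing how a log reader would interpret the value:

package main

import "fmt"

// tailLines: -1 (the new default) keeps every line, 0 keeps nothing, and a
// positive value keeps only the last N lines.
func tailLines(lines []string, tail int64) []string {
	if tail < 0 || int64(len(lines)) <= tail {
		return lines
	}
	return lines[int64(len(lines))-tail:]
}

func main() {
	logs := []string{"one", "two", "three"}
	fmt.Println(tailLines(logs, -1)) // [one two three]
	fmt.Println(tailLines(logs, 2))  // [two three]
	fmt.Println(tailLines(logs, 0))  // []
}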
diff --git a/cmd/podman/main_local.go b/cmd/podman/main_local.go
index 202d93b35..f630f1210 100644
--- a/cmd/podman/main_local.go
+++ b/cmd/podman/main_local.go
@@ -16,7 +16,7 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
- "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/tracing"
@@ -32,10 +32,7 @@ import (
const remote = false
func init() {
- cgroupManager := libpod.SystemdCgroupsManager
- if runtimeConfig, err := libpod.DefaultRuntimeConfig(); err == nil {
- cgroupManager = runtimeConfig.CgroupManager
- }
+ cgroupManager := define.SystemdCgroupsManager
cgroupHelp := "Cgroup manager to use (cgroupfs or systemd)"
cgroupv2, _ := cgroups.IsCgroup2UnifiedMode()
if rootless.IsRootless() && !cgroupv2 {
@@ -181,7 +178,7 @@ func setupRootless(cmd *cobra.Command, args []string) error {
if !ownsCgroup {
unitName := fmt.Sprintf("podman-%d.scope", os.Getpid())
if err := utils.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil {
- if conf.CgroupManager == libpod.SystemdCgroupsManager {
+ if conf.CgroupManager == define.SystemdCgroupsManager {
logrus.Warnf("Failed to add podman to systemd sandbox cgroup: %v", err)
} else {
logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err)
@@ -225,7 +222,7 @@ func setupRootless(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- if conf.CgroupManager == libpod.SystemdCgroupsManager {
+ if conf.CgroupManager == define.SystemdCgroupsManager {
logrus.Warnf("Failed to add pause process to systemd sandbox cgroup: %v", err)
} else {
logrus.Debugf("Failed to add pause process to systemd sandbox cgroup: %v", err)
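
init() no longer loads the libpod runtime configuration just to pick a default for the cgroup manager flag; it starts from the define.SystemdCgroupsManager constant, and the rootless cgroup-v1 branch visible in the context still adjusts it afterwards (that branch body lies outside this hunk). A before/after sketch, with loadRuntimeConfig as a hypothetical stand-in for libpod.DefaultRuntimeConfig:

package main

import "fmt"

type runtimeConfig struct{ CgroupManager string }

// loadRuntimeConfig is a stand-in for libpod.DefaultRuntimeConfig, which the
// old code had to call during init().
func loadRuntimeConfig() (*runtimeConfig, error) {
	return &runtimeConfig{CgroupManager: "systemd"}, nil
}

func defaultCgroupManagerOld() string {
	manager := "systemd" // libpod.SystemdCgroupsManager
	if cfg, err := loadRuntimeConfig(); err == nil {
		manager = cfg.CgroupManager
	}
	return manager
}

func defaultCgroupManagerNew() string {
	return "systemd" // define.SystemdCgroupsManager, no config load needed
}

func main() {
	fmt.Println(defaultCgroupManagerOld(), defaultCgroupManagerNew())
}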
diff --git a/cmd/podman/play_kube.go b/cmd/podman/play_kube.go
index 9a5cc3ec1..fc9f2d5b6 100644
--- a/cmd/podman/play_kube.go
+++ b/cmd/podman/play_kube.go
@@ -2,8 +2,10 @@ package main
import (
"fmt"
+ "os"
+
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -40,7 +42,7 @@ func init() {
flags.BoolVarP(&playKubeCommand.Quiet, "quiet", "q", false, "Suppress output information when pulling images")
// Disabled flags for the remote client
if !remote {
- flags.StringVar(&playKubeCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&playKubeCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&playKubeCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&playKubeCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&playKubeCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
@@ -57,6 +59,12 @@ func playKubeCmd(c *cliconfig.KubePlayValues) error {
return errors.New("you must supply at least one file")
}
+ if c.Authfile != "" {
+ if _, err := os.Stat(c.Authfile); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.Authfile)
+ }
+ }
+
ctx := getContext()
runtime, err := adapter.GetRuntime(ctx, &c.PodmanCommand)
if err != nil {
diff --git a/cmd/podman/pull.go b/cmd/podman/pull.go
index d64793147..c6baf6b61 100644
--- a/cmd/podman/pull.go
+++ b/cmd/podman/pull.go
@@ -6,12 +6,12 @@ import (
"os"
"strings"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/docker"
dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
@@ -60,7 +60,7 @@ func init() {
markFlagHidden(flags, "override-os")
// Disabled flags for the remote client
if !remote {
- flags.StringVar(&pullCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&pullCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&pullCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&pullCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&pullCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
@@ -96,6 +96,12 @@ func pullCmd(c *cliconfig.PullValues) (retError error) {
return errors.Errorf("too many arguments. Requires exactly 1")
}
+ if c.Authfile != "" {
+ if _, err := os.Stat(c.Authfile); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.Authfile)
+ }
+ }
+
arr := strings.SplitN(args[0], ":", 2)
if len(arr) == 2 {
if c.Bool("all-tags") {
diff --git a/cmd/podman/push.go b/cmd/podman/push.go
index 0fdfb6202..1be8dfe11 100644
--- a/cmd/podman/push.go
+++ b/cmd/podman/push.go
@@ -6,11 +6,11 @@ import (
"os"
"strings"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
@@ -59,7 +59,7 @@ func init() {
// Disabled flags for the remote client
if !remote {
- flags.StringVar(&pushCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&pushCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&pushCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.BoolVar(&pushCommand.Compress, "compress", false, "Compress tarball image layers when pushing to a directory using the 'dir' transport. (default is same compression type as source)")
flags.StringVar(&pushCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
@@ -74,6 +74,12 @@ func pushCmd(c *cliconfig.PushValues) error {
destName string
)
+ if c.Authfile != "" {
+ if _, err := os.Stat(c.Authfile); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.Authfile)
+ }
+ }
+
args := c.InputArgs
if len(args) == 0 || len(args) > 2 {
return errors.New("podman push requires at least one image name, and optionally a second to specify a different destination name")
diff --git a/cmd/podman/restore.go b/cmd/podman/restore.go
index 90d0b2dc4..caefadb6d 100644
--- a/cmd/podman/restore.go
+++ b/cmd/podman/restore.go
@@ -47,6 +47,7 @@ func init() {
flags.StringVarP(&restoreCommand.Name, "name", "n", "", "Specify new name for container restored from exported checkpoint (only works with --import)")
flags.BoolVar(&restoreCommand.IgnoreRootfs, "ignore-rootfs", false, "Do not apply root file-system changes when importing from exported checkpoint")
flags.BoolVar(&restoreCommand.IgnoreStaticIP, "ignore-static-ip", false, "Ignore IP address set via --static-ip")
+ flags.BoolVar(&restoreCommand.IgnoreStaticMAC, "ignore-static-mac", false, "Ignore MAC address set via --mac-address")
markFlagHiddenForRemoteClient("latest", flags)
}
diff --git a/cmd/podman/rm.go b/cmd/podman/rm.go
index 89062f524..6329a9d8e 100644
--- a/cmd/podman/rm.go
+++ b/cmd/podman/rm.go
@@ -43,7 +43,7 @@ func init() {
flags.BoolVarP(&rmCommand.Force, "force", "f", false, "Force removal of a running or unusable container. The default is false")
flags.BoolVarP(&rmCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.BoolVar(&rmCommand.Storage, "storage", false, "Remove container from storage library")
- flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove the volumes associated with the container")
+ flags.BoolVarP(&rmCommand.Volumes, "volumes", "v", false, "Remove anonymous volumes associated with the container")
markFlagHiddenForRemoteClient("storage", flags)
markFlagHiddenForRemoteClient("latest", flags)
}
diff --git a/cmd/podman/run.go b/cmd/podman/run.go
index 7aa4cb3c4..a6468f225 100644
--- a/cmd/podman/run.go
+++ b/cmd/podman/run.go
@@ -1,6 +1,8 @@
package main
import (
+ "os"
+
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/adapter"
"github.com/opentracing/opentracing-go"
@@ -45,6 +47,11 @@ func runCmd(c *cliconfig.RunValues) error {
span, _ := opentracing.StartSpanFromContext(Ctx, "runCmd")
defer span.Finish()
}
+ if c.String("authfile") != "" {
+ if _, err := os.Stat(c.String("authfile")); err != nil {
+ return errors.Wrapf(err, "error checking authfile path %s", c.String("authfile"))
+ }
+ }
if err := createInit(&c.PodmanCommand); err != nil {
return err
}
diff --git a/cmd/podman/runlabel.go b/cmd/podman/runlabel.go
index 7359bc0c7..358538155 100644
--- a/cmd/podman/runlabel.go
+++ b/cmd/podman/runlabel.go
@@ -6,6 +6,7 @@ import (
"os"
"strings"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
@@ -60,7 +61,7 @@ func init() {
flags.BoolVarP(&runlabelCommand.Quiet, "quiet", "q", false, "Suppress output information when installing images")
// Disabled flags for the remote client
if !remote {
- flags.StringVar(&runlabelCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&runlabelCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&runlabelCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&runlabelCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&runlabelCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
@@ -97,6 +98,12 @@ func runlabelCmd(c *cliconfig.RunlabelValues) error {
}
defer runtime.DeferredShutdown(false)
+ if c.Authfile != "" {
+ if _, err := os.Stat(c.Authfile); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.Authfile)
+ }
+ }
+
args := c.InputArgs
if len(args) < 2 {
return errors.Errorf("the runlabel command requires at least 2 arguments: LABEL IMAGE")
diff --git a/cmd/podman/search.go b/cmd/podman/search.go
index cdcb30a59..87a26e544 100644
--- a/cmd/podman/search.go
+++ b/cmd/podman/search.go
@@ -1,13 +1,14 @@
package main
import (
+ "os"
"reflect"
"strings"
+ buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
- "github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -45,7 +46,7 @@ func init() {
flags.BoolVar(&searchCommand.NoTrunc, "no-trunc", false, "Do not truncate the output")
// Disabled flags for the remote client
if !remote {
- flags.StringVar(&searchCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
+ flags.StringVar(&searchCommand.Authfile, "authfile", buildahcli.GetDefaultAuthFile(), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.BoolVar(&searchCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
}
}
@@ -65,6 +66,12 @@ func searchCmd(c *cliconfig.SearchValues) error {
return err
}
+ if c.Authfile != "" {
+ if _, err := os.Stat(c.Authfile); err != nil {
+ return errors.Wrapf(err, "error getting authfile %s", c.Authfile)
+ }
+ }
+
searchOptions := image.SearchOptions{
NoTrunc: c.NoTrunc,
Limit: c.Limit,
diff --git a/cmd/podman/shared/create.go b/cmd/podman/shared/create.go
index dc343e694..c7ea2e389 100644
--- a/cmd/podman/shared/create.go
+++ b/cmd/podman/shared/create.go
@@ -26,7 +26,6 @@ import (
"github.com/docker/docker/pkg/signal"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
- "github.com/opencontainers/selinux/go-selinux/label"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -94,7 +93,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
ArchitectureChoice: c.String("override-arch"),
}
- newImage, err := runtime.ImageRuntime().New(ctx, name, rtc.SignaturePolicyPath, GetAuthFile(c.String("authfile")), writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullType)
+ newImage, err := runtime.ImageRuntime().New(ctx, name, rtc.SignaturePolicyPath, c.String("authfile"), writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullType)
if err != nil {
return nil, nil, err
}
@@ -195,72 +194,6 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return ctr, createConfig, nil
}
-func parseSecurityOpt(config *cc.CreateConfig, securityOpts []string, runtime *libpod.Runtime) error {
- var (
- labelOpts []string
- )
-
- if config.PidMode.IsHost() {
- labelOpts = append(labelOpts, label.DisableSecOpt()...)
- } else if config.PidMode.IsContainer() {
- ctr, err := runtime.LookupContainer(config.PidMode.Container())
- if err != nil {
- return errors.Wrapf(err, "container %q not found", config.PidMode.Container())
- }
- secopts, err := label.DupSecOpt(ctr.ProcessLabel())
- if err != nil {
- return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
- }
- labelOpts = append(labelOpts, secopts...)
- }
-
- if config.IpcMode.IsHost() {
- labelOpts = append(labelOpts, label.DisableSecOpt()...)
- } else if config.IpcMode.IsContainer() {
- ctr, err := runtime.LookupContainer(config.IpcMode.Container())
- if err != nil {
- return errors.Wrapf(err, "container %q not found", config.IpcMode.Container())
- }
- secopts, err := label.DupSecOpt(ctr.ProcessLabel())
- if err != nil {
- return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
- }
- labelOpts = append(labelOpts, secopts...)
- }
-
- for _, opt := range securityOpts {
- if opt == "no-new-privileges" {
- config.NoNewPrivs = true
- } else {
- con := strings.SplitN(opt, "=", 2)
- if len(con) != 2 {
- return fmt.Errorf("invalid --security-opt 1: %q", opt)
- }
-
- switch con[0] {
- case "label":
- labelOpts = append(labelOpts, con[1])
- case "apparmor":
- config.ApparmorProfile = con[1]
- case "seccomp":
- config.SeccompProfilePath = con[1]
- default:
- return fmt.Errorf("invalid --security-opt 2: %q", opt)
- }
- }
- }
-
- if config.SeccompProfilePath == "" {
- var err error
- config.SeccompProfilePath, err = libpod.DefaultSeccompPath()
- if err != nil {
- return err
- }
- }
- config.LabelOpts = labelOpts
- return nil
-}
-
func configureEntrypoint(c *GenericCLIResults, data *inspect.ImageData) []string {
entrypoint := []string{}
if c.IsSet("entrypoint") {
@@ -336,10 +269,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return nil, err
}
- if c.String("mac-address") != "" {
- return nil, errors.Errorf("--mac-address option not currently supported")
- }
-
imageID := ""
inputCommand = c.InputArgs[1:]
@@ -352,11 +281,6 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
rootfs = c.InputArgs[0]
}
- sysctl, err := validateSysctl(c.StringSlice("sysctl"))
- if err != nil {
- return nil, errors.Wrapf(err, "invalid value for sysctl")
- }
-
if c.String("memory") != "" {
memoryLimit, err = units.RAMInBytes(c.String("memory"))
if err != nil {
@@ -695,61 +619,96 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
pidsLimit = 0
}
+ pid := &cc.PidConfig{
+ PidMode: pidMode,
+ }
+ ipc := &cc.IpcConfig{
+ IpcMode: ipcMode,
+ }
+
+ cgroup := &cc.CgroupConfig{
+ Cgroups: c.String("cgroups"),
+ Cgroupns: c.String("cgroupns"),
+ CgroupParent: c.String("cgroup-parent"),
+ CgroupMode: cgroupMode,
+ }
+
+ userns := &cc.UserConfig{
+ GroupAdd: c.StringSlice("group-add"),
+ IDMappings: idmappings,
+ UsernsMode: usernsMode,
+ User: user,
+ }
+
+ uts := &cc.UtsConfig{
+ UtsMode: utsMode,
+ NoHosts: c.Bool("no-hosts"),
+ HostAdd: c.StringSlice("add-host"),
+ Hostname: c.String("hostname"),
+ }
+
+ net := &cc.NetworkConfig{
+ DNSOpt: c.StringSlice("dns-opt"),
+ DNSSearch: c.StringSlice("dns-search"),
+ DNSServers: c.StringSlice("dns"),
+ HTTPProxy: c.Bool("http-proxy"),
+ MacAddress: c.String("mac-address"),
+ Network: network,
+ NetMode: netMode,
+ IPAddress: c.String("ip"),
+ Publish: c.StringSlice("publish"),
+ PublishAll: c.Bool("publish-all"),
+ PortBindings: portBindings,
+ }
+
+ sysctl, err := validateSysctl(c.StringSlice("sysctl"))
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid value for sysctl")
+ }
+
+ secConfig := &cc.SecurityConfig{
+ CapAdd: c.StringSlice("cap-add"),
+ CapDrop: c.StringSlice("cap-drop"),
+ Privileged: c.Bool("privileged"),
+ ReadOnlyRootfs: c.Bool("read-only"),
+ ReadOnlyTmpfs: c.Bool("read-only-tmpfs"),
+ Sysctl: sysctl,
+ }
+
+ if err := secConfig.SetLabelOpts(runtime, pid, ipc); err != nil {
+ return nil, err
+ }
+ if err := secConfig.SetSecurityOpts(runtime, c.StringArray("security-opt")); err != nil {
+ return nil, err
+ }
+
config := &cc.CreateConfig{
Annotations: annotations,
BuiltinImgVolumes: ImageVolumes,
ConmonPidFile: c.String("conmon-pidfile"),
ImageVolumeType: c.String("image-volume"),
- CapAdd: c.StringSlice("cap-add"),
- CapDrop: c.StringSlice("cap-drop"),
CidFile: c.String("cidfile"),
- Cgroupns: c.String("cgroupns"),
- Cgroups: c.String("cgroups"),
- CgroupParent: c.String("cgroup-parent"),
Command: command,
UserCommand: userCommand,
Detach: c.Bool("detach"),
Devices: c.StringSlice("device"),
- DNSOpt: c.StringSlice("dns-opt"),
- DNSSearch: c.StringSlice("dns-search"),
- DNSServers: c.StringSlice("dns"),
Entrypoint: entrypoint,
Env: env,
// ExposedPorts: ports,
- GroupAdd: c.StringSlice("group-add"),
- Hostname: c.String("hostname"),
- HostAdd: c.StringSlice("add-host"),
- HTTPProxy: c.Bool("http-proxy"),
- NoHosts: c.Bool("no-hosts"),
- IDMappings: idmappings,
Init: c.Bool("init"),
InitPath: c.String("init-path"),
Image: imageName,
ImageID: imageID,
Interactive: c.Bool("interactive"),
// IP6Address: c.String("ipv6"), // Not implemented yet - needs CNI support for static v6
- IPAddress: c.String("ip"),
- Labels: labels,
+ Labels: labels,
// LinkLocalIP: c.StringSlice("link-local-ip"), // Not implemented yet
LogDriver: logDriver,
LogDriverOpt: c.StringSlice("log-opt"),
- MacAddress: c.String("mac-address"),
Name: c.String("name"),
- Network: network,
// NetworkAlias: c.StringSlice("network-alias"), // Not implemented - does this make sense in Podman?
- IpcMode: ipcMode,
- NetMode: netMode,
- UtsMode: utsMode,
- PidMode: pidMode,
- CgroupMode: cgroupMode,
- Pod: podName,
- Privileged: c.Bool("privileged"),
- Publish: c.StringSlice("publish"),
- PublishAll: c.Bool("publish-all"),
- PortBindings: portBindings,
- Quiet: c.Bool("quiet"),
- ReadOnlyRootfs: c.Bool("read-only"),
- ReadOnlyTmpfs: c.Bool("read-only-tmpfs"),
+ Pod: podName,
+ Quiet: c.Bool("quiet"),
Resources: cc.CreateResourceConfig{
BlkioWeight: blkioWeight,
BlkioWeightDevice: c.StringSlice("blkio-weight-device"),
@@ -778,30 +737,27 @@ func ParseCreateOpts(ctx context.Context, c *GenericCLIResults, runtime *libpod.
},
RestartPolicy: c.String("restart"),
Rm: c.Bool("rm"),
+ Security: *secConfig,
StopSignal: stopSignal,
StopTimeout: c.Uint("stop-timeout"),
- Sysctl: sysctl,
Systemd: systemd,
Tmpfs: c.StringArray("tmpfs"),
Tty: tty,
- User: user,
- UsernsMode: usernsMode,
MountsFlag: c.StringArray("mount"),
Volumes: c.StringArray("volume"),
WorkDir: workDir,
Rootfs: rootfs,
VolumesFrom: c.StringSlice("volumes-from"),
Syslog: c.Bool("syslog"),
- }
- if config.Privileged {
- config.LabelOpts = label.DisableSecOpt()
- } else {
- if err := parseSecurityOpt(config, c.StringArray("security-opt"), runtime); err != nil {
- return nil, err
- }
+ Pid: *pid,
+ Ipc: *ipc,
+ Cgroup: *cgroup,
+ User: *userns,
+ Uts: *uts,
+ Network: *net,
}
- config.SecurityOpts = c.StringArray("security-opt")
+
warnings, err := verifyContainerResources(config, false)
if err != nil {
return nil, err
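
The large shared/create.go hunk regroups the flat CreateConfig fields into per-concern structs (Pid, Ipc, Cgroup, User, Uts, Network, Security), replaces the removed parseSecurityOpt with the SetLabelOpts and SetSecurityOpts methods on SecurityConfig, moves sysctl validation next to the struct it fills, and drops the early rejection of --mac-address, whose value now travels in NetworkConfig.MacAddress. A compressed sketch of the shape (field lists abbreviated; these are not the real definitions behind the cc package alias used above):

package spec

// SecurityConfig gathers the capability and isolation settings that used to
// live directly on CreateConfig. Label, seccomp and apparmor options are
// filled in afterwards by SetLabelOpts and SetSecurityOpts rather than by the
// CLI parser.
type SecurityConfig struct {
	CapAdd, CapDrop []string
	Privileged      bool
	ReadOnlyRootfs  bool
	ReadOnlyTmpfs   bool
	Sysctl          map[string]string
}

type NetworkConfig struct {
	DNSServers []string
	MacAddress string // no longer rejected up front; see the tuning plugin added to 87-podman-bridge.conflist below
	IPAddress  string
	Publish    []string
	PublishAll bool
}

type CreateConfig struct {
	Name     string
	Security SecurityConfig
	Network  NetworkConfig
	// Pid, Ipc, Cgroup, User and Uts sub-structs follow the same pattern.
}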
diff --git a/cmd/podman/shared/funcs.go b/cmd/podman/shared/funcs.go
index 9362e8e9b..404d0f288 100644
--- a/cmd/podman/shared/funcs.go
+++ b/cmd/podman/shared/funcs.go
@@ -6,24 +6,19 @@ import (
"path/filepath"
"strings"
- "github.com/containers/libpod/pkg/util"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/libpod/libpod/image"
"github.com/google/shlex"
+ "github.com/pkg/errors"
)
-func GetAuthFile(authfile string) string {
+func GetSystemContext(authfile string) (*types.SystemContext, error) {
if authfile != "" {
- return authfile
- }
-
- authfile = os.Getenv("REGISTRY_AUTH_FILE")
- if authfile != "" {
- return authfile
- }
-
- if runtimeDir, err := util.GetRuntimeDir(); err == nil {
- return filepath.Join(runtimeDir, "containers/auth.json")
+ if _, err := os.Stat(authfile); err != nil {
+ return nil, errors.Wrapf(err, "error checking authfile path %s", authfile)
+ }
}
- return ""
+ return image.GetSystemContext("", authfile, false), nil
}
func substituteCommand(cmd string) (string, error) {
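
GetAuthFile and its environment/XDG fallback are gone: flag defaults now come from buildahcli.GetDefaultAuthFile, while GetSystemContext validates an explicitly supplied path and builds the types.SystemContext. A hypothetical caller, mirroring logoutCmd above:

package sketch

import (
	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/libpod/cmd/podman/shared"
)

// removeCredentials is a hypothetical caller: code that used to call
// image.GetSystemContext("", authfile, false) directly now goes through the
// shared helper, so a bad --authfile path fails up front.
func removeCredentials(authfile, server string) error {
	sc, err := shared.GetSystemContext(authfile)
	if err != nil {
		return err // e.g. "error checking authfile path /no/such/file: ..."
	}
	return config.RemoveAuthentication(sc, server)
}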
diff --git a/cmd/podman/start.go b/cmd/podman/start.go
index 2d2cf74d2..d4b4534bb 100644
--- a/cmd/podman/start.go
+++ b/cmd/podman/start.go
@@ -35,7 +35,7 @@ func init() {
startCommand.SetUsageTemplate(UsageTemplate())
flags := startCommand.Flags()
flags.BoolVarP(&startCommand.Attach, "attach", "a", false, "Attach container's STDOUT and STDERR")
- flags.StringVar(&startCommand.DetachKeys, "detach-keys", "", "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
+ flags.StringVar(&startCommand.DetachKeys, "detach-keys", define.DefaultDetachKeys, "Select the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`")
flags.BoolVarP(&startCommand.Interactive, "interactive", "i", false, "Keep STDIN open even if not attached")
flags.BoolVarP(&startCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.BoolVar(&startCommand.SigProxy, "sig-proxy", false, "Proxy received signals to the process (default true if attaching, false otherwise)")
diff --git a/cmd/podman/utils.go b/cmd/podman/utils.go
index 592d7a1d1..21389b43a 100644
--- a/cmd/podman/utils.go
+++ b/cmd/podman/utils.go
@@ -68,8 +68,19 @@ func aliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
// Check if a file exists and is not a directory
func checkIfFileExists(name string) bool {
file, err := os.Stat(name)
- if os.IsNotExist(err) {
+ // All errors return file == nil
+ if err != nil {
return false
}
return !file.IsDir()
}
+
+// Check if a path exists and is a directory
+func fileIsDir(name string) bool {
+ file, err := os.Stat(name)
+ // All errors return file == nil
+ if err != nil {
+ return false
+ }
+ return file.IsDir()
+}
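
The widened error check fixes more than semantics: with the old code, a stat failure other than ENOENT (for example EACCES on an unreadable parent directory) skipped the IsNotExist branch and then called IsDir on a nil FileInfo. A small self-contained sketch of that failure mode (existsOld is a reconstruction for illustration, not podman code):

package main

import (
	"fmt"
	"os"
)

// existsOld reconstructs the previous behaviour: any stat error other than
// ENOENT falls through to file.IsDir() on a nil FileInfo, which panics. The
// new helpers treat every stat error as "not a usable file".
func existsOld(name string) (exists bool, panicked bool) {
	defer func() {
		if recover() != nil {
			panicked = true
		}
	}()
	file, err := os.Stat(name)
	if os.IsNotExist(err) {
		return false, false
	}
	return !file.IsDir(), false
}

func main() {
	// A path under a directory a rootless user typically cannot search.
	_, panicked := existsOld("/root/.ssh/config")
	fmt.Println("old check panicked:", panicked)
}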
diff --git a/cni/87-podman-bridge.conflist b/cni/87-podman-bridge.conflist
index 9db416a19..a7bcf47bb 100644
--- a/cni/87-podman-bridge.conflist
+++ b/cni/87-podman-bridge.conflist
@@ -33,6 +33,9 @@
{
"type": "firewall",
"backend": "iptables"
+ },
+ {
+ "type": "tuning"
}
]
}
diff --git a/completions/bash/podman b/completions/bash/podman
index 0abf9e738..4d552b0a7 100644
--- a/completions/bash/podman
+++ b/completions/bash/podman
@@ -877,6 +877,7 @@ _podman_container_restore() {
--tcp-established
--ignore-rootfs
--ignore-static-ip
+ --ignore-static-mac
"
case "$prev" in
-i|--import)
diff --git a/contrib/cirrus/README.md b/contrib/cirrus/README.md
index 779f95d95..de9a33714 100644
--- a/contrib/cirrus/README.md
+++ b/contrib/cirrus/README.md
@@ -27,7 +27,7 @@ task (pass or fail) is set based on the exit status of the last script to execut
4. ``lint``: Execute regular `make lint` to check for any code cruft.
Should also run for less than a few minutes.
-5. ``vendor``: runs `make vendor` followed by `./hack/tree_status.sh` to check
+5. ``vendor``: runs `make vendor-in-container` followed by `./hack/tree_status.sh` to check
whether the git tree is clean. The reasoning for that is to make sure that
the vendor.conf, the code and the vendored packages in ./vendor are in sync
at all times.
diff --git a/contrib/spec/podman.spec.in b/contrib/spec/podman.spec.in
index 8e0cb9950..33ecc8eba 100644
--- a/contrib/spec/podman.spec.in
+++ b/contrib/spec/podman.spec.in
@@ -39,7 +39,7 @@
%global shortcommit_conmon %(c=%{commit_conmon}; echo ${c:0:7})
Name: podman
-Version: 1.6.3
+Version: 1.6.4
Release: #COMMITDATE#.git%{shortcommit0}%{?dist}
Summary: Manage Pods, Containers and Container Images
License: ASL 2.0
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 000000000..fb67e266c
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,23 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+clean:
+ rm -fr build/
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/Readme.md b/docs/Readme.md
new file mode 100644
index 000000000..209dcd6b4
--- /dev/null
+++ b/docs/Readme.md
@@ -0,0 +1,28 @@
+# Podman Documentation
+
+The online man pages and other documents regarding Podman can be found at
+[Read The Docs](https://podman.readthedocs.io/en/latest/index.html). The man pages
+can be found under the [Commands](https://podman.readthedocs.io/en/latest/Commands.html)
+link on that page.
+
+# Build the Docs
+
+## Directory Structure
+
+| | Directory |
+| ------------------------------------ | --------------------------- |
+| Markdown source for man pages | docs/source/markdown/ |
+| man pages aliases as .so files | docs/source/markdown/links/ |
+| restructured text for readthedocs.io | docs/source/ |
+| target for output | docs/build |
+| man pages | docs/build/man |
+| remote linux man pages | docs/build/remote/linux |
+| remote darwin man pages | docs/build/remote/darwin |
+| remote windows html pages | docs/build/remote/windows |
+
+## Support files
+
+| | |
+| ------------------------------------ | --------------------------- |
+| docs/remote-docs.sh | Read the docs/source/markdown files and format for each platform |
+| docs/links-to-html.lua | pandoc filter to do aliases for html files |
diff --git a/docs/rtd/make.bat b/docs/make.bat
index 6247f7e23..6247f7e23 100644
--- a/docs/rtd/make.bat
+++ b/docs/make.bat
diff --git a/docs/podman-remote.sh b/docs/remote-docs.sh
index 2f8e76d1b..1440b0926 100755
--- a/docs/podman-remote.sh
+++ b/docs/remote-docs.sh
@@ -1,17 +1,17 @@
#!/bin/bash -e
# Assemble remote man pages for darwin or windows from markdown files
-PLATFORM=$1 ## windows or darwin
-TARGET=$2 ## where to output files
+PLATFORM=$1 ## linux, windows or darwin
+TARGET=${2} ## where to output files
SOURCES=${@:3} ## directories to find markdown files
PODMAN=${PODMAN:-bin/podman-remote} ## location overridden for testing
function usage() {
echo >&2 "$0 PLATFORM TARGET SOURCES..."
- echo >&2 "PLATFORM: Is either darwin or windows."
- echo >&2 "TARGET: Is the directory where files will be staged."
- echo >&2 "SOURCES: Are the directories to source markdown files."
+ echo >&2 "PLATFORM: Is either linux, darwin or windows."
+ echo >&2 "TARGET: Is the directory where files will be staged. eg, docs/build/remote/linux"
+ echo >&2 "SOURCES: Are the directories of source files. eg, docs/markdown"
}
function fail() {
@@ -21,19 +21,17 @@ function fail() {
}
case $PLATFORM in
-'darwin')
- EXT=1
- PUBLISHER=darwin_fn
+darwin|linux)
+ PUBLISHER=man_fn
;;
-'windows')
- EXT=1.md
- PUBLISHER=windows_fn
+windows)
+ PUBLISHER=html_fn
;;
-'-help')
+-help)
usage
exit 0
;;
-*) fail '"darwin" and "windows" are currently the only supported platforms.' ;;
+*) fail '"linux", "darwin" and "windows" are the only supported platforms.' ;;
esac
if [[ -z $TARGET ]]; then
@@ -48,20 +46,20 @@ if [[ ! -x $PODMAN ]]; then
fail "$PODMAN does not exist"
fi
-## darwin_fn copies the markdown page or link to flattened directory
-function darwin_fn() {
- local markdown=$1
- local file=$(basename $markdown)
- local dir=$(dirname $markdown)
+## man_fn copies the man page or link to flattened directory
+function man_fn() {
+ local page=$1
+ local file=$(basename $page)
+ local dir=$(dirname $page)
- if [[ -f $dir/links/$file ]]; then
- markdown=$dir/links/$file
+ if [[ ! -f $page ]]; then
+ page=$dir/links/${file%.*}.1
fi
- install $markdown $TARGET
+ install $page $TARGET/${file%%.*}.1
}
-## windows_fn converts the markdown page or link to HTML
-function windows_fn() {
+## html_fn converts the markdown page or link to HTML
+function html_fn() {
local markdown=$1
local file=$(basename $markdown)
local dir=$(dirname $markdown)
@@ -70,21 +68,26 @@ function windows_fn() {
local link=$(sed -e 's?.so man1/\(.*\)?\1?' <$dir/links/${file%.md})
markdown=$dir/$link.md
fi
- pandoc --ascii --lua-filter=$dir/links-to-html.lua -o $TARGET/${file%.$EXT}.html $markdown
+ pandoc --ascii --lua-filter=docs/links-to-html.lua -o $TARGET/${file%%.*}.html $markdown
}
## pub_pages finds and publishes the remote manual pages
function pub_pages() {
local source=$1
local publisher=$2
- for f in $(ls $source/podman-remote*$EXT); do
+ for f in $(ls $source/podman-remote*); do
$publisher $f
done
+ # rename podman-remote.ext to podman.ext and copy
+ local remote=$(echo $TARGET/podman-remote.*)
+ local ext=${remote##*.}
+ cp -f $remote $TARGET/podman.$ext
+
for c in "container" "image" "pod" "volume" ""; do
local cmd=${c:+-$c}
for s in $($PODMAN $c --help | sed -n '/^Available Commands:/,/^Flags:/p' | sed -e '1d;$d' -e '/^$/d' | awk '{print $1}'); do
- $publisher $source/podman$cmd-$s.$EXT
+ $publisher $(echo $source/podman$cmd-$s.*)
done
done
}
diff --git a/docs/rtd/requirements.txt b/docs/requirements.txt
index 44af373ac..44af373ac 100644
--- a/docs/rtd/requirements.txt
+++ b/docs/requirements.txt
diff --git a/docs/rtd/Makefile b/docs/rtd/Makefile
deleted file mode 100644
index 50af6490a..000000000
--- a/docs/rtd/Makefile
+++ /dev/null
@@ -1,123 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS ?=
-SPHINXBUILD ?= sphinx-build
-SOURCEDIR = source
-BUILDDIR = build
-
-# Put it first so that "make" without argument is like "make help".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-clean:
- rm -fr build/
- rm -f source/man/podman-*.1.md
-
-copy:
- cp -v ../podman-attach.1.md source/man/
- cp -v ../podman-build.1.md source/man/
- cp -v ../podman-commit.1.md source/man/
- # container
- cp -v ../podman-container-checkpoint.1.md source/man/
- cp -v ../podman-container-exists.1.md source/man/
- cp -v ../podman-container-restore.1.md source/man/
- cp -v ../podman-container-cleanup.1.md source/man/
- cp -v ../podman-container-prune.1.md source/man/
- cp -v ../podman-container-runlabel.1.md source/man/
- cp -v ../podman-cp.1.md source/man/
- cp -v ../podman-create.1.md source/man/
- cp -v ../podman-diff.1.md source/man/
- cp -v ../podman-events.1.md source/man/
- cp -v ../podman-exec.1.md source/man/
- cp -v ../podman-export.1.md source/man/
- # generate
- cp -v ../podman-generate-systemd.1.md source/man/
- cp -v ../podman-generate-kube.1.md source/man/
- # healthcheck
- cp -v ../podman-healthcheck-run.1.md source/man/
- #cp -v ../podman-help.1.md source/
- cp -v ../podman-history.1.md source/man/
- # image
- cp -v ../podman-image-prune.1.md source/man/
- cp -v ../podman-image-tree.1.md source/man/
- cp -v ../podman-image-trust.1.md source/man/
- cp -v ../podman-image-exists.1.md source/man/
- cp -v ../podman-image-sign.1.md source/man/
- cp -v ../podman-images.1.md source/man/
- cp -v ../podman-import.1.md source/man/
- cp -v ../podman-info.1.md source/man/
- cp -v ../podman-init.1.md source/man/
- cp -v ../podman-inspect.1.md source/man/
- cp -v ../podman-kill.1.md source/man/
- cp -v ../podman-load.1.md source/man/
- cp -v ../podman-login.1.md source/man/
- cp -v ../podman-logout.1.md source/man/
- cp -v ../podman-logs.1.md source/man/
- cp -v ../podman-mount.1.md source/man/
- # network
- cp -v ../podman-network-create.1.md source/man/
- cp -v ../podman-network-ls.1.md source/man/
- cp -v ../podman-network-inspect.1.md source/man/
- cp -v ../podman-network-rm.1.md source/man/
- cp -v ../podman-pause.1.md source/man/
- # play
- cp -v ../podman-play-kube.1.md source/man/
- # pod
- cp -v ../podman-pod-create.1.md source/man/
- cp -v ../podman-pod-pause.1.md source/man/
- cp -v ../podman-pod-rm.1.md source/man/
- cp -v ../podman-pod-top.1.md source/man/
- cp -v ../podman-pod-exists.1.md source/man/
- cp -v ../podman-pod-prune.1.md source/man/
- cp -v ../podman-pod-start.1.md source/man/
- cp -v ../podman-pod-unpause.1.md source/man/
- cp -v ../podman-pod-inspect.1.md source/man/
- cp -v ../podman-pod-ps.1.md source/man/
- cp -v ../podman-pod-stats.1.md source/man/
- cp -v ../podman-pod-kill.1.md source/man/
- cp -v ../podman-pod-restart.1.md source/man/
- cp -v ../podman-pod-stop.1.md source/man/
- cp -v ../podman-port.1.md source/man/
- cp -v ../podman-ps.1.md source/man/
- cp -v ../podman-pull.1.md source/man/
- cp -v ../podman-push.1.md source/man/
- cp -v ../podman-restart.1.md source/man/
- cp -v ../podman-rm.1.md source/man/
- cp -v ../podman-rmi.1.md source/man/
- cp -v ../podman-run.1.md source/man/
- cp -v ../podman-save.1.md source/man/
- cp -v ../podman-search.1.md source/man/
- cp -v ../podman-start.1.md source/man/
- cp -v ../podman-stats.1.md source/man/
- cp -v ../podman-stop.1.md source/man/
- # system
- cp -v ../podman-system-migrate.1.md source/man/
- cp -v ../podman-system-renumber.1.md source/man/
- cp -v ../podman-system-df.1.md source/man/
- cp -v ../podman-system-prune.1.md source/man/
- cp -v ../podman-top.1.md source/man/
- cp -v ../podman-umount.1.md source/man/
- cp -v ../podman-unpause.1.md source/man/
- cp -v ../podman-unshare.1.md source/man/
- cp -v ../podman-varlink.1.md source/man/
- cp -v ../podman-version.1.md source/man/
- # volume
- cp -v ../podman-volume-inspect.1.md source/man/
- cp -v ../podman-volume-prune.1.md source/man/
- cp -v ../podman-volume-create.1.md source/man/
- cp -v ../podman-volume-ls.1.md source/man/
- cp -v ../podman-volume-rm.1.md source/man/
- cp -v ../podman-wait.1.md source/man/
-
-.PHONY: help Makefile copy
-
-html: copy
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/rtd/source/Commands.rst b/docs/rtd/source/Commands.rst
deleted file mode 100644
index f6ba5b20d..000000000
--- a/docs/rtd/source/Commands.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-Commands
-========
-
-
-:doc:`attach <man/podman-attach.1>` Attach to a running container
-
-:doc:`build <man/podman-build.1>` Build an image using instructions from Containerfiles
-
-:doc:`commit <man/podman-commit.1>` Create new image based on the changed container
-
-:doc:`containers <man/managecontainers>` Manage Containers
-
-:doc:`cp <man/podman-cp.1>` Copy files/folders between a container and the local filesystem
-
-:doc:`create <man/podman-create.1>` Create but do not start a container
-
-:doc:`diff <man/podman-diff.1>` Inspect changes on container's file systems
-
-:doc:`events <man/podman-events.1>` Show podman events
-
-:doc:`exec <man/podman-exec.1>` Run a process in a running container
-
-:doc:`export <man/podman-export.1>` Export container's filesystem contents as a tar archive
-
-:doc:`generate <man/generate>` Generated structured data
-
-:doc:`healthcheck <man/healthcheck>` Manage Healthcheck
-
-:doc:`history <man/podman-history.1>` Show history of a specified image
-
-:doc:`image <man/image>` Manage images
-
-:doc:`images <man/podman-images.1>` List images in local storage
-
-:doc:`import <man/podman-import.1>` Import a tarball to create a filesystem image
-
-:doc:`info <man/podman-info.1>` Display podman system information
-
-:doc:`init <man/podman-init.1>` Initialize one or more containers
-
-:doc:`inspect <man/podman-inspect.1>` Display the configuration of a container or image
-
-:doc:`kill <man/podman-kill.1>` Kill one or more running containers with a specific signal
-
-:doc:`load <man/podman-load.1>` Load an image from container archive
-
-:doc:`login <man/podman-login.1>` Login to a container registry
-
-:doc:`logout <man/podman-logout.1>` Logout of a container registry
-
-:doc:`logs <man/podman-logs.1>` Fetch the logs of a container
-
-:doc:`mount <man/podman-mount.1>` Mount a working container's root filesystem
-
-:doc:`network <man/network>` Manage Networks
-
-:doc:`pause <man/podman-pause.1>` Pause all the processes in one or more containers
-
-:doc:`play <man/play>` Play a pod
-
-:doc:`pod <man/pod>` Manage pods
-
-:doc:`port <man/podman-port.1>` List port mappings or a specific mapping for the container
-
-:doc:`ps <man/podman-ps.1>` List containers
-
-:doc:`pull <man/podman-pull.1>` Pull an image from a registry
-
-:doc:`push <man/podman-push.1>` Push an image to a specified destination
-
-:doc:`restart <man/podman-restart.1>` Restart one or more containers
-
-:doc:`rm <man/podman-rm.1>` Remove one or more containers
-
-:doc:`rmi <man/podman-rmi.1>` Removes one or more images from local storage
-
-:doc:`run <man/podman-run.1>` Run a command in a new container
-
-:doc:`save <man/podman-save.1>` Save image to an archive
-
-:doc:`search <man/podman-search.1>` Search registry for image
-
-:doc:`start <man/podman-start.1>` Start one or more containers
-
-:doc:`stats <man/podman-stats.1>` Display a live stream of container resource usage statistics
-
-:doc:`stop <man/podman-stop.1>` Stop one or more containers
-
-:doc:`system <man/system>` Manage podman
-
-:doc:`tag <man/podman-tag.1>` Add an additional name to a local image
-
-:doc:`top <man/podman-top.1>` Display the running processes of a container
-
-:doc:`umount <man/podman-umount.1>` Unmounts working container's root filesystem
-
-:doc:`unpause <man/podman-unpause.1>` Unpause the processes in one or more containers
-
-:doc:`unshare <man/podman-unshare.1>` Run a command in a modified user namespace
-
-:doc:`varlink <man/podman-varlink.1>` Run varlink interface
-
-:doc:`version <man/podman-version.1>` Display the Podman Version Information
-
-:doc:`volume <man/volume>` Manage volumes
-
-:doc:`wait <man/podman-wait.1>` Block on one or more containers \ No newline at end of file
diff --git a/docs/rtd/source/man/generate.rst b/docs/rtd/source/man/generate.rst
deleted file mode 100644
index e82a15735..000000000
--- a/docs/rtd/source/man/generate.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Generate
-========
-
-:doc:`kube <podman-generate-kube.1>` Generate Kubernetes pod YAML from a container or pod
-
-:doc:`systemd <podman-generate-systemd.1>` Generate a systemd unit file for a Podman container
diff --git a/docs/rtd/source/man/healthcheck.rst b/docs/rtd/source/man/healthcheck.rst
deleted file mode 100644
index 697c1358b..000000000
--- a/docs/rtd/source/man/healthcheck.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-HealthCheck
-===========
-
-:doc:`run <podman-healthcheck-run.1>` run the health check of a container
diff --git a/docs/rtd/source/man/image.rst b/docs/rtd/source/man/image.rst
deleted file mode 100644
index ad963cd41..000000000
--- a/docs/rtd/source/man/image.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Image
-=====
-
-
-:doc:`build <podman-build.1>` Build an image using instructions from Containerfiles
-
-:doc:`exists <podman-image-exists.1>` Check if an image exists in local storage
-
-:doc:`history <podman-history.1>` Show history of a specified image
-
-:doc:`import <podman-import.1>` Import a tarball to create a filesystem image
-
-:doc:`inspect <podman-inspect.1>` Display the configuration of an image
-
-:doc:`list <podman-images.1>` List images in local storage
-
-:doc:`load <podman-load.1>` Load an image from container archive
-
-:doc:`prune <podman-image-prune.1>` Remove unused images
-
-:doc:`pull <podman-pull.1>` Pull an image from a registry
-
-:doc:`push <podman-push.1>` Push an image to a specified destination
-
-:doc:`rm <podman-rmi.1>` Removes one or more images from local storage
-
-:doc:`save <podman-save.1>` Save image to an archive
-
-:doc:`sign <podman-image-sign.1>` Sign an image
-
-:doc:`tag <podman-tag.1>` Add an additional name to a local image
-
-:doc:`tree <podman-image-tree.1>` Prints layer hierarchy of an image in a tree format
-
-:doc:`trust <podman-image-trust.1>` Manage container image trust policy
diff --git a/docs/rtd/source/man/managecontainers.rst b/docs/rtd/source/man/managecontainers.rst
deleted file mode 100644
index 20e8c0679..000000000
--- a/docs/rtd/source/man/managecontainers.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-Manage Containers
-=================
-
-:doc:`attach <podman-attach.1>` Attach to a running container
-
-:doc:`checkpoint <podman-container-checkpoint.1>` Checkpoints one or more containers
-
-:doc:`cleanup <podman-container-cleanup.1>` Cleanup network and mountpoints of one or more containers
-
-:doc:`commit <podman-commit.1>` Create new image based on the changed container
-
-:doc:`cp <podman-cp.1>` Copy files/folders between a container and the local filesystem
-
-:doc:`create <podman-create.1>` Create but do not start a container
-
-:doc:`diff <podman-diff.1>` Inspect changes on container's file systems
-
-:doc:`exec <podman-exec.1>` Run a process in a running container
-
-:doc:`exists <podman-exists.1>` Check if a container exists in local storage
-
-:doc:`export <podman-export.1>` Export container's filesystem contents as a tar archive
-
-:doc:`init <podman-init.1>` Initialize one or more containers
-
-:doc:`inspect <podman-inspect.1>` Display the configuration of a container or image
-
-:doc:`kill <podman-kill.1>` Kill one or more running containers with a specific signal
-
-:doc:`list <podman-ps.1>` List containers
-
-:doc:`logs <podman-logs.1>` Fetch the logs of a container
-
-:doc:`mount <podman-mount.1>` Mount a working container's root filesystem
-
-:doc:`pause <podman-pause.1>` Pause all the processes in one or more containers
-
-:doc:`port <podman-port.1>` List port mappings or a specific mapping for the container
-
-:doc:`restart <podman-restart.1>` Restart one or more containers
-
-:doc:`prune <podman-container-prune.1>` Remove all stopped containers
-
-:doc:`restore <podman-container-restore.1>` Restores one or more containers from a checkpoint
-
-:doc:`rm <podman-rm.1>` Remove one or more containers
-
-:doc:`run <podman-run.1>` Run a command in a new container
-
-:doc:`runlabel <podman-container-runlabel.1>` Execute the command described by an image label
-
-:doc:`start <podman-start.1>` Start one or more containers
-
-:doc:`stats <podman-stats.1>` Display a live stream of container resource usage statistics
-
-:doc:`stop <podman-stop.1>` Stop one or more containers
-
-:doc:`top <podman-top.1>` Display the running processes of a container
-
-:doc:`umount <podman-umount.1>` Unmounts working container's root filesystem
-
-:doc:`unpause <podman-unpause.1>` Unpause the processes in one or more containers
-
-:doc:`wait <podman-wait.1>` Block on one or more containers
diff --git a/docs/rtd/source/man/network.rst b/docs/rtd/source/man/network.rst
deleted file mode 100644
index 6d6a4c022..000000000
--- a/docs/rtd/source/man/network.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Network
-=====
-
-:doc:`create <podman-network-create.1>` network create
-
-:doc:`inspect <podman-network-inspect.1>` network inspect
-
-:doc:`ls <podman-network-ls.1>` network list
-
-:doc:`rm <podman-network-rm.1>` network rm \ No newline at end of file
diff --git a/docs/rtd/source/man/play.rst b/docs/rtd/source/man/play.rst
deleted file mode 100644
index 93e1a9a1e..000000000
--- a/docs/rtd/source/man/play.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Play
-====
-
-:doc:`kube <podman-play-kube.1>` Play a pod based on Kubernetes YAML
diff --git a/docs/rtd/source/man/pod.rst b/docs/rtd/source/man/pod.rst
deleted file mode 100644
index 13c1740f8..000000000
--- a/docs/rtd/source/man/pod.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-Pod
-===
-
-:doc:`create <podman-pod-create.1>` Create a new empty pod
-
-:doc:`exists <podman-pod-exists.1>` Check if a pod exists in local storage
-
-:doc:`inspect <podman-pod-inspect.1>` Displays a pod configuration
-
-:doc:`kill <podman-pod-kill.1>` Send the specified signal or SIGKILL to containers in pod
-
-:doc:`pause <podman-pause.1>` Pause one or more pods
-
-:doc:`prune <podman-pod-prune.1>` Remove all stopped pods
-
-:doc:`ps <podman-pod-ps.1>` List pods
-
-:doc:`restart <podman-pod-restart.1>` Restart one or more pods
-
-:doc:`rm <podman-pod-rm.1>` Remove one or more pods
-
-:doc:`start <podman-pod-start.1>` Start one or more pods
-
-:doc:`stats <podman-pod-stats.1>` Display a live stream of resource usage statistics for the containers in one or more pods
-
-:doc:`stop <podman-pod-stop.1>` Stop one or more pods
-
-:doc:`top <podman-pod-top.1>` Display the running processes of containers in a pod
-
-:doc:`unpause <podman-pod-unpause.1>` Unpause one or more pods
diff --git a/docs/rtd/source/man/system.rst b/docs/rtd/source/man/system.rst
deleted file mode 100644
index 764ec01c1..000000000
--- a/docs/rtd/source/man/system.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-System
-======
-
-:doc:`df <podman-system-df.1>` Show podman disk usage
-
-:doc:`info <podman-info.1>` Display podman system information
-
-:doc:`migrate <podman-system-migrate.1>` Migrate containers
-
-:doc:`prune <podman-system-prune.1>` Remove unused data
-
-:doc:`renumber <podman-system-renumber.1>` Migrate lock numbers
diff --git a/docs/rtd/source/man/volume.rst b/docs/rtd/source/man/volume.rst
deleted file mode 100644
index ee18e4b2e..000000000
--- a/docs/rtd/source/man/volume.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Volume
-======
-:doc:`create <podman-volume-create.1>` Create a new volume
-
-:doc:`inspect <podman-volume-inspect.1>` Display detailed information on one or more volumes
-
-:doc:`ls <podman-volume-ls.1>` List volumes
-
-:doc:`prune <podman-volume-prune.1>` Remove all unused volumes
-
-:doc:`rm <podman-volume-rm.1>` Remove one or more volumes \ No newline at end of file
diff --git a/docs/source/Commands.rst b/docs/source/Commands.rst
new file mode 100644
index 000000000..276a9c4fb
--- /dev/null
+++ b/docs/source/Commands.rst
@@ -0,0 +1,107 @@
+Commands
+========
+
+
+:doc:`attach <markdown/podman-attach.1>` Attach to a running container
+
+:doc:`build <markdown/podman-build.1>` Build an image using instructions from Containerfiles
+
+:doc:`commit <markdown/podman-commit.1>` Create new image based on the changed container
+
+:doc:`containers <managecontainers>` Manage Containers
+
+:doc:`cp <markdown/podman-cp.1>` Copy files/folders between a container and the local filesystem
+
+:doc:`create <markdown/podman-create.1>` Create but do not start a container
+
+:doc:`diff <markdown/podman-diff.1>` Inspect changes on container's file systems
+
+:doc:`events <markdown/podman-events.1>` Show podman events
+
+:doc:`exec <markdown/podman-exec.1>` Run a process in a running container
+
+:doc:`export <markdown/podman-export.1>` Export container's filesystem contents as a tar archive
+
+:doc:`generate <generate>` Generated structured data
+
+:doc:`healthcheck <healthcheck>` Manage Healthcheck
+
+:doc:`history <markdown/podman-history.1>` Show history of a specified image
+
+:doc:`image <image>` Manage images
+
+:doc:`images <markdown/podman-images.1>` List images in local storage
+
+:doc:`import <markdown/podman-import.1>` Import a tarball to create a filesystem image
+
+:doc:`info <markdown/podman-info.1>` Display podman system information
+
+:doc:`init <markdown/podman-init.1>` Initialize one or more containers
+
+:doc:`inspect <markdown/podman-inspect.1>` Display the configuration of a container or image
+
+:doc:`kill <markdown/podman-kill.1>` Kill one or more running containers with a specific signal
+
+:doc:`load <markdown/podman-load.1>` Load an image from container archive
+
+:doc:`login <markdown/podman-login.1>` Login to a container registry
+
+:doc:`logout <markdown/podman-logout.1>` Logout of a container registry
+
+:doc:`logs <markdown/podman-logs.1>` Fetch the logs of a container
+
+:doc:`mount <markdown/podman-mount.1>` Mount a working container's root filesystem
+
+:doc:`network <network>` Manage Networks
+
+:doc:`pause <markdown/podman-pause.1>` Pause all the processes in one or more containers
+
+:doc:`play <play>` Play a pod
+
+:doc:`pod <pod>` Manage pods
+
+:doc:`port <markdown/podman-port.1>` List port mappings or a specific mapping for the container
+
+:doc:`ps <markdown/podman-ps.1>` List containers
+
+:doc:`pull <markdown/podman-pull.1>` Pull an image from a registry
+
+:doc:`push <markdown/podman-push.1>` Push an image to a specified destination
+
+:doc:`restart <markdown/podman-restart.1>` Restart one or more containers
+
+:doc:`rm <markdown/podman-rm.1>` Remove one or more containers
+
+:doc:`rmi <markdown/podman-rmi.1>` Removes one or more images from local storage
+
+:doc:`run <markdown/podman-run.1>` Run a command in a new container
+
+:doc:`save <markdown/podman-save.1>` Save image to an archive
+
+:doc:`search <markdown/podman-search.1>` Search registry for image
+
+:doc:`start <markdown/podman-start.1>` Start one or more containers
+
+:doc:`stats <markdown/podman-stats.1>` Display a live stream of container resource usage statistics
+
+:doc:`stop <markdown/podman-stop.1>` Stop one or more containers
+
+:doc:`system <system>` Manage podman
+
+:doc:`tag <markdown/podman-tag.1>` Add an additional name to a local image
+
+:doc:`top <markdown/podman-top.1>` Display the running processes of a container
+
+:doc:`umount <markdown/podman-umount.1>` Unmounts working container's root filesystem
+
+:doc:`unpause <markdown/podman-unpause.1>` Unpause the processes in one or more containers
+
+:doc:`unshare <markdown/podman-unshare.1>` Run a command in a modified user namespace
+
+:doc:`varlink <markdown/podman-varlink.1>` Run varlink interface
+
+:doc:`version <markdown/podman-version.1>` Display the Podman Version Information
+
+:doc:`volume <volume>` Manage volumes
+
+:doc:`wait <markdown/podman-wait.1>` Block on one or more containers \ No newline at end of file
diff --git a/docs/rtd/source/Introduction.rst b/docs/source/Introduction.rst
index c516b3317..c516b3317 100644
--- a/docs/rtd/source/Introduction.rst
+++ b/docs/source/Introduction.rst
diff --git a/docs/rtd/source/Reference.rst b/docs/source/Reference.rst
index 9a771c87f..9a771c87f 100644
--- a/docs/rtd/source/Reference.rst
+++ b/docs/source/Reference.rst
diff --git a/docs/rtd/source/Tutorials.rst b/docs/source/Tutorials.rst
index 0c7e28c3b..0c7e28c3b 100644
--- a/docs/rtd/source/Tutorials.rst
+++ b/docs/source/Tutorials.rst
diff --git a/docs/rtd/source/conf.py b/docs/source/conf.py
index d95290f72..d95290f72 100644
--- a/docs/rtd/source/conf.py
+++ b/docs/source/conf.py
diff --git a/docs/source/generate.rst b/docs/source/generate.rst
new file mode 100644
index 000000000..fd267ce62
--- /dev/null
+++ b/docs/source/generate.rst
@@ -0,0 +1,6 @@
+Generate
+========
+
+:doc:`kube <markdown/podman-generate-kube.1>` Generate Kubernetes pod YAML from a container or pod
+
+:doc:`systemd <markdown/podman-generate-systemd.1>` Generate a systemd unit file for a Podman container
diff --git a/docs/source/healthcheck.rst b/docs/source/healthcheck.rst
new file mode 100644
index 000000000..2e2f88fbc
--- /dev/null
+++ b/docs/source/healthcheck.rst
@@ -0,0 +1,4 @@
+HealthCheck
+===========
+
+:doc:`run <markdown/podman-healthcheck-run.1>` Run the health check of a container
diff --git a/docs/source/image.rst b/docs/source/image.rst
new file mode 100644
index 000000000..a8c6171e1
--- /dev/null
+++ b/docs/source/image.rst
@@ -0,0 +1,35 @@
+Image
+=====
+
+
+:doc:`build <markdown/podman-build.1>` Build an image using instructions from Containerfiles
+
+:doc:`exists <markdown/podman-image-exists.1>` Check if an image exists in local storage
+
+:doc:`history <markdown/podman-history.1>` Show history of a specified image
+
+:doc:`import <markdown/podman-import.1>` Import a tarball to create a filesystem image
+
+:doc:`inspect <markdown/podman-inspect.1>` Display the configuration of an image
+
+:doc:`list <markdown/podman-images.1>` List images in local storage
+
+:doc:`load <markdown/podman-load.1>` Load an image from container archive
+
+:doc:`prune <markdown/podman-image-prune.1>` Remove unused images
+
+:doc:`pull <markdown/podman-pull.1>` Pull an image from a registry
+
+:doc:`push <markdown/podman-push.1>` Push an image to a specified destination
+
+:doc:`rm <markdown/podman-rmi.1>` Removes one or more images from local storage
+
+:doc:`save <markdown/podman-save.1>` Save image to an archive
+
+:doc:`sign <markdown/podman-image-sign.1>` Sign an image
+
+:doc:`tag <markdown/podman-tag.1>` Add an additional name to a local image
+
+:doc:`tree <markdown/podman-image-tree.1>` Prints layer hierarchy of an image in a tree format
+
+:doc:`trust <markdown/podman-image-trust.1>` Manage container image trust policy
diff --git a/docs/rtd/source/index.rst b/docs/source/index.rst
index 9dd61a6a6..9dd61a6a6 100644
--- a/docs/rtd/source/index.rst
+++ b/docs/source/index.rst
diff --git a/docs/source/managecontainers.rst b/docs/source/managecontainers.rst
new file mode 100644
index 000000000..5c5a83a82
--- /dev/null
+++ b/docs/source/managecontainers.rst
@@ -0,0 +1,64 @@
+Manage Containers
+=================
+
+:doc:`attach <markdown/podman-attach.1>` Attach to a running container
+
+:doc:`checkpoint <markdown/podman-container-checkpoint.1>` Checkpoints one or more containers
+
+:doc:`cleanup <markdown/podman-container-cleanup.1>` Cleanup network and mountpoints of one or more containers
+
+:doc:`commit <markdown/podman-commit.1>` Create new image based on the changed container
+
+:doc:`cp <markdown/podman-cp.1>` Copy files/folders between a container and the local filesystem
+
+:doc:`create <markdown/podman-create.1>` Create but do not start a container
+
+:doc:`diff <markdown/podman-diff.1>` Inspect changes on container's file systems
+
+:doc:`exec <markdown/podman-exec.1>` Run a process in a running container
+
+:doc:`exists <markdown/podman-container-exists.1>` Check if a container exists in local storage
+
+:doc:`export <markdown/podman-export.1>` Export container's filesystem contents as a tar archive
+
+:doc:`init <markdown/podman-init.1>` Initialize one or more containers
+
+:doc:`inspect <markdown/podman-inspect.1>` Display the configuration of a container or image
+
+:doc:`kill <markdown/podman-kill.1>` Kill one or more running containers with a specific signal
+
+:doc:`list <markdown/podman-ps.1>` List containers
+
+:doc:`logs <markdown/podman-logs.1>` Fetch the logs of a container
+
+:doc:`mount <markdown/podman-mount.1>` Mount a working container's root filesystem
+
+:doc:`pause <markdown/podman-pause.1>` Pause all the processes in one or more containers
+
+:doc:`port <markdown/podman-port.1>` List port mappings or a specific mapping for the container
+
+:doc:`prune <markdown/podman-container-prune.1>` Remove all stopped containers
+
+:doc:`restart <markdown/podman-restart.1>` Restart one or more containers
+
+:doc:`restore <markdown/podman-container-restore.1>` Restores one or more containers from a checkpoint
+
+:doc:`rm <markdown/podman-rm.1>` Remove one or more containers
+
+:doc:`run <markdown/podman-run.1>` Run a command in a new container
+
+:doc:`runlabel <markdown/podman-container-runlabel.1>` Execute the command described by an image label
+
+:doc:`start <markdown/podman-start.1>` Start one or more containers
+
+:doc:`stats <markdown/podman-stats.1>` Display a live stream of container resource usage statistics
+
+:doc:`stop <markdown/podman-stop.1>` Stop one or more containers
+
+:doc:`top <markdown/podman-top.1>` Display the running processes of a container
+
+:doc:`umount <markdown/podman-umount.1>` Unmounts working container's root filesystem
+
+:doc:`unpause <markdown/podman-unpause.1>` Unpause the processes in one or more containers
+
+:doc:`wait <markdown/podman-wait.1>` Block on one or more containers
diff --git a/docs/containers-mounts.conf.5.md b/docs/source/markdown/containers-mounts.conf.5.md
index 130c1c523..130c1c523 100644
--- a/docs/containers-mounts.conf.5.md
+++ b/docs/source/markdown/containers-mounts.conf.5.md
diff --git a/docs/libpod.conf.5.md b/docs/source/markdown/libpod.conf.5.md
index c28c80b56..c28c80b56 100644
--- a/docs/libpod.conf.5.md
+++ b/docs/source/markdown/libpod.conf.5.md
diff --git a/docs/links/podman-container-attach.1 b/docs/source/markdown/links/podman-container-attach.1
index be3b5efd3..be3b5efd3 100644
--- a/docs/links/podman-container-attach.1
+++ b/docs/source/markdown/links/podman-container-attach.1
diff --git a/docs/links/podman-container-commit.1 b/docs/source/markdown/links/podman-container-commit.1
index cdff06766..cdff06766 100644
--- a/docs/links/podman-container-commit.1
+++ b/docs/source/markdown/links/podman-container-commit.1
diff --git a/docs/links/podman-container-cp.1 b/docs/source/markdown/links/podman-container-cp.1
index 6ad859c84..6ad859c84 100644
--- a/docs/links/podman-container-cp.1
+++ b/docs/source/markdown/links/podman-container-cp.1
diff --git a/docs/links/podman-container-create.1 b/docs/source/markdown/links/podman-container-create.1
index f6b07d946..f6b07d946 100644
--- a/docs/links/podman-container-create.1
+++ b/docs/source/markdown/links/podman-container-create.1
diff --git a/docs/links/podman-container-diff.1 b/docs/source/markdown/links/podman-container-diff.1
index ac4881f98..ac4881f98 100644
--- a/docs/links/podman-container-diff.1
+++ b/docs/source/markdown/links/podman-container-diff.1
diff --git a/docs/links/podman-container-exec.1 b/docs/source/markdown/links/podman-container-exec.1
index 6bb98ec75..6bb98ec75 100644
--- a/docs/links/podman-container-exec.1
+++ b/docs/source/markdown/links/podman-container-exec.1
diff --git a/docs/links/podman-container-export.1 b/docs/source/markdown/links/podman-container-export.1
index 85b1d5438..85b1d5438 100644
--- a/docs/links/podman-container-export.1
+++ b/docs/source/markdown/links/podman-container-export.1
diff --git a/docs/links/podman-container-init.1 b/docs/source/markdown/links/podman-container-init.1
index 3a8bee249..3a8bee249 100644
--- a/docs/links/podman-container-init.1
+++ b/docs/source/markdown/links/podman-container-init.1
diff --git a/docs/links/podman-container-inspect.1 b/docs/source/markdown/links/podman-container-inspect.1
index 261043845..261043845 100644
--- a/docs/links/podman-container-inspect.1
+++ b/docs/source/markdown/links/podman-container-inspect.1
diff --git a/docs/links/podman-container-kill.1 b/docs/source/markdown/links/podman-container-kill.1
index 71960113d..71960113d 100644
--- a/docs/links/podman-container-kill.1
+++ b/docs/source/markdown/links/podman-container-kill.1
diff --git a/docs/links/podman-container-list.1 b/docs/source/markdown/links/podman-container-list.1
index f7f44c704..f7f44c704 100644
--- a/docs/links/podman-container-list.1
+++ b/docs/source/markdown/links/podman-container-list.1
diff --git a/docs/links/podman-container-logs.1 b/docs/source/markdown/links/podman-container-logs.1
index 7e0ce6307..7e0ce6307 100644
--- a/docs/links/podman-container-logs.1
+++ b/docs/source/markdown/links/podman-container-logs.1
diff --git a/docs/links/podman-container-ls.1 b/docs/source/markdown/links/podman-container-ls.1
index f7f44c704..f7f44c704 100644
--- a/docs/links/podman-container-ls.1
+++ b/docs/source/markdown/links/podman-container-ls.1
diff --git a/docs/links/podman-container-mount.1 b/docs/source/markdown/links/podman-container-mount.1
index b14e594a4..b14e594a4 100644
--- a/docs/links/podman-container-mount.1
+++ b/docs/source/markdown/links/podman-container-mount.1
diff --git a/docs/links/podman-container-pause.1 b/docs/source/markdown/links/podman-container-pause.1
index 3ed8d7cef..3ed8d7cef 100644
--- a/docs/links/podman-container-pause.1
+++ b/docs/source/markdown/links/podman-container-pause.1
diff --git a/docs/links/podman-container-port.1 b/docs/source/markdown/links/podman-container-port.1
index adfeda201..adfeda201 100644
--- a/docs/links/podman-container-port.1
+++ b/docs/source/markdown/links/podman-container-port.1
diff --git a/docs/links/podman-container-ps.1 b/docs/source/markdown/links/podman-container-ps.1
index f7f44c704..f7f44c704 100644
--- a/docs/links/podman-container-ps.1
+++ b/docs/source/markdown/links/podman-container-ps.1
diff --git a/docs/links/podman-container-restart.1 b/docs/source/markdown/links/podman-container-restart.1
index 9fb09ae50..9fb09ae50 100644
--- a/docs/links/podman-container-restart.1
+++ b/docs/source/markdown/links/podman-container-restart.1
diff --git a/docs/links/podman-container-rm.1 b/docs/source/markdown/links/podman-container-rm.1
index 6dfc6e98c..6dfc6e98c 100644
--- a/docs/links/podman-container-rm.1
+++ b/docs/source/markdown/links/podman-container-rm.1
diff --git a/docs/links/podman-container-run.1 b/docs/source/markdown/links/podman-container-run.1
index 6c70caa37..6c70caa37 100644
--- a/docs/links/podman-container-run.1
+++ b/docs/source/markdown/links/podman-container-run.1
diff --git a/docs/links/podman-container-start.1 b/docs/source/markdown/links/podman-container-start.1
index aa042cbfe..aa042cbfe 100644
--- a/docs/links/podman-container-start.1
+++ b/docs/source/markdown/links/podman-container-start.1
diff --git a/docs/links/podman-container-stats.1 b/docs/source/markdown/links/podman-container-stats.1
index 5f2e2e7d6..5f2e2e7d6 100644
--- a/docs/links/podman-container-stats.1
+++ b/docs/source/markdown/links/podman-container-stats.1
diff --git a/docs/links/podman-container-stop.1 b/docs/source/markdown/links/podman-container-stop.1
index 6b7e87c69..6b7e87c69 100644
--- a/docs/links/podman-container-stop.1
+++ b/docs/source/markdown/links/podman-container-stop.1
diff --git a/docs/links/podman-container-top.1 b/docs/source/markdown/links/podman-container-top.1
index ddcf7b5ec..ddcf7b5ec 100644
--- a/docs/links/podman-container-top.1
+++ b/docs/source/markdown/links/podman-container-top.1
diff --git a/docs/links/podman-container-umount.1 b/docs/source/markdown/links/podman-container-umount.1
index 789dabbb0..789dabbb0 100644
--- a/docs/links/podman-container-umount.1
+++ b/docs/source/markdown/links/podman-container-umount.1
diff --git a/docs/links/podman-container-unmount.1 b/docs/source/markdown/links/podman-container-unmount.1
index 789dabbb0..789dabbb0 100644
--- a/docs/links/podman-container-unmount.1
+++ b/docs/source/markdown/links/podman-container-unmount.1
diff --git a/docs/links/podman-container-unpause.1 b/docs/source/markdown/links/podman-container-unpause.1
index b8d970744..b8d970744 100644
--- a/docs/links/podman-container-unpause.1
+++ b/docs/source/markdown/links/podman-container-unpause.1
diff --git a/docs/links/podman-container-wait.1 b/docs/source/markdown/links/podman-container-wait.1
index bd2505dde..bd2505dde 100644
--- a/docs/links/podman-container-wait.1
+++ b/docs/source/markdown/links/podman-container-wait.1
diff --git a/docs/links/podman-help.1 b/docs/source/markdown/links/podman-help.1
index 6b7954b0d..6b7954b0d 100644
--- a/docs/links/podman-help.1
+++ b/docs/source/markdown/links/podman-help.1
diff --git a/docs/links/podman-image-build.1 b/docs/source/markdown/links/podman-image-build.1
index 27c751cf6..27c751cf6 100644
--- a/docs/links/podman-image-build.1
+++ b/docs/source/markdown/links/podman-image-build.1
diff --git a/docs/links/podman-image-history.1 b/docs/source/markdown/links/podman-image-history.1
index 6b6dc17e1..6b6dc17e1 100644
--- a/docs/links/podman-image-history.1
+++ b/docs/source/markdown/links/podman-image-history.1
diff --git a/docs/links/podman-image-import.1 b/docs/source/markdown/links/podman-image-import.1
index 4564fc6c9..4564fc6c9 100644
--- a/docs/links/podman-image-import.1
+++ b/docs/source/markdown/links/podman-image-import.1
diff --git a/docs/links/podman-image-inspect.1 b/docs/source/markdown/links/podman-image-inspect.1
index 261043845..261043845 100644
--- a/docs/links/podman-image-inspect.1
+++ b/docs/source/markdown/links/podman-image-inspect.1
diff --git a/docs/links/podman-image-list.1 b/docs/source/markdown/links/podman-image-list.1
index 17d6ca880..17d6ca880 100644
--- a/docs/links/podman-image-list.1
+++ b/docs/source/markdown/links/podman-image-list.1
diff --git a/docs/links/podman-image-load.1 b/docs/source/markdown/links/podman-image-load.1
index 21b6d8e9c..21b6d8e9c 100644
--- a/docs/links/podman-image-load.1
+++ b/docs/source/markdown/links/podman-image-load.1
diff --git a/docs/links/podman-image-ls.1 b/docs/source/markdown/links/podman-image-ls.1
index 17d6ca880..17d6ca880 100644
--- a/docs/links/podman-image-ls.1
+++ b/docs/source/markdown/links/podman-image-ls.1
diff --git a/docs/links/podman-image-pull.1 b/docs/source/markdown/links/podman-image-pull.1
index d14519be3..d14519be3 100644
--- a/docs/links/podman-image-pull.1
+++ b/docs/source/markdown/links/podman-image-pull.1
diff --git a/docs/links/podman-image-push.1 b/docs/source/markdown/links/podman-image-push.1
index 51d52ad11..51d52ad11 100644
--- a/docs/links/podman-image-push.1
+++ b/docs/source/markdown/links/podman-image-push.1
diff --git a/docs/links/podman-image-rm.1 b/docs/source/markdown/links/podman-image-rm.1
index 1007ad150..1007ad150 100644
--- a/docs/links/podman-image-rm.1
+++ b/docs/source/markdown/links/podman-image-rm.1
diff --git a/docs/links/podman-image-save.1 b/docs/source/markdown/links/podman-image-save.1
index 715d7d6ee..715d7d6ee 100644
--- a/docs/links/podman-image-save.1
+++ b/docs/source/markdown/links/podman-image-save.1
diff --git a/docs/links/podman-image-tag.1 b/docs/source/markdown/links/podman-image-tag.1
index e41c50de9..e41c50de9 100644
--- a/docs/links/podman-image-tag.1
+++ b/docs/source/markdown/links/podman-image-tag.1
diff --git a/docs/links/podman-list.1 b/docs/source/markdown/links/podman-list.1
index f7f44c704..f7f44c704 100644
--- a/docs/links/podman-list.1
+++ b/docs/source/markdown/links/podman-list.1
diff --git a/docs/links/podman-ls.1 b/docs/source/markdown/links/podman-ls.1
index f7f44c704..f7f44c704 100644
--- a/docs/links/podman-ls.1
+++ b/docs/source/markdown/links/podman-ls.1
diff --git a/docs/links/podman-system-info.1 b/docs/source/markdown/links/podman-system-info.1
index 809a71b8b..809a71b8b 100644
--- a/docs/links/podman-system-info.1
+++ b/docs/source/markdown/links/podman-system-info.1
diff --git a/docs/links/podman-unmount.1 b/docs/source/markdown/links/podman-unmount.1
index 789dabbb0..789dabbb0 100644
--- a/docs/links/podman-unmount.1
+++ b/docs/source/markdown/links/podman-unmount.1
diff --git a/docs/podman-attach.1.md b/docs/source/markdown/podman-attach.1.md
index cef01f0f6..1ac2e49a9 100644
--- a/docs/podman-attach.1.md
+++ b/docs/source/markdown/podman-attach.1.md
@@ -20,9 +20,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information.
## OPTIONS
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--latest**, **-l**
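For instance (the container name *mycontainer* is hypothetical), the option can set a custom sequence or disable detaching entirely:

    podman attach --detach-keys="ctrl-a,x" mycontainer
    podman attach --detach-keys="" mycontainer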
diff --git a/docs/podman-build.1.md b/docs/source/markdown/podman-build.1.md
index 567d0ead3..567d0ead3 100644
--- a/docs/podman-build.1.md
+++ b/docs/source/markdown/podman-build.1.md
diff --git a/docs/podman-commit.1.md b/docs/source/markdown/podman-commit.1.md
index 07a885ae2..07a885ae2 100644
--- a/docs/podman-commit.1.md
+++ b/docs/source/markdown/podman-commit.1.md
diff --git a/docs/podman-container-checkpoint.1.md b/docs/source/markdown/podman-container-checkpoint.1.md
index 034d338bb..034d338bb 100644
--- a/docs/podman-container-checkpoint.1.md
+++ b/docs/source/markdown/podman-container-checkpoint.1.md
diff --git a/docs/podman-container-cleanup.1.md b/docs/source/markdown/podman-container-cleanup.1.md
index 69e21ce9f..69e21ce9f 100644
--- a/docs/podman-container-cleanup.1.md
+++ b/docs/source/markdown/podman-container-cleanup.1.md
diff --git a/docs/podman-container-exists.1.md b/docs/source/markdown/podman-container-exists.1.md
index 4d988132b..4d988132b 100644
--- a/docs/podman-container-exists.1.md
+++ b/docs/source/markdown/podman-container-exists.1.md
diff --git a/docs/podman-container-prune.1.md b/docs/source/markdown/podman-container-prune.1.md
index d8a4b7f4e..d8a4b7f4e 100644
--- a/docs/podman-container-prune.1.md
+++ b/docs/source/markdown/podman-container-prune.1.md
diff --git a/docs/podman-container-restore.1.md b/docs/source/markdown/podman-container-restore.1.md
index 1d2cf0b3e..d71daf4af 100644
--- a/docs/podman-container-restore.1.md
+++ b/docs/source/markdown/podman-container-restore.1.md
@@ -76,6 +76,15 @@ a container is restored multiple times from an exported checkpoint with **--name
Using **--ignore-static-ip** tells Podman to ignore the IP address if it was configured
with **--ip** during container creation.
+**--ignore-static-mac**
+
+If the container was started with **--mac-address**, the restored container also
+tries to use that MAC address, and the restore fails if that MAC address is already
+in use. This can happen if a container is restored multiple times from an
+exported checkpoint with **--name, -n**.
+
+Using **--ignore-static-mac** tells Podman to ignore the MAC address if it was
+configured with **--mac-address** during container creation.
## EXAMPLE
podman container restore mywebserver
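As a hedged sketch (the archive path and names are hypothetical, and **--import**/**--name** are assumed from the rest of this man page), restoring the same exported checkpoint twice without address conflicts might look like:

    podman container restore --import /tmp/webserver.tar.gz --name web1
    podman container restore --import /tmp/webserver.tar.gz --name web2 --ignore-static-ip --ignore-static-mac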
diff --git a/docs/podman-container-runlabel.1.md b/docs/source/markdown/podman-container-runlabel.1.md
index 8511dd5cd..8511dd5cd 100644
--- a/docs/podman-container-runlabel.1.md
+++ b/docs/source/markdown/podman-container-runlabel.1.md
diff --git a/docs/podman-container.1.md b/docs/source/markdown/podman-container.1.md
index 4ea7c7acc..4ea7c7acc 100644
--- a/docs/podman-container.1.md
+++ b/docs/source/markdown/podman-container.1.md
diff --git a/docs/podman-cp.1.md b/docs/source/markdown/podman-cp.1.md
index 0f54b2e8b..0f54b2e8b 100644
--- a/docs/podman-cp.1.md
+++ b/docs/source/markdown/podman-cp.1.md
diff --git a/docs/podman-create.1.md b/docs/source/markdown/podman-create.1.md
index 6617850fd..d9ee69f82 100644
--- a/docs/podman-create.1.md
+++ b/docs/source/markdown/podman-create.1.md
@@ -67,12 +67,14 @@ Drop Linux capabilities
**--cgroupns**=*mode*
-Set the cgroup namespace mode for the container, by default **host** is used.
+Set the cgroup namespace mode for the container.
**host**: use the host's cgroup namespace inside the container.
**container:<NAME|ID>**: join the namespace of the specified container.
**private**: create a new cgroup namespace.
**ns:<PATH>**: join the namespace at the specified path.
+If the host uses cgroups v1, the default is set to **host**. On cgroups v2 the default is **private**.
+
**--cgroups**=*mode*
Determines whether the container will create CGroups.
@@ -196,9 +198,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information.
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--device**=*device*
@@ -498,6 +498,10 @@ Current supported mount TYPES are `bind`, `volume`, and `tmpfs`.
· tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+ · tmpcopyup: Enable copyup from the image directory at the same location to the tmpfs. Used by default.
+
+ · notmpcopyup: Disable copying files from the image to the tmpfs.
+
**--name**=*name*
Assign a name to the container
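A brief sketch of the options above (the image and mount point are hypothetical): request a private cgroup namespace and a tmpfs mount that skips the copy-up from the image:

    podman create --name demo --cgroupns=private \
        --mount type=tmpfs,destination=/var/cache,notmpcopyup alpine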
diff --git a/docs/podman-diff.1.md b/docs/source/markdown/podman-diff.1.md
index 5b0434a07..5b0434a07 100644
--- a/docs/podman-diff.1.md
+++ b/docs/source/markdown/podman-diff.1.md
diff --git a/docs/podman-events.1.md b/docs/source/markdown/podman-events.1.md
index bb1923574..bb1923574 100644
--- a/docs/podman-events.1.md
+++ b/docs/source/markdown/podman-events.1.md
diff --git a/docs/podman-exec.1.md b/docs/source/markdown/podman-exec.1.md
index 4c17c056a..9624425dc 100644
--- a/docs/podman-exec.1.md
+++ b/docs/source/markdown/podman-exec.1.md
@@ -15,7 +15,7 @@ podman\-exec - Execute a command in a running container
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--env**, **-e**
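For example (container name hypothetical), disabling the detach sequence so that ctrl-p reaches the process inside the container:

    podman exec --interactive --tty --detach-keys="" mycontainer /bin/sh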
diff --git a/docs/podman-export.1.md b/docs/source/markdown/podman-export.1.md
index 4286d0e2f..4286d0e2f 100644
--- a/docs/podman-export.1.md
+++ b/docs/source/markdown/podman-export.1.md
diff --git a/docs/podman-generate-kube.1.md b/docs/source/markdown/podman-generate-kube.1.md
index f4b4cd482..f4b4cd482 100644
--- a/docs/podman-generate-kube.1.md
+++ b/docs/source/markdown/podman-generate-kube.1.md
diff --git a/docs/podman-generate-systemd.1.md b/docs/source/markdown/podman-generate-systemd.1.md
index b81e68a46..b81e68a46 100644
--- a/docs/podman-generate-systemd.1.md
+++ b/docs/source/markdown/podman-generate-systemd.1.md
diff --git a/docs/podman-generate.1.md b/docs/source/markdown/podman-generate.1.md
index 50050f2c1..50050f2c1 100644
--- a/docs/podman-generate.1.md
+++ b/docs/source/markdown/podman-generate.1.md
diff --git a/docs/podman-healthcheck-run.1.md b/docs/source/markdown/podman-healthcheck-run.1.md
index 21f2d9b20..21f2d9b20 100644
--- a/docs/podman-healthcheck-run.1.md
+++ b/docs/source/markdown/podman-healthcheck-run.1.md
diff --git a/docs/podman-healthcheck.1.md b/docs/source/markdown/podman-healthcheck.1.md
index 91c3e4345..91c3e4345 100644
--- a/docs/podman-healthcheck.1.md
+++ b/docs/source/markdown/podman-healthcheck.1.md
diff --git a/docs/podman-history.1.md b/docs/source/markdown/podman-history.1.md
index a67cb0286..a67cb0286 100644
--- a/docs/podman-history.1.md
+++ b/docs/source/markdown/podman-history.1.md
diff --git a/docs/podman-image-exists.1.md b/docs/source/markdown/podman-image-exists.1.md
index f6a89e2aa..f6a89e2aa 100644
--- a/docs/podman-image-exists.1.md
+++ b/docs/source/markdown/podman-image-exists.1.md
diff --git a/docs/podman-image-prune.1.md b/docs/source/markdown/podman-image-prune.1.md
index b844a9f63..b844a9f63 100644
--- a/docs/podman-image-prune.1.md
+++ b/docs/source/markdown/podman-image-prune.1.md
diff --git a/docs/podman-image-sign.1.md b/docs/source/markdown/podman-image-sign.1.md
index 62845e715..62845e715 100644
--- a/docs/podman-image-sign.1.md
+++ b/docs/source/markdown/podman-image-sign.1.md
diff --git a/docs/podman-image-tree.1.md b/docs/source/markdown/podman-image-tree.1.md
index c4624e05c..c4624e05c 100644
--- a/docs/podman-image-tree.1.md
+++ b/docs/source/markdown/podman-image-tree.1.md
diff --git a/docs/podman-image-trust.1.md b/docs/source/markdown/podman-image-trust.1.md
index 3fe4f7f52..3fe4f7f52 100644
--- a/docs/podman-image-trust.1.md
+++ b/docs/source/markdown/podman-image-trust.1.md
diff --git a/docs/podman-image.1.md b/docs/source/markdown/podman-image.1.md
index 339a531dd..339a531dd 100644
--- a/docs/podman-image.1.md
+++ b/docs/source/markdown/podman-image.1.md
diff --git a/docs/podman-images.1.md b/docs/source/markdown/podman-images.1.md
index 3ac07fc43..3ac07fc43 100644
--- a/docs/podman-images.1.md
+++ b/docs/source/markdown/podman-images.1.md
diff --git a/docs/podman-import.1.md b/docs/source/markdown/podman-import.1.md
index 946b680dd..946b680dd 100644
--- a/docs/podman-import.1.md
+++ b/docs/source/markdown/podman-import.1.md
diff --git a/docs/podman-info.1.md b/docs/source/markdown/podman-info.1.md
index 9721755ef..9721755ef 100644
--- a/docs/podman-info.1.md
+++ b/docs/source/markdown/podman-info.1.md
diff --git a/docs/podman-init.1.md b/docs/source/markdown/podman-init.1.md
index 3b49cfb99..3b49cfb99 100644
--- a/docs/podman-init.1.md
+++ b/docs/source/markdown/podman-init.1.md
diff --git a/docs/podman-inspect.1.md b/docs/source/markdown/podman-inspect.1.md
index f1630c713..f1630c713 100644
--- a/docs/podman-inspect.1.md
+++ b/docs/source/markdown/podman-inspect.1.md
diff --git a/docs/podman-kill.1.md b/docs/source/markdown/podman-kill.1.md
index 617d25b85..617d25b85 100644
--- a/docs/podman-kill.1.md
+++ b/docs/source/markdown/podman-kill.1.md
diff --git a/docs/podman-load.1.md b/docs/source/markdown/podman-load.1.md
index deb4fb5ec..deb4fb5ec 100644
--- a/docs/podman-load.1.md
+++ b/docs/source/markdown/podman-load.1.md
diff --git a/docs/podman-login.1.md b/docs/source/markdown/podman-login.1.md
index 9d368e9f2..9d368e9f2 100644
--- a/docs/podman-login.1.md
+++ b/docs/source/markdown/podman-login.1.md
diff --git a/docs/podman-logout.1.md b/docs/source/markdown/podman-logout.1.md
index 01dc52ecd..01dc52ecd 100644
--- a/docs/podman-logout.1.md
+++ b/docs/source/markdown/podman-logout.1.md
diff --git a/docs/podman-logs.1.md b/docs/source/markdown/podman-logs.1.md
index 405f180d9..5507ba13a 100644
--- a/docs/podman-logs.1.md
+++ b/docs/source/markdown/podman-logs.1.md
@@ -39,7 +39,7 @@ and 2006-01-02.
**--tail**=*LINES*
-Output the specified number of LINES at the end of the logs. LINES must be a positive integer. Defaults to 0,
+Output the specified number of LINES at the end of the logs. LINES must be an integer. Defaults to -1,
which prints all lines
**--timestamps**, **-t**
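For example (container name hypothetical):

    podman logs --tail 20 mycontainer   # last 20 lines
    podman logs mycontainer             # --tail defaults to -1, all lines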
diff --git a/docs/podman-mount.1.md b/docs/source/markdown/podman-mount.1.md
index 8f4deeca6..8f4deeca6 100644
--- a/docs/podman-mount.1.md
+++ b/docs/source/markdown/podman-mount.1.md
diff --git a/docs/podman-network-create.1.md b/docs/source/markdown/podman-network-create.1.md
index c281d50d9..c281d50d9 100644
--- a/docs/podman-network-create.1.md
+++ b/docs/source/markdown/podman-network-create.1.md
diff --git a/docs/podman-network-inspect.1.md b/docs/source/markdown/podman-network-inspect.1.md
index dfa7e4b0c..dfa7e4b0c 100644
--- a/docs/podman-network-inspect.1.md
+++ b/docs/source/markdown/podman-network-inspect.1.md
diff --git a/docs/podman-network-ls.1.md b/docs/source/markdown/podman-network-ls.1.md
index 46e424593..46e424593 100644
--- a/docs/podman-network-ls.1.md
+++ b/docs/source/markdown/podman-network-ls.1.md
diff --git a/docs/podman-network-rm.1.md b/docs/source/markdown/podman-network-rm.1.md
index c71f0d8fd..c71f0d8fd 100644
--- a/docs/podman-network-rm.1.md
+++ b/docs/source/markdown/podman-network-rm.1.md
diff --git a/docs/podman-network.1.md b/docs/source/markdown/podman-network.1.md
index f05b2b78f..f05b2b78f 100644
--- a/docs/podman-network.1.md
+++ b/docs/source/markdown/podman-network.1.md
diff --git a/docs/podman-pause.1.md b/docs/source/markdown/podman-pause.1.md
index dfd4da416..dfd4da416 100644
--- a/docs/podman-pause.1.md
+++ b/docs/source/markdown/podman-pause.1.md
diff --git a/docs/podman-play-kube.1.md b/docs/source/markdown/podman-play-kube.1.md
index 2ac860a32..2ac860a32 100644
--- a/docs/podman-play-kube.1.md
+++ b/docs/source/markdown/podman-play-kube.1.md
diff --git a/docs/podman-play.1.md b/docs/source/markdown/podman-play.1.md
index 364baad60..364baad60 100644
--- a/docs/podman-play.1.md
+++ b/docs/source/markdown/podman-play.1.md
diff --git a/docs/podman-pod-create.1.md b/docs/source/markdown/podman-pod-create.1.md
index cd1de6401..cd1de6401 100644
--- a/docs/podman-pod-create.1.md
+++ b/docs/source/markdown/podman-pod-create.1.md
diff --git a/docs/podman-pod-exists.1.md b/docs/source/markdown/podman-pod-exists.1.md
index cf2852934..cf2852934 100644
--- a/docs/podman-pod-exists.1.md
+++ b/docs/source/markdown/podman-pod-exists.1.md
diff --git a/docs/podman-pod-inspect.1.md b/docs/source/markdown/podman-pod-inspect.1.md
index 831d28259..831d28259 100644
--- a/docs/podman-pod-inspect.1.md
+++ b/docs/source/markdown/podman-pod-inspect.1.md
diff --git a/docs/podman-pod-kill.1.md b/docs/source/markdown/podman-pod-kill.1.md
index 596e15cea..596e15cea 100644
--- a/docs/podman-pod-kill.1.md
+++ b/docs/source/markdown/podman-pod-kill.1.md
diff --git a/docs/podman-pod-pause.1.md b/docs/source/markdown/podman-pod-pause.1.md
index 9533ed4a1..9533ed4a1 100644
--- a/docs/podman-pod-pause.1.md
+++ b/docs/source/markdown/podman-pod-pause.1.md
diff --git a/docs/podman-pod-prune.1.md b/docs/source/markdown/podman-pod-prune.1.md
index f79961b2f..f79961b2f 100644
--- a/docs/podman-pod-prune.1.md
+++ b/docs/source/markdown/podman-pod-prune.1.md
diff --git a/docs/podman-pod-ps.1.md b/docs/source/markdown/podman-pod-ps.1.md
index 65a7072ab..65a7072ab 100644
--- a/docs/podman-pod-ps.1.md
+++ b/docs/source/markdown/podman-pod-ps.1.md
diff --git a/docs/podman-pod-restart.1.md b/docs/source/markdown/podman-pod-restart.1.md
index 57f479102..57f479102 100644
--- a/docs/podman-pod-restart.1.md
+++ b/docs/source/markdown/podman-pod-restart.1.md
diff --git a/docs/podman-pod-rm.1.md b/docs/source/markdown/podman-pod-rm.1.md
index 6659534b4..6659534b4 100644
--- a/docs/podman-pod-rm.1.md
+++ b/docs/source/markdown/podman-pod-rm.1.md
diff --git a/docs/podman-pod-start.1.md b/docs/source/markdown/podman-pod-start.1.md
index 29960d6aa..29960d6aa 100644
--- a/docs/podman-pod-start.1.md
+++ b/docs/source/markdown/podman-pod-start.1.md
diff --git a/docs/podman-pod-stats.1.md b/docs/source/markdown/podman-pod-stats.1.md
index 962edbda0..962edbda0 100644
--- a/docs/podman-pod-stats.1.md
+++ b/docs/source/markdown/podman-pod-stats.1.md
diff --git a/docs/podman-pod-stop.1.md b/docs/source/markdown/podman-pod-stop.1.md
index b3ce47d72..b3ce47d72 100644
--- a/docs/podman-pod-stop.1.md
+++ b/docs/source/markdown/podman-pod-stop.1.md
diff --git a/docs/podman-pod-top.1.md b/docs/source/markdown/podman-pod-top.1.md
index 48f10055a..48f10055a 100644
--- a/docs/podman-pod-top.1.md
+++ b/docs/source/markdown/podman-pod-top.1.md
diff --git a/docs/podman-pod-unpause.1.md b/docs/source/markdown/podman-pod-unpause.1.md
index e0a88c2e3..e0a88c2e3 100644
--- a/docs/podman-pod-unpause.1.md
+++ b/docs/source/markdown/podman-pod-unpause.1.md
diff --git a/docs/podman-pod.1.md b/docs/source/markdown/podman-pod.1.md
index b3d002a06..b3d002a06 100644
--- a/docs/podman-pod.1.md
+++ b/docs/source/markdown/podman-pod.1.md
diff --git a/docs/podman-port.1.md b/docs/source/markdown/podman-port.1.md
index c9833f447..c9833f447 100644
--- a/docs/podman-port.1.md
+++ b/docs/source/markdown/podman-port.1.md
diff --git a/docs/podman-ps.1.md b/docs/source/markdown/podman-ps.1.md
index 298de0b2b..298de0b2b 100644
--- a/docs/podman-ps.1.md
+++ b/docs/source/markdown/podman-ps.1.md
diff --git a/docs/podman-pull.1.md b/docs/source/markdown/podman-pull.1.md
index de9688f5e..de9688f5e 100644
--- a/docs/podman-pull.1.md
+++ b/docs/source/markdown/podman-pull.1.md
diff --git a/docs/podman-push.1.md b/docs/source/markdown/podman-push.1.md
index 3f0350bcd..3f0350bcd 100644
--- a/docs/podman-push.1.md
+++ b/docs/source/markdown/podman-push.1.md
diff --git a/docs/podman-remote.1.md b/docs/source/markdown/podman-remote.1.md
index 04010abaf..04010abaf 100644
--- a/docs/podman-remote.1.md
+++ b/docs/source/markdown/podman-remote.1.md
diff --git a/docs/podman-remote.conf.5.md b/docs/source/markdown/podman-remote.conf.5.md
index e9cc05989..e9cc05989 100644
--- a/docs/podman-remote.conf.5.md
+++ b/docs/source/markdown/podman-remote.conf.5.md
diff --git a/docs/podman-restart.1.md b/docs/source/markdown/podman-restart.1.md
index 08fa29244..08fa29244 100644
--- a/docs/podman-restart.1.md
+++ b/docs/source/markdown/podman-restart.1.md
diff --git a/docs/podman-rm.1.md b/docs/source/markdown/podman-rm.1.md
index 207d9d61d..c1663129c 100644
--- a/docs/podman-rm.1.md
+++ b/docs/source/markdown/podman-rm.1.md
@@ -42,7 +42,8 @@ The storage option conflicts with the **--all**, **--latest**, and **--volumes**
**--volumes**, **-v**
-Remove the volumes associated with the container.
+Remove anonymous volumes associated with the container. This does not include named volumes
+created with `podman volume create`, or the `--volume` option of `podman run` and `podman create`.
## EXAMPLE
Remove a container by its name *mywebserver*
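For the volume behaviour above, a hedged sketch (names hypothetical):

    podman rm --volumes mywebserver      # removes the container and its anonymous volumes
    podman volume rm mynamedvol          # named volumes must be removed explicitly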
diff --git a/docs/podman-rmi.1.md b/docs/source/markdown/podman-rmi.1.md
index d911ee6cb..d911ee6cb 100644
--- a/docs/podman-rmi.1.md
+++ b/docs/source/markdown/podman-rmi.1.md
diff --git a/docs/podman-run.1.md b/docs/source/markdown/podman-run.1.md
index d6d8f4c1e..28b00ee29 100644
--- a/docs/podman-run.1.md
+++ b/docs/source/markdown/podman-run.1.md
@@ -81,12 +81,14 @@ Drop Linux capabilities
**--cgroupns**=*mode*
-Set the cgroup namespace mode for the container, by default **host** is used.
+Set the cgroup namespace mode for the container.
**host**: use the host's cgroup namespace inside the container.
**container:<NAME|ID>**: join the namespace of the specified container.
**private**: create a new cgroup namespace.
**ns:<PATH>**: join the namespace at the specified path.
+If the host uses cgroups v1, the default is set to **host**. On cgroups v2 the default is **private**.
+
**--cgroups**=*mode*
Determines whether the container will create CGroups.
@@ -202,9 +204,7 @@ it in the **libpod.conf** file: see **libpod.conf(5)** for more information.
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--device**=*device*
@@ -509,6 +509,10 @@ Current supported mount TYPES are `bind`, `volume`, and `tmpfs`.
· tmpfs-mode: File mode of the tmpfs in octal. (e.g. 700 or 0700.) Defaults to 1777 in Linux.
+ · tmpcopyup: Enable copyup from the image directory at the same location to the tmpfs. Used by default.
+
+ · notmpcopyup: Disable copying files from the image to the tmpfs.
+
**--name**=*name*
Assign a name to the container
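A hedged sketch (image name hypothetical): run with the host's cgroup namespace and a tmpfs that skips the image copy-up:

    podman run --rm --cgroupns=host \
        --mount type=tmpfs,destination=/tmp,notmpcopyup alpine df -h /tmp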
diff --git a/docs/podman-save.1.md b/docs/source/markdown/podman-save.1.md
index b2b0995d3..b2b0995d3 100644
--- a/docs/podman-save.1.md
+++ b/docs/source/markdown/podman-save.1.md
diff --git a/docs/podman-search.1.md b/docs/source/markdown/podman-search.1.md
index 31de6f839..31de6f839 100644
--- a/docs/podman-search.1.md
+++ b/docs/source/markdown/podman-search.1.md
diff --git a/docs/podman-start.1.md b/docs/source/markdown/podman-start.1.md
index 4c81d17bd..84af9d800 100644
--- a/docs/podman-start.1.md
+++ b/docs/source/markdown/podman-start.1.md
@@ -23,9 +23,7 @@ starting multiple containers.
**--detach-keys**=*sequence*
-Override the key sequence for detaching a container. Format is a single character `[a-Z]` or
-a comma separated sequence of `ctrl-<value>`, where `<value>` is one of:
-`a-z`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`.
+Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.
**--interactive**, **-i**
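For instance (container name hypothetical), starting attached with a custom detach sequence:

    podman start --attach --interactive --detach-keys="ctrl-p,ctrl-x" mycontainer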
diff --git a/docs/podman-stats.1.md b/docs/source/markdown/podman-stats.1.md
index 741873c3f..741873c3f 100644
--- a/docs/podman-stats.1.md
+++ b/docs/source/markdown/podman-stats.1.md
diff --git a/docs/podman-stop.1.md b/docs/source/markdown/podman-stop.1.md
index b5ea670b0..b5ea670b0 100644
--- a/docs/podman-stop.1.md
+++ b/docs/source/markdown/podman-stop.1.md
diff --git a/docs/podman-system-df.1.md b/docs/source/markdown/podman-system-df.1.md
index d0b1755ee..d0b1755ee 100644
--- a/docs/podman-system-df.1.md
+++ b/docs/source/markdown/podman-system-df.1.md
diff --git a/docs/podman-system-migrate.1.md b/docs/source/markdown/podman-system-migrate.1.md
index d5e3bcb95..d5e3bcb95 100644
--- a/docs/podman-system-migrate.1.md
+++ b/docs/source/markdown/podman-system-migrate.1.md
diff --git a/docs/podman-system-prune.1.md b/docs/source/markdown/podman-system-prune.1.md
index e6297dc0b..e6297dc0b 100644
--- a/docs/podman-system-prune.1.md
+++ b/docs/source/markdown/podman-system-prune.1.md
diff --git a/docs/podman-system-renumber.1.md b/docs/source/markdown/podman-system-renumber.1.md
index 071eefe29..071eefe29 100644
--- a/docs/podman-system-renumber.1.md
+++ b/docs/source/markdown/podman-system-renumber.1.md
diff --git a/docs/podman-system.1.md b/docs/source/markdown/podman-system.1.md
index bbd541066..bbd541066 100644
--- a/docs/podman-system.1.md
+++ b/docs/source/markdown/podman-system.1.md
diff --git a/docs/podman-tag.1.md b/docs/source/markdown/podman-tag.1.md
index 291d95228..291d95228 100644
--- a/docs/podman-tag.1.md
+++ b/docs/source/markdown/podman-tag.1.md
diff --git a/docs/podman-top.1.md b/docs/source/markdown/podman-top.1.md
index 1410aa651..1410aa651 100644
--- a/docs/podman-top.1.md
+++ b/docs/source/markdown/podman-top.1.md
diff --git a/docs/podman-umount.1.md b/docs/source/markdown/podman-umount.1.md
index 100c47b32..100c47b32 100644
--- a/docs/podman-umount.1.md
+++ b/docs/source/markdown/podman-umount.1.md
diff --git a/docs/podman-unpause.1.md b/docs/source/markdown/podman-unpause.1.md
index f5538d6d5..f5538d6d5 100644
--- a/docs/podman-unpause.1.md
+++ b/docs/source/markdown/podman-unpause.1.md
diff --git a/docs/podman-unshare.1.md b/docs/source/markdown/podman-unshare.1.md
index 9052b97ab..9052b97ab 100644
--- a/docs/podman-unshare.1.md
+++ b/docs/source/markdown/podman-unshare.1.md
diff --git a/docs/podman-varlink.1.md b/docs/source/markdown/podman-varlink.1.md
index 0d2ab1668..0d2ab1668 100644
--- a/docs/podman-varlink.1.md
+++ b/docs/source/markdown/podman-varlink.1.md
diff --git a/docs/podman-version.1.md b/docs/source/markdown/podman-version.1.md
index 4499f6338..4499f6338 100644
--- a/docs/podman-version.1.md
+++ b/docs/source/markdown/podman-version.1.md
diff --git a/docs/podman-volume-create.1.md b/docs/source/markdown/podman-volume-create.1.md
index b354f396f..b354f396f 100644
--- a/docs/podman-volume-create.1.md
+++ b/docs/source/markdown/podman-volume-create.1.md
diff --git a/docs/podman-volume-inspect.1.md b/docs/source/markdown/podman-volume-inspect.1.md
index ac5b6c977..ac5b6c977 100644
--- a/docs/podman-volume-inspect.1.md
+++ b/docs/source/markdown/podman-volume-inspect.1.md
diff --git a/docs/podman-volume-ls.1.md b/docs/source/markdown/podman-volume-ls.1.md
index d431c7c6e..d431c7c6e 100644
--- a/docs/podman-volume-ls.1.md
+++ b/docs/source/markdown/podman-volume-ls.1.md
diff --git a/docs/podman-volume-prune.1.md b/docs/source/markdown/podman-volume-prune.1.md
index 25ea701a3..25ea701a3 100644
--- a/docs/podman-volume-prune.1.md
+++ b/docs/source/markdown/podman-volume-prune.1.md
diff --git a/docs/podman-volume-rm.1.md b/docs/source/markdown/podman-volume-rm.1.md
index 9a2fe8c99..9a2fe8c99 100644
--- a/docs/podman-volume-rm.1.md
+++ b/docs/source/markdown/podman-volume-rm.1.md
diff --git a/docs/podman-volume.1.md b/docs/source/markdown/podman-volume.1.md
index 288e57b82..288e57b82 100644
--- a/docs/podman-volume.1.md
+++ b/docs/source/markdown/podman-volume.1.md
diff --git a/docs/podman-wait.1.md b/docs/source/markdown/podman-wait.1.md
index ce1c70a5f..ce1c70a5f 100644
--- a/docs/podman-wait.1.md
+++ b/docs/source/markdown/podman-wait.1.md
diff --git a/docs/podman.1.md b/docs/source/markdown/podman.1.md
index f6fa1a457..f6fa1a457 100644
--- a/docs/podman.1.md
+++ b/docs/source/markdown/podman.1.md
diff --git a/docs/source/network.rst b/docs/source/network.rst
new file mode 100644
index 000000000..c1c53bc34
--- /dev/null
+++ b/docs/source/network.rst
@@ -0,0 +1,10 @@
+Network
+=======
+
+:doc:`create <markdown/podman-network-create.1>` Create a Podman network
+
+:doc:`inspect <markdown/podman-network-inspect.1>` Display the network configuration for one or more networks
+
+:doc:`ls <markdown/podman-network-ls.1>` List the networks known to Podman
+
+:doc:`rm <markdown/podman-network-rm.1>` Remove one or more networks \ No newline at end of file
diff --git a/docs/source/play.rst b/docs/source/play.rst
new file mode 100644
index 000000000..8f00d2f45
--- /dev/null
+++ b/docs/source/play.rst
@@ -0,0 +1,4 @@
+Play
+====
+
+:doc:`kube <markdown/podman-play-kube.1>` Play a pod based on Kubernetes YAML
diff --git a/docs/source/pod.rst b/docs/source/pod.rst
new file mode 100644
index 000000000..391686ce5
--- /dev/null
+++ b/docs/source/pod.rst
@@ -0,0 +1,30 @@
+Pod
+===
+
+:doc:`create <markdown/podman-pod-create.1>` Create a new empty pod
+
+:doc:`exists <markdown/podman-pod-exists.1>` Check if a pod exists in local storage
+
+:doc:`inspect <markdown/podman-pod-inspect.1>` Displays a pod configuration
+
+:doc:`kill <markdown/podman-pod-kill.1>` Send the specified signal or SIGKILL to containers in pod
+
+:doc:`pause <markdown/podman-pod-pause.1>` Pause one or more pods
+
+:doc:`prune <markdown/podman-pod-prune.1>` Remove all stopped pods
+
+:doc:`ps <markdown/podman-pod-ps.1>` List pods
+
+:doc:`restart <markdown/podman-pod-restart.1>` Restart one or more pods
+
+:doc:`rm <markdown/podman-pod-rm.1>` Remove one or more pods
+
+:doc:`start <markdown/podman-pod-start.1>` Start one or more pods
+
+:doc:`stats <markdown/podman-pod-stats.1>` Display a live stream of resource usage statistics for the containers in one or more pods
+
+:doc:`stop <markdown/podman-pod-stop.1>` Stop one or more pods
+
+:doc:`top <markdown/podman-pod-top.1>` Display the running processes of containers in a pod
+
+:doc:`unpause <markdown/podman-pod-unpause.1>` Unpause one or more pods
diff --git a/docs/source/system.rst b/docs/source/system.rst
new file mode 100644
index 000000000..2f2b7ea8f
--- /dev/null
+++ b/docs/source/system.rst
@@ -0,0 +1,12 @@
+System
+======
+
+:doc:`df <markdown/podman-system-df.1>` Show podman disk usage
+
+:doc:`info <markdown/podman-info.1>` Display podman system information
+
+:doc:`migrate <markdown/podman-system-migrate.1>` Migrate containers
+
+:doc:`prune <markdown/podman-system-prune.1>` Remove unused data
+
+:doc:`renumber <markdown/podman-system-renumber.1>` Migrate lock numbers
diff --git a/docs/source/volume.rst b/docs/source/volume.rst
new file mode 100644
index 000000000..43fe87054
--- /dev/null
+++ b/docs/source/volume.rst
@@ -0,0 +1,11 @@
+Volume
+======
+:doc:`create <markdown/podman-volume-create.1>` Create a new volume
+
+:doc:`inspect <markdown/podman-volume-inspect.1>` Display detailed information on one or more volumes
+
+:doc:`ls <markdown/podman-volume-ls.1>` List volumes
+
+:doc:`prune <markdown/podman-volume-prune.1>` Remove all unused volumes
+
+:doc:`rm <markdown/podman-volume-rm.1>` Remove one or more volumes \ No newline at end of file
diff --git a/go.mod b/go.mod
index af1be72aa..1b4e58225 100644
--- a/go.mod
+++ b/go.mod
@@ -9,15 +9,16 @@ require (
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc // indirect
- github.com/containernetworking/cni v0.7.1
+ github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containernetworking/plugins v0.8.2
- github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e
+ github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982
+ github.com/containers/conmon v2.0.2+incompatible // indirect
github.com/containers/image/v5 v5.0.0
github.com/containers/psgo v1.3.2
github.com/containers/storage v1.13.5
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
- github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca
+ github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
github.com/cyphar/filepath-securejoin v0.2.2
github.com/davecgh/go-spew v1.1.1
github.com/docker/distribution v2.7.1+incompatible
@@ -37,11 +38,11 @@ require (
github.com/hashicorp/go-multierror v1.0.0
github.com/hpcloud/tail v1.0.0
github.com/imdario/mergo v0.3.7 // indirect
- github.com/json-iterator/go v1.1.7
+ github.com/json-iterator/go v1.1.8
github.com/mattn/go-isatty v0.0.8 // indirect
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
- github.com/onsi/ginkgo v1.10.1
- github.com/onsi/gomega v1.7.0
+ github.com/onsi/ginkgo v1.10.3
+ github.com/onsi/gomega v1.7.1
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
@@ -59,7 +60,7 @@ require (
github.com/stretchr/testify v1.4.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/uber-go/atomic v1.4.0 // indirect
- github.com/uber/jaeger-client-go v2.19.0+incompatible
+ github.com/uber/jaeger-client-go v2.20.0+incompatible
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 // indirect
github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
github.com/vishvananda/netlink v1.0.0
@@ -71,7 +72,7 @@ require (
google.golang.org/appengine v1.6.1 // indirect
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.2.4
+ gopkg.in/yaml.v2 v2.2.5
k8s.io/api v0.0.0-20190813020757-36bff7324fb7
k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010
k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
diff --git a/go.sum b/go.sum
index f5d5635aa..262cc86a0 100644
--- a/go.sum
+++ b/go.sum
@@ -53,14 +53,16 @@ github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL
github.com/containernetworking/cni v0.7.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
+github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.2 h1:5lnwfsAYO+V7yXhysJKy3E1A2Gy9oVut031zfdOzI9w=
github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr7U8R23PdhwaLp6F3adc=
-github.com/containers/buildah v1.11.3 h1:L5vFj+ao58IGq3G30jN94vRQrIgMU/uTOEKduDr3Nyg=
-github.com/containers/buildah v1.11.3/go.mod h1:jqZmSU/PhFwTHHlOotnw4bbs1JbkRQLh8dut5DF4Qek=
github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e h1:iDavHEx5Yr7o+0l6495Ya6N0YEPplIUZuWC2e14baDM=
github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e/go.mod h1:Igrk75FAxLnzDaHUbtpWB8pwL+Bv+cnakWMvqAXW2v8=
-github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ=
-github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA=
+github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982 h1:5WUe09k2sJSbmxwLHZLHc41TrIPrP0GlbhX+WDJBqvs=
+github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982/go.mod h1:eGWB4tLoo0hIBuytQpvgUC0hk2mvl2ofaYBeDsU/qoc=
+github.com/containers/conmon v2.0.2+incompatible h1:h2HCdd/EBpwFn7RT82Y2GyXnVUHWxk1Jm4cESSZG4P8=
+github.com/containers/conmon v2.0.2+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
@@ -85,6 +87,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca h1:CJstDqYy9ClWuPcDHMTCAiUS+ckekluYetGR2iYYWuo=
github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca/go.mod h1:BO0al9TKber3XUTucLzKgoG5sq8qiOB41H7zSdfw6r8=
+github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b h1:SgS+WV10y2Bubuy2HquSBori6DXj9sqRN77Hgs5H7Qc=
+github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b/go.mod h1:ZOuIEOp/3MB1eCBWANnNxM3zUA3NWh76wSRCsnKAg2c=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -102,8 +106,6 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b h1:+Ga+YpCDpcY1fln6GI0fiiirpqHGcob5/Vk3oKNuGdU=
-github.com/docker/docker v1.4.2-0.20190710153559-aa8249ae1b8b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA=
github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
@@ -139,8 +141,6 @@ github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsouza/go-dockerclient v1.4.4 h1:Sd5nD4wdAgiPxvrbYUzT2ZZNmPk3z+GGnZ+frvw8z04=
-github.com/fsouza/go-dockerclient v1.4.4/go.mod h1:PrwszSL5fbmsESocROrOGq/NULMXRw+bajY0ltzD6MA=
github.com/fsouza/go-dockerclient v1.5.0 h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA=
github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -210,8 +210,6 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM=
-github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -228,6 +226,8 @@ github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwK
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/juju/errors v0.0.0-20180806074554-22422dad46e1/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
@@ -293,6 +293,9 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -300,6 +303,8 @@ github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -322,8 +327,6 @@ github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqG
github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s=
github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY=
-github.com/openshift/imagebuilder v1.1.0 h1:oT704SkwMEzmIMU/+Uv1Wmvt+p10q3v2WuYMeFI18c4=
-github.com/openshift/imagebuilder v1.1.0/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/openshift/imagebuilder v1.1.1 h1:KAUR31p8UBJdfVO42azWgb+LeMAed2zaKQ19e0C0X2I=
github.com/openshift/imagebuilder v1.1.1/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@@ -406,6 +409,8 @@ github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber/jaeger-client-go v2.19.0+incompatible h1:pbwbYfHUoaase0oPQOdZ1GcaUjImYGimUXSQ/+8+Z8Q=
github.com/uber/jaeger-client-go v2.19.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.20.0+incompatible h1:ttG9wKdl2ikV/BGOtu+eb+VPp+R7jMeuM177Ihs5Fdc=
+github.com/uber/jaeger-client-go v2.20.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5 h1:CwmGyzHTzCqCdZJkWR0A7ucZXgrCY7spRcpvm7ci//s=
github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -444,8 +449,6 @@ golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -495,7 +498,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -552,6 +554,8 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
diff --git a/hack/man-page-checker b/hack/man-page-checker
index 30d0b2113..99d280539 100755
--- a/hack/man-page-checker
+++ b/hack/man-page-checker
@@ -18,7 +18,7 @@ die() {
exit 1
}
-cd $(dirname $0)/../docs || die "Please run me from top-level libpod dir"
+cd $(dirname $0)/../docs/source/markdown || die "Please run me from top-level libpod dir"
rc=0
diff --git a/hack/podman-commands.sh b/hack/podman-commands.sh
index 7530e20d0..32f94fc7b 100755
--- a/hack/podman-commands.sh
+++ b/hack/podman-commands.sh
@@ -34,7 +34,7 @@ function podman_man() {
# This md file has a table of the form:
# | [podman-cmd(1)\[(podman-cmd.1.md) | Description ... |
# For all such, print the 'cmd' portion (the one in brackets).
- sed -ne 's/^|\s\+\[podman-\([a-z]\+\)(1.*/\1/p' <docs/$1.1.md
+ sed -ne 's/^|\s\+\[podman-\([a-z]\+\)(1.*/\1/p' <docs/source/markdown/$1.1.md
# Special case: there is no podman-help man page, nor need for such.
echo "help"
@@ -48,7 +48,7 @@ function podman_man() {
# | cmd | [podman-cmd(1)](podman-cmd.1.md) | Description ... |
# For all such we find, with 'podman- in the second column, print the
# first column (with whitespace trimmed)
- awk -F\| '$3 ~ /podman-/ { gsub(" ","",$2); print $2 }' < docs/$1.1.md
+ awk -F\| '$3 ~ /podman-/ { gsub(" ","",$2); print $2 }' < docs/source/markdown/$1.1.md
fi
}
@@ -93,7 +93,7 @@ if [ $rc -ne 0 ]; then
* podman man pages.
*
* The 'checking:' header indicates the specific command (and possibly
-* subcommand) being tested, e.g. podman --help vs docs/podman.1.md.
+* subcommand) being tested, e.g. podman --help vs docs/source/podman.1.md.
*
* A '-' indicates a subcommand present in 'podman --help' but not the
* corresponding man page.
diff --git a/install.md b/install.md
index 2583c4f9d..39b639176 100644
--- a/install.md
+++ b/install.md
@@ -72,6 +72,8 @@ sudo apt-get install -qq -y software-properties-common uidmap
sudo add-apt-repository -y ppa:projectatomic/ppa
sudo apt-get update -qq
sudo apt-get -qq -y install podman
+sudo mkdir -p /etc/containers
+echo -e "[registries.search]\nregistries = ['docker.io', 'quay.io']" | sudo tee /etc/containers/registries.conf
```
#### Fedora
diff --git a/libpod.conf b/libpod.conf
index 47f66ecc1..7e0228c19 100644
--- a/libpod.conf
+++ b/libpod.conf
@@ -142,8 +142,36 @@ runc = [
]
crun = [
- "/usr/bin/crun",
- "/usr/local/bin/crun",
+ "/usr/bin/crun",
+ "/usr/sbin/crun",
+ "/usr/local/bin/crun",
+ "/usr/local/sbin/crun",
+ "/sbin/crun",
+ "/bin/crun",
+ "/run/current-system/sw/bin/crun",
+]
+
+# Kata Containers is an OCI runtime, where containers are run inside lightweight
+# Virtual Machines (VMs). Kata provides additional isolation towards the host,
+# minimizing the host attack surface and mitigating the consequences of
+# a container breakout.
+# Please note that Kata does not support rootless podman yet, but we can leave
+# the paths below blank to let them be discovered by the $PATH environment
+# variable.
+
+# Kata Containers with the default configured VMM
+kata-runtime = [
+ "/usr/bin/kata-runtime",
+]
+
+# Kata Containers with the QEMU VMM
+kata-qemu = [
+ "/usr/bin/kata-qemu",
+]
+
+# Kata Containers with the Firecracker VMM
+kata-fc = [
+ "/usr/bin/kata-fc",
]
# The [runtimes] table MUST be the last thing in this file.
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 0bb1df7b8..608a279c3 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -6,6 +6,7 @@ import (
"strings"
"sync"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
bolt "github.com/etcd-io/bbolt"
jsoniter "github.com/json-iterator/go"
@@ -291,12 +292,12 @@ func (s *BoltState) Refresh() error {
// GetDBConfig retrieves runtime configuration fields that were created when
// the database was first initialized
-func (s *BoltState) GetDBConfig() (*DBConfig, error) {
+func (s *BoltState) GetDBConfig() (*config.DBConfig, error) {
if !s.valid {
return nil, define.ErrDBClosed
}
- cfg := new(DBConfig)
+ cfg := new(config.DBConfig)
db, err := s.getDBCon()
if err != nil {
diff --git a/libpod/common_test.go b/libpod/common_test.go
index 93ca7bc71..83b162c8a 100644
--- a/libpod/common_test.go
+++ b/libpod/common_test.go
@@ -7,6 +7,7 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
@@ -74,7 +75,7 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
},
},
runtime: &Runtime{
- config: &RuntimeConfig{
+ config: &config.Config{
VolumePath: "/does/not/exist/tmp/volumes",
},
},
diff --git a/libpod/config/config.go b/libpod/config/config.go
new file mode 100644
index 000000000..5b4b57f3a
--- /dev/null
+++ b/libpod/config/config.go
@@ -0,0 +1,549 @@
+package config
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // _defaultTransport is a prefix that we apply to an image name to check
+ // docker hub first for the image.
+ _defaultTransport = "docker://"
+
+ // _rootlessConfigPath is the path to the rootless libpod.conf in $HOME.
+ _rootlessConfigPath = ".config/containers/libpod.conf"
+
+ // _conmonMinMajorVersion is the major version required for conmon.
+ _conmonMinMajorVersion = 2
+
+ // _conmonMinMinorVersion is the minor version required for conmon.
+ _conmonMinMinorVersion = 0
+
+ // _conmonMinPatchVersion is the sub-minor version required for conmon.
+ _conmonMinPatchVersion = 1
+
+	// _conmonVersionFormatErr is used when the expected version format of conmon
+ // has changed.
+ _conmonVersionFormatErr = "conmon version changed format"
+
+	// _installPrefix is the prefix where podman will be installed.
+ // It can be overridden at build time.
+ _installPrefix = "/usr"
+
+	// _etcDir is the sysconfdir where podman should look for system config files.
+ // It can be overridden at build time.
+ _etcDir = "/etc"
+
+ // SeccompDefaultPath defines the default seccomp path.
+ SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json"
+
+	// SeccompOverridePath, if it exists, overrides the default seccomp path.
+ SeccompOverridePath = _etcDir + "/crio/seccomp.json"
+
+	// _rootConfigPath is the path to the libpod configuration file.
+	// This file is loaded to replace the builtin default config before
+	// runtime options (e.g. WithStorageConfig) are applied.
+	// If it is not present, the builtin default config is used instead.
+ // This path can be overridden when the runtime is created by using
+ // NewRuntimeFromConfig() instead of NewRuntime().
+ _rootConfigPath = _installPrefix + "/share/containers/libpod.conf"
+
+ // _rootOverrideConfigPath is the path to an override for the default libpod
+ // configuration file. If OverrideConfigPath exists, it will be used in
+ // place of the configuration file pointed to by ConfigPath.
+ _rootOverrideConfigPath = _etcDir + "/containers/libpod.conf"
+)
+
+// SetOptions contains a subset of options in a Config. It's used to indicate if
+// a given option has either been set by the user or by a parsed libpod
+// configuration file. If not, the corresponding option might be overwritten by
+// values from the database. This behavior guarantees backwards compatibility
+// with older versions of libpod and Podman.
+type SetOptions struct {
+ // StorageConfigRunRootSet indicates if the RunRoot has been explicitly set
+ // by the config or by the user. It's required to guarantee backwards
+ // compatibility with older versions of libpod for which we must query the
+ // database configuration. Not included in the on-disk config.
+ StorageConfigRunRootSet bool `toml:"-"`
+
+ // StorageConfigGraphRootSet indicates if the RunRoot has been explicitly
+ // set by the config or by the user. It's required to guarantee backwards
+ // compatibility with older versions of libpod for which we must query the
+ // database configuration. Not included in the on-disk config.
+ StorageConfigGraphRootSet bool `toml:"-"`
+
+ // StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been
+ // explicitly set by the config or by the user. It's required to guarantee
+ // backwards compatibility with older versions of libpod for which we must
+ // query the database configuration. Not included in the on-disk config.
+ StorageConfigGraphDriverNameSet bool `toml:"-"`
+
+ // VolumePathSet indicates if the VolumePath has been explicitly set by the
+ // config or by the user. It's required to guarantee backwards compatibility
+ // with older versions of libpod for which we must query the database
+ // configuration. Not included in the on-disk config.
+ VolumePathSet bool `toml:"-"`
+
+ // StaticDirSet indicates if the StaticDir has been explicitly set by the
+ // config or by the user. It's required to guarantee backwards compatibility
+ // with older versions of libpod for which we must query the database
+ // configuration. Not included in the on-disk config.
+ StaticDirSet bool `toml:"-"`
+
+ // TmpDirSet indicates if the TmpDir has been explicitly set by the config
+ // or by the user. It's required to guarantee backwards compatibility with
+ // older versions of libpod for which we must query the database
+ // configuration. Not included in the on-disk config.
+ TmpDirSet bool `toml:"-"`
+}
+
+// Config contains configuration options used to set up a libpod runtime
+type Config struct {
+ // NOTE: when changing this struct, make sure to update (*Config).Merge().
+
+ // SetOptions contains a subset of config options. It's used to indicate if
+ // a given option has either been set by the user or by a parsed libpod
+ // configuration file. If not, the corresponding option might be
+	// overwritten by values from the database. This behavior guarantees
+	// backwards compatibility with older versions of libpod and Podman.
+ SetOptions
+
+ // StateType is the type of the backing state store. Avoid using multiple
+ // values for this with the same containers/storage configuration on the
+ // same system. Different state types do not interact, and each will see a
+ // separate set of containers, which may cause conflicts in
+ // containers/storage. As such this is not exposed via the config file.
+ StateType define.RuntimeStateStore `toml:"-"`
+
+	// StorageConfig is the configuration used by containers/storage. Not
+ // included in the on-disk config, use the dedicated containers/storage
+ // configuration file instead.
+ StorageConfig storage.StoreOptions `toml:"-"`
+
+ // VolumePath is the default location that named volumes will be created
+ // under. This convention is followed by the default volume driver, but
+ // may not be by other drivers.
+ VolumePath string `toml:"volume_path"`
+
+ // ImageDefaultTransport is the default transport method used to fetch
+ // images.
+ ImageDefaultTransport string `toml:"image_default_transport"`
+
+ // SignaturePolicyPath is the path to a signature policy to use for
+ // validating images. If left empty, the containers/image default signature
+ // policy will be used.
+ SignaturePolicyPath string `toml:"signature_policy_path,omitempty"`
+
+ // OCIRuntime is the OCI runtime to use.
+ OCIRuntime string `toml:"runtime"`
+
+ // OCIRuntimes are the set of configured OCI runtimes (default is runc).
+ OCIRuntimes map[string][]string `toml:"runtimes"`
+
+ // RuntimeSupportsJSON is the list of the OCI runtimes that support
+ // --format=json.
+ RuntimeSupportsJSON []string `toml:"runtime_supports_json"`
+
+ // RuntimeSupportsNoCgroups is a list of OCI runtimes that support
+ // running containers without CGroups.
+ RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups"`
+
+	// RuntimePath is the path to the OCI runtime binary for launching containers.
+	// The first path pointing to a valid file will be used. This is used only
+ // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be
+ // backward compatible with older versions of Podman.
+ RuntimePath []string `toml:"runtime_path"`
+
+ // ConmonPath is the path to the Conmon binary used for managing containers.
+ // The first path pointing to a valid file will be used.
+ ConmonPath []string `toml:"conmon_path"`
+
+ // ConmonEnvVars are environment variables to pass to the Conmon binary
+ // when it is launched.
+ ConmonEnvVars []string `toml:"conmon_env_vars"`
+
+	// CgroupManager is the cgroup manager to use. Valid values are "cgroupfs"
+ // and "systemd".
+ CgroupManager string `toml:"cgroup_manager"`
+
+ // InitPath is the path to the container-init binary.
+ InitPath string `toml:"init_path"`
+
+ // StaticDir is the path to a persistent directory to store container
+ // files.
+ StaticDir string `toml:"static_dir"`
+
+ // TmpDir is the path to a temporary directory to store per-boot container
+ // files. Must be stored in a tmpfs.
+ TmpDir string `toml:"tmp_dir"`
+
+ // MaxLogSize is the maximum size of container logfiles.
+ MaxLogSize int64 `toml:"max_log_size,omitempty"`
+
+ // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
+ NoPivotRoot bool `toml:"no_pivot_root"`
+
+ // CNIConfigDir sets the directory where CNI configuration files are
+ // stored.
+ CNIConfigDir string `toml:"cni_config_dir"`
+
+ // CNIPluginDir sets a number of directories where the CNI network
+ // plugins can be located.
+ CNIPluginDir []string `toml:"cni_plugin_dir"`
+
+ // CNIDefaultNetwork is the network name of the default CNI network
+ // to attach pods to.
+ CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
+
+ // HooksDir holds paths to the directories containing hooks
+	// configuration files. When the same filename is present in
+ // multiple directories, the file in the directory listed last in
+ // this slice takes precedence.
+ HooksDir []string `toml:"hooks_dir"`
+
+ // DefaultMountsFile is the path to the default mounts file for testing
+ // purposes only.
+ DefaultMountsFile string `toml:"-"`
+
+ // Namespace is the libpod namespace to use. Namespaces are used to create
+ // scopes to separate containers and pods in the state. When namespace is
+ // set, libpod will only view containers and pods in the same namespace. All
+ // containers and pods created will default to the namespace set here. A
+ // namespace of "", the empty string, is equivalent to no namespace, and all
+ // containers and pods will be visible. The default namespace is "".
+ Namespace string `toml:"namespace,omitempty"`
+
+ // InfraImage is the image a pod infra container will use to manage
+ // namespaces.
+ InfraImage string `toml:"infra_image"`
+
+ // InfraCommand is the command run to start up a pod infra container.
+ InfraCommand string `toml:"infra_command"`
+
+ // EnablePortReservation determines whether libpod will reserve ports on the
+	// host when they are forwarded to containers. When enabled, forwarded ports
+	// are held open by conmon as long as the
+ // container is running, ensuring that they cannot be reused by other
+ // programs on the host. However, this can cause significant memory usage if
+ // a container has many ports forwarded to it. Disabling this can save
+ // memory.
+ EnablePortReservation bool `toml:"enable_port_reservation"`
+
+ // EnableLabeling indicates whether libpod will support container labeling.
+ EnableLabeling bool `toml:"label"`
+
+ // NetworkCmdPath is the path to the slirp4netns binary.
+ NetworkCmdPath string `toml:"network_cmd_path"`
+
+ // NumLocks is the number of locks to make available for containers and
+ // pods.
+ NumLocks uint32 `toml:"num_locks,omitempty"`
+
+ // LockType is the type of locking to use.
+ LockType string `toml:"lock_type,omitempty"`
+
+ // EventsLogger determines where events should be logged.
+ EventsLogger string `toml:"events_logger"`
+
+ // EventsLogFilePath is where the events log is stored.
+ EventsLogFilePath string `toml:"events_logfile_path"`
+
+	// DetachKeys is the sequence of keys used to detach a container.
+ DetachKeys string `toml:"detach_keys"`
+
+ // SDNotify tells Libpod to allow containers to notify the host systemd of
+ // readiness using the SD_NOTIFY mechanism.
+ SDNotify bool
+}
+
+// DBConfig is a set of Libpod runtime configuration settings that are saved in
+// a State when it is first created, and can subsequently be retrieved.
+type DBConfig struct {
+ LibpodRoot string
+ LibpodTmp string
+ StorageRoot string
+ StorageTmp string
+ GraphDriver string
+ VolumePath string
+}
+
+// readConfigFromFile reads the specified config file at `path` and attempts to
+// unmarshal its content into a Config.
+func readConfigFromFile(path string) (*Config, error) {
+ var config Config
+
+ configBytes, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("Reading configuration file %q", path)
+ err = toml.Unmarshal(configBytes, &config)
+
+ // For the sake of backwards compat we need to check if the config fields
+ // with *Set suffix are set in the config. Note that the storage-related
+ // fields are NOT set in the config here but in the storage.conf OR directly
+ // by the user.
+ if config.VolumePath != "" {
+ config.VolumePathSet = true
+ }
+ if config.StaticDir != "" {
+ config.StaticDirSet = true
+ }
+ if config.TmpDir != "" {
+ config.TmpDirSet = true
+ }
+
+ return &config, err
+}
+
+// Write encodes the config as TOML and writes it to the specified path.
+func (c *Config) Write(path string) error {
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
+ if err != nil {
+ return errors.Wrapf(err, "error opening config file %q", path)
+ }
+
+ buffer := new(bytes.Buffer)
+ if err := toml.NewEncoder(buffer).Encode(c); err != nil {
+ return errors.Wrapf(err, "error encoding config")
+ }
+
+ if _, err := f.WriteString(buffer.String()); err != nil {
+ return errors.Wrapf(err, "error writing config %q", path)
+ }
+ return err
+}
+
+// FindConmon iterates over (*Config).ConmonPath and returns the path to the
+// first (version) matching conmon binary. If none is found, we try to do a
+// path lookup of "conmon".
+func (c *Config) FindConmon() (string, error) {
+ foundOutdatedConmon := false
+ for _, path := range c.ConmonPath {
+ stat, err := os.Stat(path)
+ if err != nil {
+ continue
+ }
+ if stat.IsDir() {
+ continue
+ }
+ if err := probeConmon(path); err != nil {
+ logrus.Warnf("Conmon at %s invalid: %v", path, err)
+ foundOutdatedConmon = true
+ continue
+ }
+ logrus.Debugf("Using conmon: %q", path)
+ return path, nil
+ }
+
+	// Search the $PATH as a last fallback
+ if path, err := exec.LookPath("conmon"); err == nil {
+ if err := probeConmon(path); err != nil {
+ logrus.Warnf("Conmon at %s is invalid: %v", path, err)
+ foundOutdatedConmon = true
+ } else {
+ logrus.Debugf("Using conmon from $PATH: %q", path)
+ return path, nil
+ }
+ }
+
+ if foundOutdatedConmon {
+ return "", errors.Wrapf(define.ErrConmonOutdated,
+ "please update to v%d.%d.%d or later",
+ _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
+ }
+
+ return "", errors.Wrapf(define.ErrInvalidArg,
+ "could not find a working conmon binary (configured options: %v)",
+ c.ConmonPath)
+}
+
+// probeConmon calls conmon --version and verifies it is a new enough version for
+// the runtime expectations podman currently has.
+func probeConmon(conmonBinary string) error {
+ cmd := exec.Command(conmonBinary, "--version")
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil {
+ return err
+ }
+ r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
+
+ matches := r.FindStringSubmatch(out.String())
+ if len(matches) != 4 {
+		return errors.New(_conmonVersionFormatErr)
+ }
+ major, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return errors.Wrap(err, _conmonVersionFormatErr)
+ }
+ if major < _conmonMinMajorVersion {
+ return define.ErrConmonOutdated
+ }
+ if major > _conmonMinMajorVersion {
+ return nil
+ }
+
+ minor, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return errors.Wrap(err, _conmonVersionFormatErr)
+ }
+ if minor < _conmonMinMinorVersion {
+ return define.ErrConmonOutdated
+ }
+ if minor > _conmonMinMinorVersion {
+ return nil
+ }
+
+ patch, err := strconv.Atoi(matches[3])
+ if err != nil {
+ return errors.Wrap(err, _conmonVersionFormatErr)
+ }
+ if patch < _conmonMinPatchVersion {
+ return define.ErrConmonOutdated
+ }
+ if patch > _conmonMinPatchVersion {
+ return nil
+ }
+
+ return nil
+}
+
+// NewConfig creates a new Config. It starts with an empty config and, if
+// specified, merges the config at `userConfigPath`. Depending on whether we're
+// running as root or rootless, we then merge the system configuration followed
+// by merging the default config (hard-coded default in memory).
+//
+// Note that the OCI runtime is hard-set to `crun` if we're running on a system
+// with cgroups v2. Other OCI runtimes do not yet support cgroups v2. This
+// might change in the future.
+func NewConfig(userConfigPath string) (*Config, error) {
+ config := &Config{} // start with an empty config
+
+ // First, try to read the user-specified config
+ if userConfigPath != "" {
+ var err error
+ config, err = readConfigFromFile(userConfigPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
+ }
+ }
+
+ // Now, check if the user can access system configs and merge them if needed.
+ if configs, err := systemConfigs(); err != nil {
+ return nil, errors.Wrapf(err, "error finding config on system")
+ } else {
+ for _, path := range configs {
+ systemConfig, err := readConfigFromFile(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error reading system config %q", path)
+ }
+			// Merge it into the config. Any unset field in config will be
+			// overwritten by the systemConfig.
+ if err := config.mergeConfig(systemConfig); err != nil {
+ return nil, errors.Wrapf(err, "error merging system config")
+ }
+ logrus.Debugf("Merged system config %q: %v", path, config)
+ }
+ }
+
+ // Finally, create a default config from memory and forcefully merge it into
+	// the config. This way we try to make sure that all fields are properly set
+	// and that the user AND system configs can each set fields only partially.
+ if defaultConfig, err := defaultConfigFromMemory(); err != nil {
+ return nil, errors.Wrapf(err, "error generating default config from memory")
+ } else {
+ if err := config.mergeConfig(defaultConfig); err != nil {
+ return nil, errors.Wrapf(err, "error merging default config from memory")
+ }
+ }
+
+ // Relative paths can cause nasty bugs, because core paths we use could
+ // shift between runs (or even parts of the program - the OCI runtime
+	// uses a different working directory than we do, for example).
+ if !filepath.IsAbs(config.StaticDir) {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "static directory must be an absolute path - instead got %q", config.StaticDir)
+ }
+ if !filepath.IsAbs(config.TmpDir) {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "temporary directory must be an absolute path - instead got %q", config.TmpDir)
+ }
+ if !filepath.IsAbs(config.VolumePath) {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", config.VolumePath)
+ }
+
+ // Check if we need to switch to cgroupfs on rootless.
+ config.checkCgroupsAndAdjustConfig()
+
+ return config, nil
+}
+
+func rootlessConfigPath() (string, error) {
+ home, err := util.HomeDir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(home, _rootlessConfigPath), nil
+}
+
+func systemConfigs() ([]string, error) {
+ if rootless.IsRootless() {
+ path, err := rootlessConfigPath()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := os.Stat(path); err == nil {
+ return []string{path}, nil
+ }
+ return nil, err
+ }
+
+ configs := []string{}
+ if _, err := os.Stat(_rootOverrideConfigPath); err == nil {
+ configs = append(configs, _rootOverrideConfigPath)
+ }
+ if _, err := os.Stat(_rootConfigPath); err == nil {
+ configs = append(configs, _rootConfigPath)
+ }
+ return configs, nil
+}
+
+// checkCgroupsAndAdjustConfig checks if we're running rootless with the systemd
+// cgroup manager. If the user session isn't available, we switch the
+// cgroup manager to cgroupfs. Note that this only applies to rootless.
+func (c *Config) checkCgroupsAndAdjustConfig() {
+ if !rootless.IsRootless() || c.CgroupManager != define.SystemdCgroupsManager {
+ return
+ }
+
+ session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+ hasSession := session != ""
+ if hasSession && strings.HasPrefix(session, "unix:path=") {
+ _, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
+ hasSession = err == nil
+ }
+
+ if !hasSession {
+ logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
+		logrus.Warningf("For using systemd, you may need to log in using a user session")
+ logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", rootless.GetRootlessUID())
+ logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
+ c.CgroupManager = define.CgroupfsCgroupsManager
+ }
+}
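
For orientation, here is a minimal usage sketch of the new constructor. `config.NewConfig` and the fields read below are introduced by this patch; the `main()` wrapper and the printed labels are illustrative assumptions, not part of the change. Passing an empty path skips the user config; any readable system config and the built-in in-memory defaults are then merged in that order, and relative core paths are rejected.

```go
// Illustrative usage sketch, not part of the patch.
package main

import (
	"fmt"
	"log"

	"github.com/containers/libpod/libpod/config"
)

func main() {
	// "" means: no user-specified config; system configs (root or rootless)
	// and the hard-coded defaults are still merged in precedence order.
	cfg, err := config.NewConfig("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("OCI runtime:", cfg.OCIRuntime) // "runc", or "crun" on cgroups v2
	fmt.Println("tmp dir:    ", cfg.TmpDir)     // guaranteed to be absolute
	fmt.Println("static dir: ", cfg.StaticDir)  // guaranteed to be absolute
}
```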
diff --git a/libpod/config/config_test.go b/libpod/config/config_test.go
new file mode 100644
index 000000000..47c092440
--- /dev/null
+++ b/libpod/config/config_test.go
@@ -0,0 +1,64 @@
+package config
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/storage"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEmptyConfig(t *testing.T) {
+ // Make sure that we can read empty configs
+ config, err := readConfigFromFile("testdata/empty.conf")
+ assert.NotNil(t, config)
+ assert.Nil(t, err)
+}
+
+func TestDefaultLibpodConf(t *testing.T) {
+ // Make sure that we can read the default libpod.conf
+ config, err := readConfigFromFile("testdata/libpod.conf")
+ assert.NotNil(t, config)
+ assert.Nil(t, err)
+}
+
+func TestMergeEmptyAndDefaultMemoryConfig(t *testing.T) {
+	// Make sure that when we merge the default config into an empty one we
+ // effectively get the default config.
+ defaultConfig, err := defaultConfigFromMemory()
+ assert.NotNil(t, defaultConfig)
+ assert.Nil(t, err)
+ defaultConfig.StateType = define.InvalidStateStore
+ defaultConfig.StorageConfig = storage.StoreOptions{}
+
+ emptyConfig, err := readConfigFromFile("testdata/empty.conf")
+ assert.NotNil(t, emptyConfig)
+ assert.Nil(t, err)
+
+ err = emptyConfig.mergeConfig(defaultConfig)
+ assert.Nil(t, err)
+
+ equal := reflect.DeepEqual(emptyConfig, defaultConfig)
+ assert.True(t, equal)
+}
+
+func TestMergeEmptyAndLibpodConfig(t *testing.T) {
+	// Make sure that when we merge the libpod config into an empty one we
+	// effectively get the libpod config.
+ libpodConfig, err := readConfigFromFile("testdata/libpod.conf")
+ assert.NotNil(t, libpodConfig)
+ assert.Nil(t, err)
+ libpodConfig.StateType = define.InvalidStateStore
+ libpodConfig.StorageConfig = storage.StoreOptions{}
+
+ emptyConfig, err := readConfigFromFile("testdata/empty.conf")
+ assert.NotNil(t, emptyConfig)
+ assert.Nil(t, err)
+
+ err = emptyConfig.mergeConfig(libpodConfig)
+ assert.Nil(t, err)
+
+ equal := reflect.DeepEqual(emptyConfig, libpodConfig)
+ assert.True(t, equal)
+}
diff --git a/libpod/config/default.go b/libpod/config/default.go
new file mode 100644
index 000000000..5decaeab7
--- /dev/null
+++ b/libpod/config/default.go
@@ -0,0 +1,152 @@
+package config
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/libpod/events"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/containers/libpod/pkg/rootless"
+ "github.com/containers/libpod/pkg/util"
+ "github.com/containers/storage"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // _defaultGraphRoot points to the default path of the graph root.
+ _defaultGraphRoot = "/var/lib/containers/storage"
+ // _defaultRootlessSignaturePolicyPath points to the default path of the
+ // rootless policy.json file.
+ _defaultRootlessSignaturePolicyPath = ".config/containers/policy.json"
+)
+
+// defaultConfigFromMemory returns a default libpod configuration. Note that the
+// config is different for root and rootless. It also parses the storage.conf.
+func defaultConfigFromMemory() (*Config, error) {
+ c := new(Config)
+ if tmp, err := defaultTmpDir(); err != nil {
+ return nil, err
+ } else {
+ c.TmpDir = tmp
+ }
+ c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log")
+
+ storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
+ if err != nil {
+ return nil, err
+ }
+ if storeOpts.GraphRoot == "" {
+ logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot)
+ storeOpts.GraphRoot = _defaultGraphRoot
+ }
+ c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod")
+ c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
+ c.StorageConfig = storeOpts
+
+ c.ImageDefaultTransport = _defaultTransport
+ c.StateType = define.BoltDBStateStore
+ c.OCIRuntime = "runc"
+
+ // If we're running on cgroups v2, default to using crun.
+ if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
+ c.OCIRuntime = "crun"
+ }
+
+ c.OCIRuntimes = map[string][]string{
+ "runc": {
+ "/usr/bin/runc",
+ "/usr/sbin/runc",
+ "/usr/local/bin/runc",
+ "/usr/local/sbin/runc",
+ "/sbin/runc",
+ "/bin/runc",
+ "/usr/lib/cri-o-runc/sbin/runc",
+ "/run/current-system/sw/bin/runc",
+ },
+ "crun": {
+ "/usr/bin/crun",
+ "/usr/sbin/crun",
+ "/usr/local/bin/crun",
+ "/usr/local/sbin/crun",
+ "/sbin/crun",
+ "/bin/crun",
+ "/run/current-system/sw/bin/crun",
+ },
+ }
+ c.ConmonPath = []string{
+ "/usr/libexec/podman/conmon",
+ "/usr/local/libexec/podman/conmon",
+ "/usr/local/lib/podman/conmon",
+ "/usr/bin/conmon",
+ "/usr/sbin/conmon",
+ "/usr/local/bin/conmon",
+ "/usr/local/sbin/conmon",
+ "/run/current-system/sw/bin/conmon",
+ }
+ c.ConmonEnvVars = []string{
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ }
+ c.RuntimeSupportsJSON = []string{
+ "crun",
+ "runc",
+ }
+ c.RuntimeSupportsNoCgroups = []string{"crun"}
+ c.InitPath = define.DefaultInitPath
+ c.CgroupManager = define.SystemdCgroupsManager
+ c.MaxLogSize = -1
+ c.NoPivotRoot = false
+ c.CNIConfigDir = _etcDir + "/cni/net.d/"
+ c.CNIPluginDir = []string{
+ "/usr/libexec/cni",
+ "/usr/lib/cni",
+ "/usr/local/lib/cni",
+ "/opt/cni/bin",
+ }
+ c.CNIDefaultNetwork = "podman"
+ c.InfraCommand = define.DefaultInfraCommand
+ c.InfraImage = define.DefaultInfraImage
+ c.EnablePortReservation = true
+ c.EnableLabeling = true
+ c.NumLocks = 2048
+ c.EventsLogger = events.DefaultEventerType.String()
+ c.DetachKeys = define.DefaultDetachKeys
+ // TODO - ideally we should expose a `type LockType string` along with
+ // constants.
+ c.LockType = "shm"
+
+ if rootless.IsRootless() {
+ home, err := util.HomeDir()
+ if err != nil {
+ return nil, err
+ }
+ sigPath := filepath.Join(home, _defaultRootlessSignaturePolicyPath)
+ if _, err := os.Stat(sigPath); err == nil {
+ c.SignaturePolicyPath = sigPath
+ }
+ }
+ return c, nil
+}
+
+func defaultTmpDir() (string, error) {
+ if !rootless.IsRootless() {
+ return "/var/run/libpod", nil
+ }
+
+ runtimeDir, err := util.GetRuntimeDir()
+ if err != nil {
+ return "", err
+ }
+ libpodRuntimeDir := filepath.Join(runtimeDir, "libpod")
+
+ if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
+ if !os.IsExist(err) {
+ return "", errors.Wrapf(err, "cannot mkdir %s", libpodRuntimeDir)
+ } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
+			// The directory already exists, just set the sticky bit
+ return "", errors.Wrapf(err, "could not set sticky bit on %s", libpodRuntimeDir)
+ }
+ }
+ return filepath.Join(libpodRuntimeDir, "tmp"), nil
+}
diff --git a/libpod/config/merge.go b/libpod/config/merge.go
new file mode 100644
index 000000000..798a63da7
--- /dev/null
+++ b/libpod/config/merge.go
@@ -0,0 +1,183 @@
+package config
+
+import (
+ "path/filepath"
+
+ "github.com/containers/libpod/libpod/define"
+ "github.com/sirupsen/logrus"
+)
+
+// mergeConfig merges the other config into the current one. Note that a field of the
+// other config is only merged when it's not already set in the current one.
+//
+// Note that the StateType and the StorageConfig will NOT be changed.
+func (c *Config) mergeConfig(other *Config) error {
+ // strings
+ c.CgroupManager = mergeStrings(c.CgroupManager, other.CgroupManager)
+ c.CNIConfigDir = mergeStrings(c.CNIConfigDir, other.CNIConfigDir)
+ c.CNIDefaultNetwork = mergeStrings(c.CNIDefaultNetwork, other.CNIDefaultNetwork)
+ c.DefaultMountsFile = mergeStrings(c.DefaultMountsFile, other.DefaultMountsFile)
+ c.DetachKeys = mergeStrings(c.DetachKeys, other.DetachKeys)
+ c.EventsLogFilePath = mergeStrings(c.EventsLogFilePath, other.EventsLogFilePath)
+ c.EventsLogger = mergeStrings(c.EventsLogger, other.EventsLogger)
+ c.ImageDefaultTransport = mergeStrings(c.ImageDefaultTransport, other.ImageDefaultTransport)
+ c.InfraCommand = mergeStrings(c.InfraCommand, other.InfraCommand)
+ c.InfraImage = mergeStrings(c.InfraImage, other.InfraImage)
+ c.InitPath = mergeStrings(c.InitPath, other.InitPath)
+ c.LockType = mergeStrings(c.LockType, other.LockType)
+ c.Namespace = mergeStrings(c.Namespace, other.Namespace)
+ c.NetworkCmdPath = mergeStrings(c.NetworkCmdPath, other.NetworkCmdPath)
+ c.OCIRuntime = mergeStrings(c.OCIRuntime, other.OCIRuntime)
+ c.SignaturePolicyPath = mergeStrings(c.SignaturePolicyPath, other.SignaturePolicyPath)
+ c.StaticDir = mergeStrings(c.StaticDir, other.StaticDir)
+ c.TmpDir = mergeStrings(c.TmpDir, other.TmpDir)
+ c.VolumePath = mergeStrings(c.VolumePath, other.VolumePath)
+
+ // string map of slices
+ c.OCIRuntimes = mergeStringMaps(c.OCIRuntimes, other.OCIRuntimes)
+
+ // string slices
+ c.CNIPluginDir = mergeStringSlices(c.CNIPluginDir, other.CNIPluginDir)
+ c.ConmonEnvVars = mergeStringSlices(c.ConmonEnvVars, other.ConmonEnvVars)
+ c.ConmonPath = mergeStringSlices(c.ConmonPath, other.ConmonPath)
+ c.HooksDir = mergeStringSlices(c.HooksDir, other.HooksDir)
+ c.RuntimePath = mergeStringSlices(c.RuntimePath, other.RuntimePath)
+ c.RuntimeSupportsJSON = mergeStringSlices(c.RuntimeSupportsJSON, other.RuntimeSupportsJSON)
+ c.RuntimeSupportsNoCgroups = mergeStringSlices(c.RuntimeSupportsNoCgroups, other.RuntimeSupportsNoCgroups)
+
+ // int64s
+ c.MaxLogSize = mergeInt64s(c.MaxLogSize, other.MaxLogSize)
+
+ // uint32s
+ c.NumLocks = mergeUint32s(c.NumLocks, other.NumLocks)
+
+ // bools
+ c.EnableLabeling = mergeBools(c.EnableLabeling, other.EnableLabeling)
+ c.EnablePortReservation = mergeBools(c.EnablePortReservation, other.EnablePortReservation)
+ c.NoPivotRoot = mergeBools(c.NoPivotRoot, other.NoPivotRoot)
+ c.SDNotify = mergeBools(c.SDNotify, other.SDNotify)
+
+ // state type
+ if c.StateType == define.InvalidStateStore {
+ c.StateType = other.StateType
+ }
+
+ // store options - need to check all fields since some configs might only
+ // set it partially
+ c.StorageConfig.RunRoot = mergeStrings(c.StorageConfig.RunRoot, other.StorageConfig.RunRoot)
+ c.StorageConfig.GraphRoot = mergeStrings(c.StorageConfig.GraphRoot, other.StorageConfig.GraphRoot)
+ c.StorageConfig.GraphDriverName = mergeStrings(c.StorageConfig.GraphDriverName, other.StorageConfig.GraphDriverName)
+ c.StorageConfig.GraphDriverOptions = mergeStringSlices(c.StorageConfig.GraphDriverOptions, other.StorageConfig.GraphDriverOptions)
+ if c.StorageConfig.UIDMap == nil {
+ c.StorageConfig.UIDMap = other.StorageConfig.UIDMap
+ }
+ if c.StorageConfig.GIDMap == nil {
+ c.StorageConfig.GIDMap = other.StorageConfig.GIDMap
+ }
+
+ // backwards compat *Set fields
+ c.StorageConfigRunRootSet = mergeBools(c.StorageConfigRunRootSet, other.StorageConfigRunRootSet)
+ c.StorageConfigGraphRootSet = mergeBools(c.StorageConfigGraphRootSet, other.StorageConfigGraphRootSet)
+ c.StorageConfigGraphDriverNameSet = mergeBools(c.StorageConfigGraphDriverNameSet, other.StorageConfigGraphDriverNameSet)
+ c.VolumePathSet = mergeBools(c.VolumePathSet, other.VolumePathSet)
+ c.StaticDirSet = mergeBools(c.StaticDirSet, other.StaticDirSet)
+ c.TmpDirSet = mergeBools(c.TmpDirSet, other.TmpDirSet)
+
+ return nil
+}
+
+// MergeDBConfig merges the configuration from the database.
+func (c *Config) MergeDBConfig(dbConfig *DBConfig) error {
+
+ if !c.StorageConfigRunRootSet && dbConfig.StorageTmp != "" {
+ if c.StorageConfig.RunRoot != dbConfig.StorageTmp &&
+ c.StorageConfig.RunRoot != "" {
+ logrus.Debugf("Overriding run root %q with %q from database",
+ c.StorageConfig.RunRoot, dbConfig.StorageTmp)
+ }
+ c.StorageConfig.RunRoot = dbConfig.StorageTmp
+ }
+
+ if !c.StorageConfigGraphRootSet && dbConfig.StorageRoot != "" {
+ if c.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
+ c.StorageConfig.GraphRoot != "" {
+ logrus.Debugf("Overriding graph root %q with %q from database",
+ c.StorageConfig.GraphRoot, dbConfig.StorageRoot)
+ }
+ c.StorageConfig.GraphRoot = dbConfig.StorageRoot
+ }
+
+ if !c.StorageConfigGraphDriverNameSet && dbConfig.GraphDriver != "" {
+ if c.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
+ c.StorageConfig.GraphDriverName != "" {
+ logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
+ c.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
+ }
+ c.StorageConfig.GraphDriverName = dbConfig.GraphDriver
+ }
+
+ if !c.StaticDirSet && dbConfig.LibpodRoot != "" {
+ if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
+ logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
+ }
+ c.StaticDir = dbConfig.LibpodRoot
+ }
+
+ if !c.TmpDirSet && dbConfig.LibpodTmp != "" {
+ if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
+ logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
+ }
+ c.TmpDir = dbConfig.LibpodTmp
+ c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log")
+ }
+
+ if !c.VolumePathSet && dbConfig.VolumePath != "" {
+ if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
+ logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
+ }
+ c.VolumePath = dbConfig.VolumePath
+ }
+ return nil
+}
+
+func mergeStrings(a, b string) string {
+ if a == "" {
+ return b
+ }
+ return a
+}
+
+func mergeStringSlices(a, b []string) []string {
+ if len(a) == 0 && b != nil {
+ return b
+ }
+ return a
+}
+
+func mergeStringMaps(a, b map[string][]string) map[string][]string {
+ if len(a) == 0 && b != nil {
+ return b
+ }
+ return a
+}
+
+func mergeInt64s(a, b int64) int64 {
+ if a == 0 {
+ return b
+ }
+ return a
+}
+
+func mergeUint32s(a, b uint32) uint32 {
+ if a == 0 {
+ return b
+ }
+ return a
+}
+
+func mergeBools(a, b bool) bool {
+ if !a {
+ return b
+ }
+ return a
+}
diff --git a/libpod/config/merge_test.go b/libpod/config/merge_test.go
new file mode 100644
index 000000000..eb450b273
--- /dev/null
+++ b/libpod/config/merge_test.go
@@ -0,0 +1,157 @@
+package config
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMergeStrings(t *testing.T) {
+ testData := []struct {
+ a string
+ b string
+ res string
+ }{
+ {"", "", ""},
+ {"a", "", "a"},
+ {"a", "b", "a"},
+ {"", "b", "b"},
+ }
+ for _, data := range testData {
+ res := mergeStrings(data.a, data.b)
+ assert.Equal(t, data.res, res)
+ }
+}
+
+func TestMergeStringSlices(t *testing.T) {
+ testData := []struct {
+ a []string
+ b []string
+ res []string
+ }{
+ {
+ nil, nil, nil,
+ },
+ {
+ nil,
+ []string{},
+ []string{},
+ },
+ {
+ []string{},
+ nil,
+ []string{},
+ },
+ {
+ []string{},
+ []string{},
+ []string{},
+ },
+ {
+ []string{"a"},
+ []string{},
+ []string{"a"},
+ },
+ {
+ []string{"a"},
+ []string{"b"},
+ []string{"a"},
+ },
+ {
+ []string{},
+ []string{"b"},
+ []string{"b"},
+ },
+ }
+ for _, data := range testData {
+ res := mergeStringSlices(data.a, data.b)
+ assert.Equal(t, data.res, res)
+ }
+}
+
+func TestMergeStringMaps(t *testing.T) {
+ testData := []struct {
+ a map[string][]string
+ b map[string][]string
+ res map[string][]string
+ }{
+ {
+ nil, nil, nil,
+ },
+ {
+ nil,
+ map[string][]string{},
+ map[string][]string{}},
+ {
+ map[string][]string{"a": {"a"}},
+ nil,
+ map[string][]string{"a": {"a"}},
+ },
+ {
+ nil,
+ map[string][]string{"b": {"b"}},
+ map[string][]string{"b": {"b"}},
+ },
+ {
+ map[string][]string{"a": {"a"}},
+ map[string][]string{"b": {"b"}},
+ map[string][]string{"a": {"a"}},
+ },
+ }
+ for _, data := range testData {
+ res := mergeStringMaps(data.a, data.b)
+ assert.Equal(t, data.res, res)
+ }
+}
+
+func TestMergeInts64(t *testing.T) {
+ testData := []struct {
+ a int64
+ b int64
+ res int64
+ }{
+ {int64(0), int64(0), int64(0)},
+ {int64(1), int64(0), int64(1)},
+ {int64(0), int64(1), int64(1)},
+ {int64(2), int64(1), int64(2)},
+ {int64(-1), int64(1), int64(-1)},
+ {int64(0), int64(-1), int64(-1)},
+ }
+ for _, data := range testData {
+ res := mergeInt64s(data.a, data.b)
+ assert.Equal(t, data.res, res)
+ }
+}
+func TestMergeUint32(t *testing.T) {
+ testData := []struct {
+ a uint32
+ b uint32
+ res uint32
+ }{
+ {uint32(0), uint32(0), uint32(0)},
+ {uint32(1), uint32(0), uint32(1)},
+ {uint32(0), uint32(1), uint32(1)},
+ {uint32(2), uint32(1), uint32(2)},
+ }
+ for _, data := range testData {
+ res := mergeUint32s(data.a, data.b)
+ assert.Equal(t, data.res, res)
+ }
+}
+
+func TestMergeBools(t *testing.T) {
+ testData := []struct {
+ a bool
+ b bool
+ res bool
+ }{
+ {false, false, false},
+ {true, false, true},
+ {false, true, true},
+ {true, true, true},
+ }
+ for _, data := range testData {
+ res := mergeBools(data.a, data.b)
+ assert.Equal(t, data.res, res)
+ }
+}
diff --git a/libpod/config/testdata/empty.conf b/libpod/config/testdata/empty.conf
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libpod/config/testdata/empty.conf
diff --git a/libpod/config/testdata/libpod.conf b/libpod/config/testdata/libpod.conf
new file mode 120000
index 000000000..17d09fe4a
--- /dev/null
+++ b/libpod/config/testdata/libpod.conf
@@ -0,0 +1 @@
+../../../libpod.conf \ No newline at end of file
diff --git a/libpod/container.go b/libpod/container.go
index 5a7cf202b..4f7fc067e 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -138,6 +138,10 @@ type Container struct {
// being checkpointed. If requestedIP is set it will be used instead
// of config.StaticIP.
requestedIP net.IP
+ // A restored container should have the same MAC address as before
+ // being checkpointed. If requestedMAC is set it will be used instead
+ // of config.StaticMAC.
+ requestedMAC net.HardwareAddr
// This is true if a container is restored from a checkpoint.
restoreFromCheckpoint bool
@@ -296,6 +300,10 @@ type ContainerConfig struct {
// This cannot be set unless CreateNetNS is set.
// If not set, the container will be dynamically assigned an IP by CNI.
StaticIP net.IP `json:"staticIP"`
+ // StaticMAC is a static MAC to request for the container.
+ // This cannot be set unless CreateNetNS is set.
+ // If not set, the container will be dynamically assigned a MAC by CNI.
+ StaticMAC net.HardwareAddr `json:"staticMAC"`
// PortMappings are the ports forwarded to the container's network
// namespace
// These are not used unless CreateNetNS is true
@@ -1059,9 +1067,9 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
// CGroupPath returns a cgroups "path" for a given container.
func (c *Container) CGroupPath() (string, error) {
switch c.runtime.config.CgroupManager {
- case CgroupfsCgroupsManager:
+ case define.CgroupfsCgroupsManager:
return filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID())), nil
- case SystemdCgroupsManager:
+ case define.SystemdCgroupsManager:
if rootless.IsRootless() {
uid := rootless.GetRootlessUID()
return filepath.Join(c.config.CgroupParent, fmt.Sprintf("user-%d.slice/user@%d.service/user.slice", uid, uid), createUnitName("libpod", c.ID())), nil
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 1b2d52ce3..b8cfe02f6 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -1,6 +1,7 @@
package libpod
import (
+ "bufio"
"context"
"io"
"io/ioutil"
@@ -361,7 +362,7 @@ type AttachStreams struct {
// ErrorStream will be attached to container's STDERR
ErrorStream io.WriteCloser
// InputStream will be attached to container's STDIN
- InputStream io.Reader
+ InputStream *bufio.Reader
// AttachOutput is whether to attach to STDOUT
// If false, stdout will not be attached
AttachOutput bool
@@ -793,6 +794,11 @@ type ContainerCheckpointOptions struct {
// important to be able to restore a container multiple
// times with '--import --name'.
IgnoreStaticIP bool
+ // IgnoreStaticMAC tells the API to ignore the MAC set
+ // during 'podman run' with '--mac-address'. This is especially
+ // important to be able to restore a container multiple
+ // times with '--import --name'.
+ IgnoreStaticMAC bool
}
// Checkpoint checkpoints a container
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index d28d37937..66aca23ed 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -1322,9 +1322,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
// Need to check if it's the default, and not print if so.
defaultCgroupParent := ""
switch c.runtime.config.CgroupManager {
- case CgroupfsCgroupsManager:
+ case define.CgroupfsCgroupsManager:
defaultCgroupParent = CgroupfsDefaultCgroupParent
- case SystemdCgroupsManager:
+ case define.SystemdCgroupsManager:
defaultCgroupParent = SystemdDefaultCgroupParent
}
if c.config.CgroupParent != defaultCgroupParent {
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 283d38a0f..26d6771b0 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -794,6 +794,15 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
c.config.StaticIP = nil
}
+ // If a container is restored multiple times from an exported checkpoint with
+ // the help of '--import --name', the restore will fail if during 'podman run'
+ // a static container MAC address was set with '--mac-address'. The user
+ // can tell the restore process to ignore the static MAC with
+ // '--ignore-static-mac'
+ if options.IgnoreStaticMAC {
+ c.config.StaticMAC = nil
+ }
+
// Read network configuration from checkpoint
// Currently only one interface with one IP is supported.
networkStatusFile, err := os.Open(filepath.Join(c.bundlePath(), "network.status"))
@@ -803,9 +812,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
// TODO: This implicit restoring with or without IP depending on an
// unrelated restore parameter (--name) does not seem like the
// best solution.
- if err == nil && options.Name == "" && !options.IgnoreStaticIP {
+ if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
- // container with the same IP address as during checkpointing.
+ // container with the same IP address / MAC address as during checkpointing.
defer networkStatusFile.Close()
var networkStatus []*cnitypes.Result
networkJSON, err := ioutil.ReadAll(networkStatusFile)
@@ -815,16 +824,35 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
return err
}
- // Take the first IP address
- var IP net.IP
- if len(networkStatus) > 0 {
- if len(networkStatus[0].IPs) > 0 {
- IP = networkStatus[0].IPs[0].Address.IP
+ if !options.IgnoreStaticIP {
+ // Take the first IP address
+ var IP net.IP
+ if len(networkStatus) > 0 {
+ if len(networkStatus[0].IPs) > 0 {
+ IP = networkStatus[0].IPs[0].Address.IP
+ }
+ }
+ if IP != nil {
+ // Tell CNI which IP address we want.
+ c.requestedIP = IP
}
}
- if IP != nil {
- // Tell CNI which IP address we want.
- c.requestedIP = IP
+ if !options.IgnoreStaticMAC {
+ // Take the first device with a defined sandbox.
+ var MAC net.HardwareAddr
+ for _, n := range networkStatus[0].Interfaces {
+ if n.Sandbox != "" {
+ MAC, err = net.ParseMAC(n.Mac)
+ if err != nil {
+ return errors.Wrapf(err, "failed to parse MAC %v", n.Mac)
+ }
+ break
+ }
+ }
+ if MAC != nil {
+ // Tell CNI which MAC address we want.
+ c.requestedMAC = MAC
+ }
}
}
@@ -1088,7 +1116,7 @@ func (c *Container) makeBindMounts() error {
}
// Add Secret Mounts
- secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless())
+ secretMounts := secrets.SecretMountsWithUIDGID(c.config.MountLabel, c.state.RunDir, c.runtime.config.DefaultMountsFile, c.state.RunDir, c.RootUID(), c.RootGID(), rootless.IsRootless(), false)
for _, mount := range secretMounts {
if _, ok := c.state.BindMounts[mount.Destination]; !ok {
c.state.BindMounts[mount.Destination] = mount.Source
@@ -1314,7 +1342,7 @@ func (c *Container) copyOwnerAndPerms(source, dest string) error {
// Teardown CNI config on refresh
func (c *Container) refreshCNI() error {
// Let's try and delete any lingering network config...
- podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP)
+ podNetwork := c.runtime.getPodNetwork(c.ID(), c.config.Name, "", c.config.Networks, c.config.PortMappings, c.config.StaticIP, c.config.StaticMAC)
return c.runtime.netPlugin.TearDownPod(podNetwork)
}
@@ -1326,14 +1354,14 @@ func (c *Container) getOCICgroupPath() (string, error) {
}
if (rootless.IsRootless() && !unified) || c.config.NoCgroups {
return "", nil
- } else if c.runtime.config.CgroupManager == SystemdCgroupsManager {
+ } else if c.runtime.config.CgroupManager == define.SystemdCgroupsManager {
// When runc is set to use Systemd as a cgroup manager, it
// expects cgroups to be passed as follows:
// slice:prefix:name
systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID())
logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups)
return systemdCgroups, nil
- } else if c.runtime.config.CgroupManager == CgroupfsCgroupsManager {
+ } else if c.runtime.config.CgroupManager == define.CgroupfsCgroupsManager {
cgroupPath, err := c.CGroupPath()
if err != nil {
return "", err
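
As a side note on the restore path above: the recorded MAC comes out of network.status as a string and is converted with the standard library's `net.ParseMAC` before being handed to CNI via `requestedMAC`. A small self-contained sketch of just that conversion step (the address literal is made up for illustration):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// The restore code parses the MAC of the first interface that has a
	// sandbox; this only demonstrates the conversion step in isolation.
	mac, err := net.ParseMAC("0a:58:0a:58:00:02")
	if err != nil {
		fmt.Println("failed to parse MAC:", err)
		return
	}
	fmt.Println("MAC to request from CNI:", mac)
}
```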
diff --git a/libpod/container_log_linux.go b/libpod/container_log_linux.go
index 8a87a8796..c4acc3d4f 100644
--- a/libpod/container_log_linux.go
+++ b/libpod/container_log_linux.go
@@ -6,6 +6,7 @@ package libpod
import (
"fmt"
"io"
+ "math"
"strings"
"time"
@@ -30,7 +31,11 @@ const (
func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
var config journal.JournalReaderConfig
- config.NumFromTail = options.Tail
+ if options.Tail < 0 {
+ config.NumFromTail = math.MaxUint64
+ } else {
+ config.NumFromTail = uint64(options.Tail)
+ }
config.Formatter = journalFormatter
defaultTime := time.Time{}
if options.Since != defaultTime {
@@ -54,7 +59,7 @@ func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *l
if r == nil {
return errors.Errorf("journal reader creation failed")
}
- if options.Tail == 0 {
+ if options.Tail == math.MaxInt64 {
r.Rewind()
}
diff --git a/libpod/define/config.go b/libpod/define/config.go
index c66f67feb..8bd59be75 100644
--- a/libpod/define/config.go
+++ b/libpod/define/config.go
@@ -7,11 +7,23 @@ var (
DefaultInfraImage = "k8s.gcr.io/pause:3.1"
// DefaultInfraCommand to be run in an infra container
DefaultInfraCommand = "/pause"
+ // DefaultSHMLockPath is the default path for SHM locks
+ DefaultSHMLockPath = "/libpod_lock"
+ // DefaultRootlessSHMLockPath is the default path for rootless SHM locks
+ DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+ // DefaultDetachKeys is the default keys sequence for detaching a
+ // container
+ DefaultDetachKeys = "ctrl-p,ctrl-q"
)
-// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
-// before sending the kill signal
-const CtrRemoveTimeout = 10
+const (
+ // CtrRemoveTimeout is the default number of seconds to wait after stopping a container
+ // before sending the kill signal
+ CtrRemoveTimeout = 10
+ // DefaultTransport is a prefix that we apply to an image name
+ // to check docker hub first for the image
+ DefaultTransport = "docker://"
+)
// InfoData holds the info type, i.e store, host etc and the data for each type
type InfoData struct {
diff --git a/libpod/define/runtime.go b/libpod/define/runtime.go
new file mode 100644
index 000000000..4d8c6cb4d
--- /dev/null
+++ b/libpod/define/runtime.go
@@ -0,0 +1,37 @@
+package define
+
+import "time"
+
+// RuntimeStateStore is a constant indicating which state store implementation
+// should be used by libpod
+type RuntimeStateStore int
+
+const (
+ // InvalidStateStore is an invalid state store
+ InvalidStateStore RuntimeStateStore = iota
+ // InMemoryStateStore is an in-memory state that will not persist data
+ // on containers and pods between libpod instances or after system
+ // reboot
+ InMemoryStateStore RuntimeStateStore = iota
+ // SQLiteStateStore is a state backed by a SQLite database
+ // It is presently disabled
+ SQLiteStateStore RuntimeStateStore = iota
+ // BoltDBStateStore is a state backed by a BoltDB database
+ BoltDBStateStore RuntimeStateStore = iota
+ // CgroupfsCgroupsManager represents cgroupfs native cgroup manager
+ CgroupfsCgroupsManager = "cgroupfs"
+ // SystemdCgroupsManager represents systemd native cgroup manager
+ SystemdCgroupsManager = "systemd"
+ // ContainerCreateTimeout is the timeout before we decide we've failed
+ // to create a container.
+ // TODO: Make this generic - all OCI runtime operations should use the
+ // same timeout, this one.
+ // TODO: Consider dropping from 240 to 60 seconds. I don't think waiting
+ // 4 minutes versus 1 minute makes a real difference.
+ ContainerCreateTimeout = 240 * time.Second
+ // DefaultShmSize is the default shm size
+ DefaultShmSize = 64 * 1024 * 1024
+ // NsRunDir is the default directory in which running network namespaces
+ // are stored
+ NsRunDir = "/var/run/netns"
+)
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 470c76959..9e6fffc29 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -54,6 +54,7 @@ func (e EventJournalD) Write(ee Event) error {
// Read reads events from the journal and sends qualified events to the event channel
func (e EventJournalD) Read(options ReadOptions) error {
+ defer close(options.EventChannel)
eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
if err != nil {
return errors.Wrapf(err, "failed to generate event options")
@@ -87,7 +88,6 @@ func (e EventJournalD) Read(options ReadOptions) error {
if err != nil {
return err
}
- defer close(options.EventChannel)
for {
if _, err := j.Next(); err != nil {
return err
diff --git a/libpod/events/logfile.go b/libpod/events/logfile.go
index 4b65b0ad0..93e6fa3c9 100644
--- a/libpod/events/logfile.go
+++ b/libpod/events/logfile.go
@@ -41,6 +41,7 @@ func (e EventLogFile) Write(ee Event) error {
// Reads from the log file
func (e EventLogFile) Read(options ReadOptions) error {
+ defer close(options.EventChannel)
eventOptions, err := generateEventOptions(options.Filters, options.Since, options.Until)
if err != nil {
return errors.Wrapf(err, "unable to generate event options")
@@ -68,7 +69,6 @@ func (e EventLogFile) Read(options ReadOptions) error {
options.EventChannel <- event
}
}
- close(options.EventChannel)
return nil
}
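
Moving close(options.EventChannel) into a defer at the top of Read guarantees the channel is closed on every return path, including the early error returns, so a consumer ranging over it can never block forever. A small, self-contained sketch of the pattern (names are illustrative, not the libpod API):

package main

import "fmt"

// read sends events to ch and guarantees the channel is closed on every
// return path, errors included, so a consumer ranging over ch always exits.
func read(ch chan<- string, fail bool) error {
	defer close(ch)
	if fail {
		return fmt.Errorf("failed to generate event options")
	}
	for _, e := range []string{"create", "start", "stop"} {
		ch <- e
	}
	return nil
}

func main() {
	ch := make(chan string)
	go func() {
		if err := read(ch, false); err != nil {
			fmt.Println("read error:", err)
		}
	}()
	for e := range ch { // terminates because read closes ch via defer
		fmt.Println("event:", e)
	}
}
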
diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go
index 68ffc2349..e9c950713 100644
--- a/libpod/healthcheck.go
+++ b/libpod/healthcheck.go
@@ -133,7 +133,9 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
streams := new(AttachStreams)
streams.OutputStream = hcw
streams.ErrorStream = hcw
- streams.InputStream = os.Stdin
+
+ streams.InputStream = bufio.NewReader(os.Stdin)
+
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
diff --git a/libpod/image/pull.go b/libpod/image/pull.go
index 7f5dc33b9..99c11e3ff 100644
--- a/libpod/image/pull.go
+++ b/libpod/image/pull.go
@@ -230,7 +230,12 @@ func (ir *Runtime) pullImageFromHeuristicSource(ctx context.Context, inputName s
sc.BlobInfoCacheDir = filepath.Join(ir.store.GraphRoot(), "cache")
srcRef, err := alltransports.ParseImageName(inputName)
if err != nil {
- // could be trying to pull from registry with short name
+ // We might be pulling with an unqualified image reference, in which case
+ // we need to make sure that we're not using any other transport.
+ srcTransport := alltransports.TransportFromImageName(inputName)
+ if srcTransport != nil && srcTransport.Name() != DockerTransport {
+ return nil, err
+ }
goal, err = ir.pullGoalFromPossiblyUnqualifiedName(inputName)
if err != nil {
return nil, errors.Wrap(err, "error getting default registries to try")
@@ -347,6 +352,7 @@ func (ir *Runtime) pullGoalFromPossiblyUnqualifiedName(inputName string) (*pullG
if err != nil {
return nil, err
}
+
if decomposedImage.hasRegistry {
srcRef, err := docker.ParseReference("//" + inputName)
if err != nil {
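
The pull fallback now only searches the configured registries when the input either has no transport prefix at all or explicitly names the docker transport; a name with any other transport that failed to parse becomes a hard error. A simplified sketch of that decision, using a hand-rolled transport check instead of the containers/image alltransports API:

package main

import (
	"fmt"
	"strings"
)

// knownTransports is a tiny stand-in for the transports registered with
// containers/image; only the prefix before the first ':' is checked.
var knownTransports = map[string]bool{
	"docker":         true,
	"docker-archive": true,
	"oci":            true,
	"dir":            true,
}

// transportOf returns the recognized transport prefix of an image name, or
// "" for names without one (e.g. "fedora" or "quay.io/podman/stable:latest").
func transportOf(name string) string {
	if i := strings.Index(name, ":"); i > 0 && knownTransports[name[:i]] {
		return name[:i]
	}
	return ""
}

// shouldSearchRegistries mirrors the fallback above: only names without a
// transport, or with the docker transport, may be resolved against the
// configured registries; any other transport is treated as a hard error.
func shouldSearchRegistries(name string) bool {
	t := transportOf(name)
	return t == "" || t == "docker"
}

func main() {
	for _, name := range []string{"fedora", "quay.io/podman/stable:latest", "oci:/no/such/layout"} {
		fmt.Printf("%-30s search registries: %v\n", name, shouldSearchRegistries(name))
	}
}
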
diff --git a/libpod/in_memory_state.go b/libpod/in_memory_state.go
index 5ab258772..2144671a5 100644
--- a/libpod/in_memory_state.go
+++ b/libpod/in_memory_state.go
@@ -3,6 +3,7 @@ package libpod
import (
"strings"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/registrar"
"github.com/containers/storage/pkg/truncindex"
@@ -80,8 +81,8 @@ func (s *InMemoryState) Refresh() error {
// GetDBConfig is not implemented for in-memory state.
// As we do not store a config, return an empty one.
-func (s *InMemoryState) GetDBConfig() (*DBConfig, error) {
- return &DBConfig{}, nil
+func (s *InMemoryState) GetDBConfig() (*config.DBConfig, error) {
+ return &config.DBConfig{}, nil
}
// ValidateDBConfig is not implemented for the in-memory state.
diff --git a/libpod/kube.go b/libpod/kube.go
index d0e7baf95..47a77991e 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -487,13 +487,16 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
if err := c.syncContainer(); err != nil {
return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
}
+
logrus.Debugf("Looking in container for user: %s", c.User())
- u, err := lookup.GetUser(c.state.Mountpoint, c.User())
+ execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.User(), nil)
if err != nil {
return nil, err
}
- user := int64(u.Uid)
- sc.RunAsUser = &user
+ uid := int64(execUser.Uid)
+ gid := int64(execUser.Gid)
+ sc.RunAsUser = &uid
+ sc.RunAsGroup = &gid
}
return &sc, nil
}
diff --git a/libpod/logs/log.go b/libpod/logs/log.go
index 0b1703567..0330df06a 100644
--- a/libpod/logs/log.go
+++ b/libpod/logs/log.go
@@ -31,7 +31,7 @@ type LogOptions struct {
Details bool
Follow bool
Since time.Time
- Tail uint64
+ Tail int64
Timestamps bool
Multi bool
WaitGroup *sync.WaitGroup
@@ -54,8 +54,10 @@ func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error
logTail []*LogLine
)
// whence 0=origin, 2=end
- if options.Tail > 0 {
+ if options.Tail >= 0 {
whence = 2
+ }
+ if options.Tail > 0 {
logTail, err = getTailLog(path, int(options.Tail))
if err != nil {
return nil, nil, err
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index daa0619a2..cba7b636a 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -28,23 +28,34 @@ import (
)
// Get an OCICNI network config
-func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP) ocicni.PodNetwork {
+func (r *Runtime) getPodNetwork(id, name, nsPath string, networks []string, ports []ocicni.PortMapping, staticIP net.IP, staticMAC net.HardwareAddr) ocicni.PodNetwork {
defaultNetwork := r.netPlugin.GetDefaultNetworkName()
network := ocicni.PodNetwork{
Name: name,
Namespace: name, // TODO is there something else we should put here? We don't know about Kube namespaces
ID: id,
NetNS: nsPath,
- Networks: networks,
RuntimeConfig: map[string]ocicni.RuntimeConfig{
defaultNetwork: {PortMappings: ports},
},
}
- if staticIP != nil {
- network.Networks = []string{defaultNetwork}
+ if staticIP != nil || staticMAC != nil {
+ network.Networks = []ocicni.NetAttachment{{Name: defaultNetwork}}
+ var rt ocicni.RuntimeConfig = ocicni.RuntimeConfig{PortMappings: ports}
+ if staticIP != nil {
+ rt.IP = staticIP.String()
+ }
+ if staticMAC != nil {
+ rt.MAC = staticMAC.String()
+ }
network.RuntimeConfig = map[string]ocicni.RuntimeConfig{
- defaultNetwork: {IP: staticIP.String(), PortMappings: ports},
+ defaultNetwork: rt,
+ }
+ } else {
+ network.Networks = make([]ocicni.NetAttachment, len(networks))
+ for i, netName := range networks {
+ network.Networks[i].Name = netName
}
}
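
With the updated ocicni API, networks are passed as NetAttachment values and a static IP or MAC travels in the per-network RuntimeConfig of the default network. A sketch of the request construction with pared-down stand-in types, so it runs without the ocicni module:

package main

import (
	"fmt"
	"net"
)

// Pared-down stand-ins for the ocicni types used by getPodNetwork.
type netAttachment struct{ Name string }

type runtimeConfig struct {
	IP  string
	MAC string
}

type podNetwork struct {
	Networks      []netAttachment
	RuntimeConfig map[string]runtimeConfig
}

// buildPodNetwork mirrors the logic above: a static IP or MAC forces the
// default network and carries the addresses in its RuntimeConfig entry,
// otherwise the requested network names are attached as-is.
func buildPodNetwork(defaultNetwork string, networks []string, ip net.IP, mac net.HardwareAddr) podNetwork {
	pn := podNetwork{}
	if ip != nil || mac != nil {
		pn.Networks = []netAttachment{{Name: defaultNetwork}}
		rt := runtimeConfig{}
		if ip != nil {
			rt.IP = ip.String()
		}
		if mac != nil {
			rt.MAC = mac.String()
		}
		pn.RuntimeConfig = map[string]runtimeConfig{defaultNetwork: rt}
		return pn
	}
	for _, name := range networks {
		pn.Networks = append(pn.Networks, netAttachment{Name: name})
	}
	return pn
}

func main() {
	mac, _ := net.ParseMAC("92:d0:c6:0a:29:33")
	pn := buildPodNetwork("podman", nil, net.ParseIP("10.88.0.5"), mac)
	fmt.Printf("%+v\n", pn)
}
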
@@ -62,7 +73,16 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
requestedIP = ctr.config.StaticIP
}
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP)
+ var requestedMAC net.HardwareAddr
+ if ctr.requestedMAC != nil {
+ requestedMAC = ctr.requestedMAC
+ // cancel request for a specific MAC in case the container is reused later
+ ctr.requestedMAC = nil
+ } else {
+ requestedMAC = ctr.config.StaticMAC
+ }
+
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctrNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC)
results, err := r.netPlugin.SetUpPod(podNetwork)
if err != nil {
@@ -78,10 +98,10 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) ([]*cnitypes.Re
networkStatus := make([]*cnitypes.Result, 0)
for idx, r := range results {
- logrus.Debugf("[%d] CNI result: %v", idx, r.String())
- resultCurrent, err := cnitypes.GetResult(r)
+ logrus.Debugf("[%d] CNI result: %v", idx, r.Result.String())
+ resultCurrent, err := cnitypes.GetResult(r.Result)
if err != nil {
- return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.String(), err)
+ return nil, errors.Wrapf(err, "error parsing CNI plugin result %q: %v", r.Result.String(), err)
}
networkStatus = append(networkStatus, resultCurrent)
}
@@ -295,7 +315,7 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
defer close(chWait)
// wait that API socket file appears before trying to use it.
- if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout*time.Millisecond); err != nil {
+ if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil {
return errors.Wrapf(err, "waiting for slirp4nets to create the api socket file %s", apiSocket)
}
@@ -443,7 +463,16 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
requestedIP = ctr.config.StaticIP
}
- podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP)
+ var requestedMAC net.HardwareAddr
+ if ctr.requestedMAC != nil {
+ requestedMAC = ctr.requestedMAC
+ // cancel request for a specific MAC in case the container is reused later
+ ctr.requestedMAC = nil
+ } else {
+ requestedMAC = ctr.config.StaticMAC
+ }
+
+ podNetwork := r.getPodNetwork(ctr.ID(), ctr.Name(), ctr.state.NetNS.Path(), ctr.config.Networks, ctr.config.PortMappings, requestedIP, requestedMAC)
if err := r.netPlugin.TearDownPod(podNetwork); err != nil {
return errors.Wrapf(err, "error tearing down CNI namespace configuration for container %s", ctr.ID())
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index a383f6eab..46c70e7eb 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -152,7 +152,7 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c
func processDetachKeys(keys string) ([]byte, error) {
// Check the validity of the provided keys first
if len(keys) == 0 {
- keys = DefaultDetachKeys
+ return []byte{}, nil
}
detachKeys, err := term.ToBytes(keys)
if err != nil {
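
After this change an empty detach-keys string means "no detach sequence at all" instead of silently falling back to ctrl-p,ctrl-q; only a non-empty string is handed to term.ToBytes. A sketch of the behaviour with a simplified local parser standing in for term.ToBytes:

package main

import (
	"fmt"
	"strings"
)

// toBytes is a simplified stand-in for term.ToBytes: it converts a
// comma-separated sequence such as "ctrl-p,ctrl-q" into the raw bytes the
// attach loop scans for (ctrl-<letter> is letter & 0x1f).
func toBytes(keys string) ([]byte, error) {
	var out []byte
	for _, k := range strings.Split(keys, ",") {
		if strings.HasPrefix(k, "ctrl-") && len(k) == len("ctrl-")+1 {
			out = append(out, k[len(k)-1]&0x1f)
			continue
		}
		return nil, fmt.Errorf("invalid detach key %q", k)
	}
	return out, nil
}

// processDetachKeys mirrors the new behaviour: an empty string disables
// detaching entirely instead of falling back to the default sequence.
func processDetachKeys(keys string) ([]byte, error) {
	if len(keys) == 0 {
		return []byte{}, nil
	}
	return toBytes(keys)
}

func main() {
	for _, k := range []string{"", "ctrl-p,ctrl-q", "ctrl-x"} {
		b, err := processDetachKeys(k)
		fmt.Printf("%-16q -> % x, err=%v\n", k, b, err)
	}
}
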
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index d9907c5b1..026b13129 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -16,6 +16,7 @@ import (
"syscall"
"time"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/errorhandling"
@@ -58,7 +59,7 @@ type ConmonOCIRuntime struct {
// The first path that points to a valid executable will be used.
// Deliberately private. Someone should not be able to construct this outside of
// libpod.
-func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON, supportsNoCgroups bool) (OCIRuntime, error) {
+func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *config.Config, supportsJSON, supportsNoCgroups bool) (OCIRuntime, error) {
if name == "" {
return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
}
@@ -114,7 +115,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
runtime.socketsDir = filepath.Join(runtime.tmpDir, "socket")
- if runtime.cgroupManager != CgroupfsCgroupsManager && runtime.cgroupManager != SystemdCgroupsManager {
+ if runtime.cgroupManager != define.CgroupfsCgroupsManager && runtime.cgroupManager != define.SystemdCgroupsManager {
return nil, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager specified: %s", runtime.cgroupManager)
}
@@ -1092,7 +1093,7 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*o
env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
- home, err := homeDir()
+ home, err := util.HomeDir()
if err != nil {
return nil, nil, err
}
@@ -1118,7 +1119,7 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) ([]string, []*o
func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath string) []string {
// set the conmon API version to be able to use the correct sync struct keys
args := []string{"--api-version", "1"}
- if r.cgroupManager == SystemdCgroupsManager && !ctr.config.NoCgroups {
+ if r.cgroupManager == define.SystemdCgroupsManager && !ctr.config.NoCgroups {
args = append(args, "-s")
}
args = append(args, "-c", ctr.ID())
@@ -1222,7 +1223,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
if mustCreateCgroup {
cgroupParent := ctr.CgroupParent()
- if r.cgroupManager == SystemdCgroupsManager {
+ if r.cgroupManager == define.SystemdCgroupsManager {
unitName := createUnitName("libpod-conmon", ctr.ID())
realCgroupParent := cgroupParent
@@ -1345,7 +1346,7 @@ func readConmonPipeData(pipe *os.File, ociLog string) (int, error) {
return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
}
data = ss.si.Data
- case <-time.After(ContainerCreateTimeout):
+ case <-time.After(define.ContainerCreateTimeout):
return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
}
return data, nil
diff --git a/libpod/oci_conmon_unsupported.go b/libpod/oci_conmon_unsupported.go
index 77b06eed3..a2f52f527 100644
--- a/libpod/oci_conmon_unsupported.go
+++ b/libpod/oci_conmon_unsupported.go
@@ -3,6 +3,7 @@
package libpod
import (
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
)
@@ -15,7 +16,7 @@ type ConmonOCIRuntime struct {
}
// newConmonOCIRuntime is not supported on this OS.
-func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *RuntimeConfig, supportsJSON, supportsNoCgroups bool) (OCIRuntime, error) {
+func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeCfg *config.Config, supportsJSON, supportsNoCgroups bool) (OCIRuntime, error) {
return nil, define.ErrNotImplemented
}
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index cb85b153d..c1a7f1c9a 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -14,29 +14,9 @@ import (
"github.com/sirupsen/logrus"
)
-const (
- // CgroupfsCgroupsManager represents cgroupfs native cgroup manager
- CgroupfsCgroupsManager = "cgroupfs"
- // SystemdCgroupsManager represents systemd native cgroup manager
- SystemdCgroupsManager = "systemd"
-
- // ContainerCreateTimeout is the timeout before we decide we've failed
- // to create a container.
- // TODO: Make this generic - all OCI runtime operations should use the
- // same timeout, this one.
- // TODO: Consider dropping from 240 to 60 seconds. I don't think waiting
- // 4 minutes versus 1 minute makes a real difference.
- ContainerCreateTimeout = 240 * time.Second
-
- // Timeout before declaring that runtime has failed to kill a given
- // container
- killContainerTimeout = 5 * time.Second
- // DefaultShmSize is the default shm size
- DefaultShmSize = 64 * 1024 * 1024
- // NsRunDir is the default directory in which running network namespaces
- // are stored
- NsRunDir = "/var/run/netns"
-)
+// Timeout before declaring that runtime has failed to kill a given
+// container
+const killContainerTimeout = 5 * time.Second
// ociError is used to parse the OCI runtime JSON log. It is not part of the
// OCI runtime specifications, it follows what runc does
diff --git a/libpod/options.go b/libpod/options.go
index f79c75e98..00b5626b4 100644
--- a/libpod/options.go
+++ b/libpod/options.go
@@ -39,30 +39,30 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption {
if config.RunRoot != "" {
rt.config.StorageConfig.RunRoot = config.RunRoot
- rt.configuredFrom.storageRunRootSet = true
+ rt.config.StorageConfigRunRootSet = true
setField = true
}
if config.GraphRoot != "" {
rt.config.StorageConfig.GraphRoot = config.GraphRoot
- rt.configuredFrom.storageGraphRootSet = true
+ rt.config.StorageConfigGraphRootSet = true
// Also set libpod static dir, so we are a subdirectory
// of the c/storage store by default
rt.config.StaticDir = filepath.Join(config.GraphRoot, "libpod")
- rt.configuredFrom.libpodStaticDirSet = true
+ rt.config.StaticDirSet = true
// Also set libpod volume path, so we are a subdirectory
// of the c/storage store by default
rt.config.VolumePath = filepath.Join(config.GraphRoot, "volumes")
- rt.configuredFrom.volPathSet = true
+ rt.config.VolumePathSet = true
setField = true
}
if config.GraphDriverName != "" {
rt.config.StorageConfig.GraphDriverName = config.GraphDriverName
- rt.configuredFrom.storageGraphDriverSet = true
+ rt.config.StorageConfigGraphDriverNameSet = true
setField = true
}
@@ -135,13 +135,13 @@ func WithSignaturePolicy(path string) RuntimeOption {
// Please note that information is not portable between backing states.
// As such, if this differs between two libpods running on the same system,
// they will not share containers, and unspecified behavior may occur.
-func WithStateType(storeType RuntimeStateStore) RuntimeOption {
+func WithStateType(storeType define.RuntimeStateStore) RuntimeOption {
return func(rt *Runtime) error {
if rt.valid {
return define.ErrRuntimeFinalized
}
- if storeType == InvalidStateStore {
+ if storeType == define.InvalidStateStore {
return errors.Wrapf(define.ErrInvalidArg, "must provide a valid state store type")
}
@@ -224,9 +224,9 @@ func WithCgroupManager(manager string) RuntimeOption {
return define.ErrRuntimeFinalized
}
- if manager != CgroupfsCgroupsManager && manager != SystemdCgroupsManager {
+ if manager != define.CgroupfsCgroupsManager && manager != define.SystemdCgroupsManager {
return errors.Wrapf(define.ErrInvalidArg, "CGroup manager must be one of %s and %s",
- CgroupfsCgroupsManager, SystemdCgroupsManager)
+ define.CgroupfsCgroupsManager, define.SystemdCgroupsManager)
}
rt.config.CgroupManager = manager
@@ -244,7 +244,7 @@ func WithStaticDir(dir string) RuntimeOption {
}
rt.config.StaticDir = dir
- rt.configuredFrom.libpodStaticDirSet = true
+ rt.config.StaticDirSet = true
return nil
}
@@ -295,7 +295,7 @@ func WithTmpDir(dir string) RuntimeOption {
return define.ErrRuntimeFinalized
}
rt.config.TmpDir = dir
- rt.configuredFrom.libpodTmpDirSet = true
+ rt.config.TmpDirSet = true
return nil
}
@@ -395,7 +395,7 @@ func WithVolumePath(volPath string) RuntimeOption {
}
rt.config.VolumePath = volPath
- rt.configuredFrom.volPathSet = true
+ rt.config.VolumePathSet = true
return nil
}
@@ -1052,6 +1052,31 @@ func WithStaticIP(ip net.IP) CtrCreateOption {
}
}
+// WithStaticMAC indicates that the container should request a static MAC from
+// the CNI plugins.
+// It cannot be set unless WithNetNS has already been passed.
+// Further, it cannot be set if additional CNI networks to join have been
+// specified.
+func WithStaticMAC(mac net.HardwareAddr) CtrCreateOption {
+ return func(ctr *Container) error {
+ if ctr.valid {
+ return define.ErrCtrFinalized
+ }
+
+ if !ctr.config.CreateNetNS {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if the container is not creating a network namespace")
+ }
+
+ if len(ctr.config.Networks) != 0 {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set a static MAC if joining additional CNI networks")
+ }
+
+ ctr.config.StaticMAC = mac
+
+ return nil
+ }
+}
+
// WithLogDriver sets the log driver for the container
func WithLogDriver(driver string) CtrCreateOption {
return func(ctr *Container) error {
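
WithStaticMAC follows the functional-option pattern used for all CtrCreateOptions: a closure validates the request against the container's current config and mutates it before the container is finalized. A generic, self-contained sketch of the pattern with stand-in types (not the libpod signatures):

package main

import (
	"errors"
	"fmt"
	"net"
)

// Stand-ins for the container config and the CtrCreateOption pattern.
type containerConfig struct {
	CreateNetNS bool
	Networks    []string
	StaticMAC   net.HardwareAddr
}

type container struct {
	valid  bool
	config containerConfig
}

type ctrCreateOption func(*container) error

// withStaticMAC mirrors the option above: it refuses to run on a finalized
// container, requires a network namespace, and rejects extra CNI networks.
func withStaticMAC(mac net.HardwareAddr) ctrCreateOption {
	return func(ctr *container) error {
		if ctr.valid {
			return errors.New("container already finalized")
		}
		if !ctr.config.CreateNetNS {
			return errors.New("cannot set a static MAC without a network namespace")
		}
		if len(ctr.config.Networks) != 0 {
			return errors.New("cannot set a static MAC when joining additional CNI networks")
		}
		ctr.config.StaticMAC = mac
		return nil
	}
}

func newContainer(opts ...ctrCreateOption) (*container, error) {
	ctr := &container{config: containerConfig{CreateNetNS: true}}
	for _, opt := range opts {
		if err := opt(ctr); err != nil {
			return nil, err
		}
	}
	ctr.valid = true
	return ctr, nil
}

func main() {
	mac, _ := net.ParseMAC("92:d0:c6:0a:29:33")
	ctr, err := newContainer(withStaticMAC(mac))
	fmt.Println(ctr.config.StaticMAC, err)
}
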
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 7aacda482..b5895d133 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -66,13 +66,13 @@ func (p *Pod) refresh() error {
// We need to recreate the pod's cgroup
if p.config.UsePodCgroup {
switch p.runtime.config.CgroupManager {
- case SystemdCgroupsManager:
+ case define.SystemdCgroupsManager:
cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()))
if err != nil {
logrus.Errorf("Error creating CGroup for pod %s: %v", p.ID(), err)
}
p.state.CgroupPath = cgroupPath
- case CgroupfsCgroupsManager:
+ case define.CgroupfsCgroupsManager:
p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())
logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
diff --git a/libpod/runtime.go b/libpod/runtime.go
index 300477c09..42e6782e9 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -1,28 +1,21 @@
package libpod
import (
- "bytes"
"context"
"fmt"
- "io/ioutil"
"os"
- "os/exec"
- "os/user"
"path/filepath"
- "regexp"
- "strconv"
"strings"
"sync"
"syscall"
- "github.com/BurntSushi/toml"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/libpod/lock"
- "github.com/containers/libpod/pkg/cgroups"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
@@ -33,75 +26,13 @@ import (
"github.com/sirupsen/logrus"
)
-// RuntimeStateStore is a constant indicating which state store implementation
-// should be used by libpod
-type RuntimeStateStore int
-
-const (
- // InvalidStateStore is an invalid state store
- InvalidStateStore RuntimeStateStore = iota
- // InMemoryStateStore is an in-memory state that will not persist data
- // on containers and pods between libpod instances or after system
- // reboot
- InMemoryStateStore RuntimeStateStore = iota
- // SQLiteStateStore is a state backed by a SQLite database
- // It is presently disabled
- SQLiteStateStore RuntimeStateStore = iota
- // BoltDBStateStore is a state backed by a BoltDB database
- BoltDBStateStore RuntimeStateStore = iota
-)
-
-var (
- // InstallPrefix is the prefix where podman will be installed.
- // It can be overridden at build time.
- installPrefix = "/usr"
- // EtcDir is the sysconfdir where podman should look for system config files.
- // It can be overridden at build time.
- etcDir = "/etc"
-
- // SeccompDefaultPath defines the default seccomp path
- SeccompDefaultPath = installPrefix + "/share/containers/seccomp.json"
- // SeccompOverridePath if this exists it overrides the default seccomp path
- SeccompOverridePath = etcDir + "/crio/seccomp.json"
-
- // ConfigPath is the path to the libpod configuration file
- // This file is loaded to replace the builtin default config before
- // runtime options (e.g. WithStorageConfig) are applied.
- // If it is not present, the builtin default config is used instead
- // This path can be overridden when the runtime is created by using
- // NewRuntimeFromConfig() instead of NewRuntime()
- ConfigPath = installPrefix + "/share/containers/libpod.conf"
- // OverrideConfigPath is the path to an override for the default libpod
- // configuration file. If OverrideConfigPath exists, it will be used in
- // place of the configuration file pointed to by ConfigPath.
- OverrideConfigPath = etcDir + "/containers/libpod.conf"
-
- // DefaultSHMLockPath is the default path for SHM locks
- DefaultSHMLockPath = "/libpod_lock"
- // DefaultRootlessSHMLockPath is the default path for rootless SHM locks
- DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
-
- // DefaultDetachKeys is the default keys sequence for detaching a
- // container
- DefaultDetachKeys = "ctrl-p,ctrl-q"
-
- // minConmonMajor is the major version required for conmon
- minConmonMajor = 2
-
- // minConmonMinor is the minor version required for conmon
- minConmonMinor = 0
-
- // minConmonPatch is the sub-minor version required for conmon
- minConmonPatch = 1
-)
-
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime
type RuntimeOption func(*Runtime) error
// Runtime is the core libpod runtime
type Runtime struct {
- config *RuntimeConfig
+ config *config.Config
state State
store storage.Store
@@ -113,7 +44,6 @@ type Runtime struct {
conmonPath string
imageRuntime *image.Runtime
lockManager lock.Manager
- configuredFrom *runtimeConfiguredFrom
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
@@ -141,223 +71,6 @@ type Runtime struct {
noStore bool
}
-// RuntimeConfig contains configuration options used to set up the runtime
-type RuntimeConfig struct {
- // StorageConfig is the configuration used by containers/storage
- // Not included in on-disk config, use the dedicated containers/storage
- // configuration file instead
- StorageConfig storage.StoreOptions `toml:"-"`
- // VolumePath is the default location that named volumes will be created
- // under. This convention is followed by the default volume driver, but
- // may not be by other drivers.
- VolumePath string `toml:"volume_path"`
- // ImageDefaultTransport is the default transport method used to fetch
- // images
- ImageDefaultTransport string `toml:"image_default_transport"`
- // SignaturePolicyPath is the path to a signature policy to use for
- // validating images
- // If left empty, the containers/image default signature policy will
- // be used
- SignaturePolicyPath string `toml:"signature_policy_path,omitempty"`
- // StateType is the type of the backing state store.
- // Avoid using multiple values for this with the same containers/storage
- // configuration on the same system. Different state types do not
- // interact, and each will see a separate set of containers, which may
- // cause conflicts in containers/storage
- // As such this is not exposed via the config file
- StateType RuntimeStateStore `toml:"-"`
- // OCIRuntime is the OCI runtime to use.
- OCIRuntime string `toml:"runtime"`
- // OCIRuntimes are the set of configured OCI runtimes (default is runc)
- OCIRuntimes map[string][]string `toml:"runtimes"`
- // RuntimeSupportsJSON is the list of the OCI runtimes that support
- // --format=json.
- RuntimeSupportsJSON []string `toml:"runtime_supports_json"`
- // RuntimeSupportsNoCgroups is a list of OCI runtimes that support
- // running containers without CGroups.
- RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroups"`
- // RuntimePath is the path to OCI runtime binary for launching
- // containers.
- // The first path pointing to a valid file will be used
- // This is used only when there are no OCIRuntime/OCIRuntimes defined. It
- // is used only to be backward compatible with older versions of Podman.
- RuntimePath []string `toml:"runtime_path"`
- // ConmonPath is the path to the Conmon binary used for managing
- // containers
- // The first path pointing to a valid file will be used
- ConmonPath []string `toml:"conmon_path"`
- // ConmonEnvVars are environment variables to pass to the Conmon binary
- // when it is launched
- ConmonEnvVars []string `toml:"conmon_env_vars"`
- // CGroupManager is the CGroup Manager to use
- // Valid values are "cgroupfs" and "systemd"
- CgroupManager string `toml:"cgroup_manager"`
- // InitPath is the path to the container-init binary.
- InitPath string `toml:"init_path"`
- // StaticDir is the path to a persistent directory to store container
- // files
- StaticDir string `toml:"static_dir"`
- // TmpDir is the path to a temporary directory to store per-boot
- // container files
- // Must be stored in a tmpfs
- TmpDir string `toml:"tmp_dir"`
- // MaxLogSize is the maximum size of container logfiles
- MaxLogSize int64 `toml:"max_log_size,omitempty"`
- // NoPivotRoot sets whether to set no-pivot-root in the OCI runtime
- NoPivotRoot bool `toml:"no_pivot_root"`
- // CNIConfigDir sets the directory where CNI configuration files are
- // stored
- CNIConfigDir string `toml:"cni_config_dir"`
- // CNIPluginDir sets a number of directories where the CNI network
- // plugins can be located
- CNIPluginDir []string `toml:"cni_plugin_dir"`
- // CNIDefaultNetwork is the network name of the default CNI network
- // to attach pods to
- CNIDefaultNetwork string `toml:"cni_default_network,omitempty"`
- // HooksDir holds paths to the directories containing hooks
- // configuration files. When the same filename is present in
- // multiple directories, the file in the directory listed last in
- // this slice takes precedence.
- HooksDir []string `toml:"hooks_dir"`
- // DefaultMountsFile is the path to the default mounts file for testing
- // purposes only
- DefaultMountsFile string `toml:"-"`
- // Namespace is the libpod namespace to use.
- // Namespaces are used to create scopes to separate containers and pods
- // in the state.
- // When namespace is set, libpod will only view containers and pods in
- // the same namespace. All containers and pods created will default to
- // the namespace set here.
- // A namespace of "", the empty string, is equivalent to no namespace,
- // and all containers and pods will be visible.
- // The default namespace is "".
- Namespace string `toml:"namespace,omitempty"`
-
- // InfraImage is the image a pod infra container will use to manage namespaces
- InfraImage string `toml:"infra_image"`
- // InfraCommand is the command run to start up a pod infra container
- InfraCommand string `toml:"infra_command"`
- // EnablePortReservation determines whether libpod will reserve ports on
- // the host when they are forwarded to containers.
- // When enabled, when ports are forwarded to containers, they are
- // held open by conmon as long as the container is running, ensuring
- // that they cannot be reused by other programs on the host.
- // However, this can cause significant memory usage if a container has
- // many ports forwarded to it. Disabling this can save memory.
- EnablePortReservation bool `toml:"enable_port_reservation"`
- // EnableLabeling indicates whether libpod will support container labeling
- EnableLabeling bool `toml:"label"`
- // NetworkCmdPath is the path to the slirp4netns binary
- NetworkCmdPath string `toml:"network_cmd_path"`
-
- // NumLocks is the number of locks to make available for containers and
- // pods.
- NumLocks uint32 `toml:"num_locks,omitempty"`
-
- // LockType is the type of locking to use.
- LockType string `toml:"lock_type,omitempty"`
-
- // EventsLogger determines where events should be logged
- EventsLogger string `toml:"events_logger"`
- // EventsLogFilePath is where the events log is stored.
- EventsLogFilePath string `toml:"events_logfile_path"`
- //DetachKeys is the sequence of keys used to detach a container
- DetachKeys string `toml:"detach_keys"`
-
- // SDNotify tells Libpod to allow containers to notify the host
- // systemd of readiness using the SD_NOTIFY mechanism
- SDNotify bool
- // CgroupCheck verifies if the cgroup check for correct OCI runtime has been done.
- CgroupCheck bool `toml:"cgroup_check,omitempty"`
-}
-
-// runtimeConfiguredFrom is a struct used during early runtime init to help
-// assemble the full RuntimeConfig struct from defaults.
-// It indicates whether several fields in the runtime configuration were set
-// explicitly.
-// If they were not, we may override them with information from the database,
-// if it exists and differs from what is present in the system already.
-type runtimeConfiguredFrom struct {
- storageGraphDriverSet bool
- storageGraphRootSet bool
- storageRunRootSet bool
- libpodStaticDirSet bool
- libpodTmpDirSet bool
- volPathSet bool
- conmonPath bool
- conmonEnvVars bool
- initPath bool
- ociRuntimes bool
- runtimePath bool
- cniPluginDir bool
- noPivotRoot bool
- runtimeSupportsJSON bool
- runtimeSupportsNoCgroups bool
- ociRuntime bool
-}
-
-func defaultRuntimeConfig() (RuntimeConfig, error) {
- storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
- if err != nil {
- return RuntimeConfig{}, err
- }
- graphRoot := storeOpts.GraphRoot
- if graphRoot == "" {
- logrus.Warnf("Storage configuration is unset - using hardcoded default paths")
- graphRoot = "/var/lib/containers/storage"
- }
- volumePath := filepath.Join(graphRoot, "volumes")
- staticDir := filepath.Join(graphRoot, "libpod")
- return RuntimeConfig{
- // Leave this empty so containers/storage will use its defaults
- StorageConfig: storage.StoreOptions{},
- VolumePath: volumePath,
- ImageDefaultTransport: DefaultTransport,
- StateType: BoltDBStateStore,
- OCIRuntime: "runc",
- OCIRuntimes: map[string][]string{
- "runc": {
- "/usr/bin/runc",
- "/usr/sbin/runc",
- "/usr/local/bin/runc",
- "/usr/local/sbin/runc",
- "/sbin/runc",
- "/bin/runc",
- "/usr/lib/cri-o-runc/sbin/runc",
- "/run/current-system/sw/bin/runc",
- },
- },
- ConmonPath: []string{
- "/usr/libexec/podman/conmon",
- "/usr/local/lib/podman/conmon",
- "/usr/bin/conmon",
- "/usr/sbin/conmon",
- "/usr/local/bin/conmon",
- "/usr/local/sbin/conmon",
- "/run/current-system/sw/bin/conmon",
- },
- ConmonEnvVars: []string{
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
- },
- InitPath: define.DefaultInitPath,
- CgroupManager: SystemdCgroupsManager,
- StaticDir: staticDir,
- TmpDir: "",
- MaxLogSize: -1,
- NoPivotRoot: false,
- CNIConfigDir: etcDir + "/cni/net.d/",
- CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"},
- InfraCommand: define.DefaultInfraCommand,
- InfraImage: define.DefaultInfraImage,
- EnablePortReservation: true,
- EnableLabeling: true,
- NumLocks: 2048,
- EventsLogger: events.DefaultEventerType.String(),
- DetachKeys: DefaultDetachKeys,
- LockType: "shm",
- }, nil
-}
-
// SetXdgDirs ensures the XDG_RUNTIME_DIR env and XDG_CONFIG_HOME variables are set.
// containers/image uses XDG_RUNTIME_DIR to locate the auth file, XDG_CONFIG_HOME is
// used for the libpod.conf configuration file.
@@ -400,28 +113,6 @@ func SetXdgDirs() error {
return nil
}
-func getDefaultTmpDir() (string, error) {
- if !rootless.IsRootless() {
- return "/var/run/libpod", nil
- }
-
- runtimeDir, err := util.GetRuntimeDir()
- if err != nil {
- return "", err
- }
- libpodRuntimeDir := filepath.Join(runtimeDir, "libpod")
-
- if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
- if !os.IsExist(err) {
- return "", errors.Wrapf(err, "cannot mkdir %s", libpodRuntimeDir)
- } else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
- // The directory already exist, just set the sticky bit
- return "", errors.Wrapf(err, "could not set sticky bit on %s", libpodRuntimeDir)
- }
- }
- return filepath.Join(libpodRuntimeDir, "tmp"), nil
-}
-
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) {
@@ -440,260 +131,14 @@ func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
return newRuntimeFromConfig(ctx, userConfigPath, options...)
}
-func homeDir() (string, error) {
- home := os.Getenv("HOME")
- if home == "" {
- usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
- if err != nil {
- return "", errors.Wrapf(err, "unable to resolve HOME directory")
- }
- home = usr.HomeDir
- }
- return home, nil
-}
-
-func getRootlessConfigPath() (string, error) {
- home, err := homeDir()
- if err != nil {
- return "", err
- }
-
- return filepath.Join(home, ".config/containers/libpod.conf"), nil
-}
-
-func getConfigPath() (string, error) {
- if rootless.IsRootless() {
- path, err := getRootlessConfigPath()
- if err != nil {
- return "", err
- }
- if _, err := os.Stat(path); err == nil {
- return path, nil
- }
- return "", err
- }
- if _, err := os.Stat(OverrideConfigPath); err == nil {
- // Use the override configuration path
- return OverrideConfigPath, nil
- }
- if _, err := os.Stat(ConfigPath); err == nil {
- return ConfigPath, nil
- }
- return "", nil
-}
-
-// DefaultRuntimeConfig reads default config path and returns the RuntimeConfig
-func DefaultRuntimeConfig() (*RuntimeConfig, error) {
- configPath, err := getConfigPath()
- if err != nil {
- return nil, err
- }
-
- contents, err := ioutil.ReadFile(configPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading configuration file %s", configPath)
- }
-
- // This is ugly, but we need to decode twice.
- // Once to check if libpod static and tmp dirs were explicitly
- // set (not enough to check if they're not the default value,
- // might have been explicitly configured to the default).
- // A second time to actually get a usable config.
- tmpConfig := new(RuntimeConfig)
- if _, err := toml.Decode(string(contents), tmpConfig); err != nil {
- return nil, errors.Wrapf(err, "error decoding configuration file %s",
- configPath)
- }
- return tmpConfig, nil
-}
-
func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
- runtime.config = new(RuntimeConfig)
- runtime.configuredFrom = new(runtimeConfiguredFrom)
-
- // Copy the default configuration
- tmpDir, err := getDefaultTmpDir()
- if err != nil {
- return nil, err
- }
-
- // storage.conf
- storageConfFile, err := storage.DefaultConfigFile(rootless.IsRootless())
- if err != nil {
- return nil, err
- }
- createStorageConfFile := false
- if _, err := os.Stat(storageConfFile); os.IsNotExist(err) {
- createStorageConfFile = true
- }
-
- defRunConf, err := defaultRuntimeConfig()
+ conf, err := config.NewConfig(userConfigPath)
if err != nil {
return nil, err
}
- if err := JSONDeepCopy(defRunConf, runtime.config); err != nil {
- return nil, errors.Wrapf(err, "error copying runtime default config")
- }
- runtime.config.TmpDir = tmpDir
-
- storageConf, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
- if err != nil {
- return nil, errors.Wrapf(err, "error retrieving storage config")
- }
- runtime.config.StorageConfig = storageConf
- runtime.config.StaticDir = filepath.Join(storageConf.GraphRoot, "libpod")
- runtime.config.VolumePath = filepath.Join(storageConf.GraphRoot, "volumes")
-
- configPath, err := getConfigPath()
- if err != nil {
- return nil, err
- }
- if rootless.IsRootless() {
- home, err := homeDir()
- if err != nil {
- return nil, err
- }
- if runtime.config.SignaturePolicyPath == "" {
- newPath := filepath.Join(home, ".config/containers/policy.json")
- if _, err := os.Stat(newPath); err == nil {
- runtime.config.SignaturePolicyPath = newPath
- }
- }
- }
-
- if userConfigPath != "" {
- configPath = userConfigPath
- if _, err := os.Stat(configPath); err != nil {
- // If the user specified a config file, we must fail immediately
- // when it doesn't exist
- return nil, errors.Wrapf(err, "cannot stat %s", configPath)
- }
- }
-
- // If we have a valid configuration file, load it in
- if configPath != "" {
- contents, err := ioutil.ReadFile(configPath)
- if err != nil {
- return nil, errors.Wrapf(err, "error reading configuration file %s", configPath)
- }
-
- // This is ugly, but we need to decode twice.
- // Once to check if libpod static and tmp dirs were explicitly
- // set (not enough to check if they're not the default value,
- // might have been explicitly configured to the default).
- // A second time to actually get a usable config.
- tmpConfig := new(RuntimeConfig)
- if _, err := toml.Decode(string(contents), tmpConfig); err != nil {
- return nil, errors.Wrapf(err, "error decoding configuration file %s",
- configPath)
- }
-
- if err := cgroupV2Check(configPath, tmpConfig); err != nil {
- return nil, err
- }
-
- if tmpConfig.StaticDir != "" {
- runtime.configuredFrom.libpodStaticDirSet = true
- }
- if tmpConfig.TmpDir != "" {
- runtime.configuredFrom.libpodTmpDirSet = true
- }
- if tmpConfig.VolumePath != "" {
- runtime.configuredFrom.volPathSet = true
- }
- if tmpConfig.ConmonPath != nil {
- runtime.configuredFrom.conmonPath = true
- }
- if tmpConfig.ConmonEnvVars != nil {
- runtime.configuredFrom.conmonEnvVars = true
- }
- if tmpConfig.InitPath != "" {
- runtime.configuredFrom.initPath = true
- }
- if tmpConfig.OCIRuntimes != nil {
- runtime.configuredFrom.ociRuntimes = true
- }
- if tmpConfig.RuntimePath != nil {
- runtime.configuredFrom.runtimePath = true
- }
- if tmpConfig.CNIPluginDir != nil {
- runtime.configuredFrom.cniPluginDir = true
- }
- if tmpConfig.NoPivotRoot {
- runtime.configuredFrom.noPivotRoot = true
- }
- if tmpConfig.RuntimeSupportsJSON != nil {
- runtime.configuredFrom.runtimeSupportsJSON = true
- }
- if tmpConfig.RuntimeSupportsNoCgroups != nil {
- runtime.configuredFrom.runtimeSupportsNoCgroups = true
- }
- if tmpConfig.OCIRuntime != "" {
- runtime.configuredFrom.ociRuntime = true
- }
-
- if _, err := toml.Decode(string(contents), runtime.config); err != nil {
- return nil, errors.Wrapf(err, "error decoding configuration file %s", configPath)
- }
- } else if rootless.IsRootless() {
- // If the configuration file was not found but we are running in rootless, a subset of the
- // global config file is used.
- for _, path := range []string{OverrideConfigPath, ConfigPath} {
- contents, err := ioutil.ReadFile(path)
- if err != nil {
- // Ignore any error, the file might not be readable by us.
- continue
- }
- tmpConfig := new(RuntimeConfig)
- if _, err := toml.Decode(string(contents), tmpConfig); err != nil {
- return nil, errors.Wrapf(err, "error decoding configuration file %s", path)
- }
-
- // Cherry pick the settings we want from the global configuration
- if !runtime.configuredFrom.conmonPath {
- runtime.config.ConmonPath = tmpConfig.ConmonPath
- }
- if !runtime.configuredFrom.conmonEnvVars {
- runtime.config.ConmonEnvVars = tmpConfig.ConmonEnvVars
- }
- if !runtime.configuredFrom.initPath {
- runtime.config.InitPath = tmpConfig.InitPath
- }
- if !runtime.configuredFrom.ociRuntimes {
- runtime.config.OCIRuntimes = tmpConfig.OCIRuntimes
- }
- if !runtime.configuredFrom.runtimePath {
- runtime.config.RuntimePath = tmpConfig.RuntimePath
- }
- if !runtime.configuredFrom.cniPluginDir {
- runtime.config.CNIPluginDir = tmpConfig.CNIPluginDir
- }
- if !runtime.configuredFrom.noPivotRoot {
- runtime.config.NoPivotRoot = tmpConfig.NoPivotRoot
- }
- if !runtime.configuredFrom.runtimeSupportsJSON {
- runtime.config.RuntimeSupportsJSON = tmpConfig.RuntimeSupportsJSON
- }
- if !runtime.configuredFrom.runtimeSupportsNoCgroups {
- runtime.config.RuntimeSupportsNoCgroups = tmpConfig.RuntimeSupportsNoCgroups
- }
- if !runtime.configuredFrom.ociRuntime {
- runtime.config.OCIRuntime = tmpConfig.OCIRuntime
- }
-
- cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
- if err != nil {
- return nil, err
- }
- if cgroupsV2 {
- runtime.config.CgroupCheck = true
- }
-
- break
- }
- }
+ runtime.config = conf
// Overwrite config with user-given configuration options
for _, opt := range options {
@@ -702,36 +147,6 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
}
}
- if rootless.IsRootless() && configPath == "" {
- if createStorageConfFile {
- if err := util.WriteStorageConfigFile(&runtime.config.StorageConfig, storageConfFile); err != nil {
- return nil, errors.Wrapf(err, "cannot write config file %s", storageConfFile)
- }
- }
-
- configPath, err := getRootlessConfigPath()
- if err != nil {
- return nil, err
- }
- if configPath != "" {
- if err := os.MkdirAll(filepath.Dir(configPath), 0711); err != nil {
- return nil, err
- }
- file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
- if err != nil && !os.IsExist(err) {
- return nil, errors.Wrapf(err, "cannot open file %s", configPath)
- }
- if err == nil {
- defer file.Close()
- enc := toml.NewEncoder(file)
- if err := enc.Encode(runtime.config); err != nil {
- if removeErr := os.Remove(configPath); removeErr != nil {
- logrus.Debugf("unable to remove %s: %q", configPath, err)
- }
- }
- }
- }
- }
if err := makeRuntime(ctx, runtime); err != nil {
return nil, err
}
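
All of the default construction, double TOML decoding and rootless cherry-picking removed here now lives in the new libpod/config package; newRuntimeFromConfig simply asks config.NewConfig for a merged configuration and then lets RuntimeOptions override it. A hedged sketch of that layering with stand-in types (the real config.NewConfig merges built-in defaults, libpod.conf and any user-supplied path):

package main

import "fmt"

// cfg is a stand-in for config.Config with just enough fields to show the
// precedence: built-in defaults < config file < functional options.
type cfg struct {
	CgroupManager string
	NumLocks      uint32
}

// newConfig mimics config.NewConfig: start from defaults, then overlay
// whatever the (optional) user config file specifies.
func newConfig(userConfigPath string) (*cfg, error) {
	c := &cfg{CgroupManager: "systemd", NumLocks: 2048} // built-in defaults
	if userConfigPath != "" {
		// A real implementation would TOML-decode the file here.
		c.CgroupManager = "cgroupfs"
	}
	return c, nil
}

type runtimeOption func(*cfg) error

func withNumLocks(n uint32) runtimeOption {
	return func(c *cfg) error { c.NumLocks = n; return nil }
}

func main() {
	c, err := newConfig("")
	if err != nil {
		panic(err)
	}
	// Options applied after loading override the merged configuration.
	for _, opt := range []runtimeOption{withNumLocks(4096)} {
		if err := opt(c); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%+v\n", c)
}
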
@@ -758,9 +173,9 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
}
case "", "shm":
- lockPath := DefaultSHMLockPath
+ lockPath := define.DefaultSHMLockPath
if rootless.IsRootless() {
- lockPath = fmt.Sprintf("%s_%d", DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
+ lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
}
// Set up the lock manager
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
@@ -794,119 +209,14 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) {
return manager, nil
}
-// probeConmon calls conmon --version and verifies it is a new enough version for
-// the runtime expectations podman currently has
-func probeConmon(conmonBinary string) error {
- versionFormatErr := "conmon version changed format"
- cmd := exec.Command(conmonBinary, "--version")
- var out bytes.Buffer
- cmd.Stdout = &out
- err := cmd.Run()
- if err != nil {
- return err
- }
- r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
-
- matches := r.FindStringSubmatch(out.String())
- if len(matches) != 4 {
- return errors.Wrapf(err, versionFormatErr)
- }
- major, err := strconv.Atoi(matches[1])
- if err != nil {
- return errors.Wrapf(err, versionFormatErr)
- }
- if major < minConmonMajor {
- return define.ErrConmonOutdated
- }
- if major > minConmonMajor {
- return nil
- }
-
- minor, err := strconv.Atoi(matches[2])
- if err != nil {
- return errors.Wrapf(err, versionFormatErr)
- }
- if minor < minConmonMinor {
- return define.ErrConmonOutdated
- }
- if minor > minConmonMinor {
- return nil
- }
-
- patch, err := strconv.Atoi(matches[3])
- if err != nil {
- return errors.Wrapf(err, versionFormatErr)
- }
- if patch < minConmonPatch {
- return define.ErrConmonOutdated
- }
- if patch > minConmonPatch {
- return nil
- }
-
- return nil
-}
-
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
- // Let's sanity-check some paths first.
- // Relative paths can cause nasty bugs, because core paths we use could
- // shift between runs (or even parts of the program - the OCI runtime
- // uses a different working directory than we do, for example).
- if !filepath.IsAbs(runtime.config.StaticDir) {
- return errors.Wrapf(define.ErrInvalidArg, "static directory must be an absolute path - instead got %q", runtime.config.StaticDir)
- }
- if !filepath.IsAbs(runtime.config.TmpDir) {
- return errors.Wrapf(define.ErrInvalidArg, "temporary directory must be an absolute path - instead got %q", runtime.config.TmpDir)
- }
- if !filepath.IsAbs(runtime.config.VolumePath) {
- return errors.Wrapf(define.ErrInvalidArg, "volume path must be an absolute path - instead got %q", runtime.config.VolumePath)
- }
-
// Find a working conmon binary
- foundConmon := false
- foundOutdatedConmon := false
- for _, path := range runtime.config.ConmonPath {
- stat, err := os.Stat(path)
- if err != nil {
- continue
- }
- if stat.IsDir() {
- continue
- }
- if err := probeConmon(path); err != nil {
- logrus.Warnf("conmon at %s invalid: %v", path, err)
- foundOutdatedConmon = true
- continue
- }
- foundConmon = true
- runtime.conmonPath = path
- logrus.Debugf("using conmon: %q", path)
- break
- }
-
- // Search the $PATH as last fallback
- if !foundConmon {
- if conmon, err := exec.LookPath("conmon"); err == nil {
- if err := probeConmon(conmon); err != nil {
- logrus.Warnf("conmon at %s is invalid: %v", conmon, err)
- foundOutdatedConmon = true
- } else {
- foundConmon = true
- runtime.conmonPath = conmon
- logrus.Debugf("using conmon from $PATH: %q", conmon)
- }
- }
- }
-
- if !foundConmon {
- if foundOutdatedConmon {
- return errors.Errorf("please update to v%d.%d.%d or later: %v", minConmonMajor, minConmonMinor, minConmonPatch, define.ErrConmonOutdated)
- }
- return errors.Wrapf(define.ErrInvalidArg,
- "could not find a working conmon binary (configured options: %v)",
- runtime.config.ConmonPath)
+ if cPath, err := runtime.config.FindConmon(); err != nil {
+ return err
+ } else {
+ runtime.conmonPath = cPath
}
// Make the static files directory if it does not exist
@@ -918,17 +228,22 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
}
- // Set up the state
+ // Set up the state.
+ //
+ // TODO - if we further break out the state implementation into
+ // libpod/state, the config could take care of the code below. It
+ // would further allow moving the types and consts into a coherent
+ // package.
switch runtime.config.StateType {
- case InMemoryStateStore:
+ case define.InMemoryStateStore:
state, err := NewInMemoryState()
if err != nil {
return err
}
runtime.state = state
- case SQLiteStateStore:
+ case define.SQLiteStateStore:
return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
- case BoltDBStateStore:
+ case define.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
state, err := NewBoltState(dbPath, runtime)
@@ -937,7 +252,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
runtime.state = state
default:
- return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed")
+ return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.StateType)
}
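
State backends are now selected through the constants in libpod/define, with BoltDB as the default and SQLite still rejected as disabled. A small stand-in sketch of the selection switch (types and names are illustrative, not the libpod ones):

package main

import (
	"errors"
	"fmt"
)

// Stand-in for define.RuntimeStateStore and its constants.
type runtimeStateStore int

const (
	invalidStateStore runtimeStateStore = iota
	inMemoryStateStore
	sqliteStateStore
	boltDBStateStore
)

// pickState mirrors the switch in makeRuntime: BoltDB and in-memory are
// usable, SQLite is rejected as disabled, anything else is invalid.
func pickState(t runtimeStateStore) (string, error) {
	switch t {
	case inMemoryStateStore:
		return "in-memory", nil
	case sqliteStateStore:
		return "", errors.New("SQLite state is currently disabled")
	case boltDBStateStore:
		return "bolt_state.db", nil
	default:
		return "", fmt.Errorf("unrecognized state type passed (%v)", t)
	}
}

func main() {
	for _, t := range []runtimeStateStore{boltDBStateStore, sqliteStateStore, invalidStateStore} {
		s, err := pickState(t)
		fmt.Println(s, err)
	}
}
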
// Grab config from the database so we can reset some defaults
@@ -946,51 +261,9 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
- // Reset defaults if they were not explicitly set
- if !runtime.configuredFrom.storageGraphDriverSet && dbConfig.GraphDriver != "" {
- if runtime.config.StorageConfig.GraphDriverName != dbConfig.GraphDriver &&
- runtime.config.StorageConfig.GraphDriverName != "" {
- logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve",
- runtime.config.StorageConfig.GraphDriverName, dbConfig.GraphDriver)
- }
- runtime.config.StorageConfig.GraphDriverName = dbConfig.GraphDriver
- }
- if !runtime.configuredFrom.storageGraphRootSet && dbConfig.StorageRoot != "" {
- if runtime.config.StorageConfig.GraphRoot != dbConfig.StorageRoot &&
- runtime.config.StorageConfig.GraphRoot != "" {
- logrus.Debugf("Overriding graph root %q with %q from database",
- runtime.config.StorageConfig.GraphRoot, dbConfig.StorageRoot)
- }
- runtime.config.StorageConfig.GraphRoot = dbConfig.StorageRoot
+ if err := runtime.config.MergeDBConfig(dbConfig); err != nil {
+ return errors.Wrapf(err, "error merging database config into runtime config")
}
- if !runtime.configuredFrom.storageRunRootSet && dbConfig.StorageTmp != "" {
- if runtime.config.StorageConfig.RunRoot != dbConfig.StorageTmp &&
- runtime.config.StorageConfig.RunRoot != "" {
- logrus.Debugf("Overriding run root %q with %q from database",
- runtime.config.StorageConfig.RunRoot, dbConfig.StorageTmp)
- }
- runtime.config.StorageConfig.RunRoot = dbConfig.StorageTmp
- }
- if !runtime.configuredFrom.libpodStaticDirSet && dbConfig.LibpodRoot != "" {
- if runtime.config.StaticDir != dbConfig.LibpodRoot && runtime.config.StaticDir != "" {
- logrus.Debugf("Overriding static dir %q with %q from database", runtime.config.StaticDir, dbConfig.LibpodRoot)
- }
- runtime.config.StaticDir = dbConfig.LibpodRoot
- }
- if !runtime.configuredFrom.libpodTmpDirSet && dbConfig.LibpodTmp != "" {
- if runtime.config.TmpDir != dbConfig.LibpodTmp && runtime.config.TmpDir != "" {
- logrus.Debugf("Overriding tmp dir %q with %q from database", runtime.config.TmpDir, dbConfig.LibpodTmp)
- }
- runtime.config.TmpDir = dbConfig.LibpodTmp
- }
- if !runtime.configuredFrom.volPathSet && dbConfig.VolumePath != "" {
- if runtime.config.VolumePath != dbConfig.VolumePath && runtime.config.VolumePath != "" {
- logrus.Debugf("Overriding volume path %q with %q from database", runtime.config.VolumePath, dbConfig.VolumePath)
- }
- runtime.config.VolumePath = dbConfig.VolumePath
- }
-
- runtime.config.EventsLogFilePath = filepath.Join(runtime.config.TmpDir, "events", "events.log")
logrus.Debugf("Using graph driver %s", runtime.config.StorageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.config.StorageConfig.GraphRoot)
@@ -1269,7 +542,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
}
// GetConfig returns a copy of the configuration used by the runtime
-func (r *Runtime) GetConfig() (*RuntimeConfig, error) {
+func (r *Runtime) GetConfig() (*config.Config, error) {
r.lock.RLock()
defer r.lock.RUnlock()
@@ -1277,7 +550,7 @@ func (r *Runtime) GetConfig() (*RuntimeConfig, error) {
return nil, define.ErrRuntimeStopped
}
- config := new(RuntimeConfig)
+ config := new(config.Config)
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(r.config, config); err != nil {
@@ -1499,56 +772,3 @@ func (r *Runtime) SystemContext() *types.SystemContext {
func (r *Runtime) GetOCIRuntimePath() string {
return r.defaultOCIRuntime.Path()
}
-
-// Since runc does not currently support cgroupV2
-// Change to default crun on first running of libpod.conf
-// TODO Once runc has support for cgroups, this function should be removed.
-func cgroupV2Check(configPath string, tmpConfig *RuntimeConfig) error {
- if !tmpConfig.CgroupCheck && rootless.IsRootless() {
- if tmpConfig.CgroupManager == SystemdCgroupsManager {
- // If we are running rootless and the systemd manager is requested, be sure that dbus is accessible
- session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
- hasSession := session != ""
- if hasSession && strings.HasPrefix(session, "unix:path=") {
- _, err := os.Stat(strings.TrimPrefix(session, "unix:path="))
- hasSession = err == nil
- }
-
- if !hasSession {
- logrus.Warningf("The cgroups manager is set to systemd but there is no systemd user session available")
- logrus.Warningf("For using systemd, you may need to login using an user session")
- logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibily as root)", rootless.GetRootlessUID())
- logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
-
- tmpConfig.CgroupManager = CgroupfsCgroupsManager
- }
-
- }
- cgroupsV2, err := cgroups.IsCgroup2UnifiedMode()
- if err != nil {
- return err
- }
- if cgroupsV2 {
- path, err := exec.LookPath("crun")
- if err != nil {
- logrus.Warnf("Can not find crun package on the host, containers might fail to run on cgroup V2 systems without crun: %q", err)
- // Can't find crun path so do nothing
- return nil
- }
- tmpConfig.CgroupCheck = true
- tmpConfig.OCIRuntime = path
- file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
- if err != nil {
- return errors.Wrapf(err, "cannot open file %s", configPath)
- }
- defer file.Close()
- enc := toml.NewEncoder(file)
- if err := enc.Encode(tmpConfig); err != nil {
- if removeErr := os.Remove(configPath); removeErr != nil {
- logrus.Debugf("unable to remove %s: %q", configPath, err)
- }
- }
- }
- }
- return nil
-}
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index 2b214d572..7069d3494 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -75,7 +75,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
if config == nil {
ctr.config.ID = stringid.GenerateNonCryptoID()
- ctr.config.ShmSize = DefaultShmSize
+ ctr.config.ShmSize = define.DefaultShmSize
} else {
// This is a restore from an imported checkpoint
ctr.restoreFromCheckpoint = true
@@ -215,7 +215,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
// Only if we're actually configuring CGroups.
if !ctr.config.NoCgroups {
switch r.config.CgroupManager {
- case CgroupfsCgroupsManager:
+ case define.CgroupfsCgroupsManager:
if ctr.config.CgroupParent == "" {
if pod != nil && pod.config.UsePodCgroup {
podCgroup, err := pod.CgroupPath()
@@ -232,7 +232,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (c *Contai
} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs")
}
- case SystemdCgroupsManager:
+ case define.SystemdCgroupsManager:
if ctr.config.CgroupParent == "" {
if pod != nil && pod.config.UsePodCgroup {
podCgroup, err := pod.CgroupPath()
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 05866d05a..704aaf9d0 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -76,7 +76,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po
// Check CGroup parent sanity, and set it if it was not set
switch r.config.CgroupManager {
- case CgroupfsCgroupsManager:
+ case define.CgroupfsCgroupsManager:
if pod.config.CgroupParent == "" {
pod.config.CgroupParent = CgroupfsDefaultCgroupParent
} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
@@ -89,7 +89,7 @@ func (r *Runtime) NewPod(ctx context.Context, options ...PodCreateOption) (_ *Po
if pod.config.UsePodCgroup {
pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
}
- case SystemdCgroupsManager:
+ case define.SystemdCgroupsManager:
if pod.config.CgroupParent == "" {
if rootless.IsRootless() {
pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent
@@ -200,7 +200,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
// the pod and conmon CGroups with a PID limit to prevent them from
// spawning any further processes (particularly cleanup processes) which
// would prevent removing the CGroups.
- if p.runtime.config.CgroupManager == CgroupfsCgroupsManager {
+ if p.runtime.config.CgroupManager == define.CgroupfsCgroupsManager {
// Get the conmon CGroup
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
@@ -251,7 +251,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)
switch p.runtime.config.CgroupManager {
- case SystemdCgroupsManager:
+ case define.SystemdCgroupsManager:
if err := deleteSystemdCgroup(p.state.CgroupPath); err != nil {
if removalErr == nil {
removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
@@ -259,7 +259,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
}
- case CgroupfsCgroupsManager:
+ case define.CgroupfsCgroupsManager:
// Delete the cgroupfs cgroup
// Make sure the conmon cgroup is deleted first
// Since the pod is almost gone, don't bother failing
diff --git a/libpod/state.go b/libpod/state.go
index e38f820b5..b246b5eac 100644
--- a/libpod/state.go
+++ b/libpod/state.go
@@ -1,15 +1,6 @@
package libpod
-// DBConfig is a set of Libpod runtime configuration settings that are saved
-// in a State when it is first created, and can subsequently be retrieved.
-type DBConfig struct {
- LibpodRoot string
- LibpodTmp string
- StorageRoot string
- StorageTmp string
- GraphDriver string
- VolumePath string
-}
+import "github.com/containers/libpod/libpod/config"
// State is a storage backend for libpod's current state.
// A State is only initialized once per instance of libpod.
@@ -37,7 +28,7 @@ type State interface {
// root and tmp dirs, and c/storage graph driver.
// This is not implemented by the in-memory state, as it has no need to
// validate runtime configuration.
- GetDBConfig() (*DBConfig, error)
+ GetDBConfig() (*config.DBConfig, error)
// ValidateDBConfig validates the config in the given Runtime struct
// against paths stored in the configured database.
diff --git a/libpod/state_test.go b/libpod/state_test.go
index 5db1f301c..d4a4149f9 100644
--- a/libpod/state_test.go
+++ b/libpod/state_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/containers/storage"
@@ -52,7 +53,7 @@ func getEmptyBoltState() (s State, p string, m lock.Manager, err error) {
}
runtime := new(Runtime)
- runtime.config = new(RuntimeConfig)
+ runtime.config = new(config.Config)
runtime.config.StorageConfig = storage.StoreOptions{}
runtime.lockManager = lockManager
diff --git a/libpod/stats.go b/libpod/stats.go
index 5513abce5..3b5e0958c 100644
--- a/libpod/stats.go
+++ b/libpod/stats.go
@@ -3,7 +3,6 @@
package libpod
import (
- "runtime"
"strings"
"syscall"
"time"
@@ -56,8 +55,8 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
}
previousCPU := previousStats.CPUNano
- previousSystem := previousStats.SystemNano
- stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, previousSystem)
+ now := uint64(time.Now().UnixNano())
+ stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano)
stats.MemUsage = cgroupStats.Memory.Usage.Usage
stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit)
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
@@ -67,7 +66,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
}
stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)
stats.CPUNano = cgroupStats.CPU.Usage.Total
- stats.SystemNano = cgroupStats.CPU.Usage.Kernel
+ stats.SystemNano = now
// Handle case where the container is not in a network namespace
if netStats != nil {
stats.NetInput = netStats.TxBytes
@@ -98,20 +97,19 @@ func getMemLimit(cgroupLimit uint64) uint64 {
return cgroupLimit
}
-func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, previousSystem uint64) float64 {
+// calculateCPUPercent calculates the CPU usage percentage from the latest measurement in stats.
+// previousCPU is the last value of stats.CPU.Usage.Total, measured at the time previousSystem.
+// (now - previousSystem) is the time delta in nanoseconds between that previous measurement
+// and the updated value in stats.
+func calculateCPUPercent(stats *cgroups.Metrics, previousCPU, now, previousSystem uint64) float64 {
var (
cpuPercent = 0.0
cpuDelta = float64(stats.CPU.Usage.Total - previousCPU)
- systemDelta = float64(uint64(time.Now().UnixNano()) - previousSystem)
+ systemDelta = float64(now - previousSystem)
)
if systemDelta > 0.0 && cpuDelta > 0.0 {
- // gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
- // at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
- nCPUS := len(stats.CPU.Usage.PerCPU)
- if nCPUS == 0 {
- nCPUS = runtime.NumCPU()
- }
- cpuPercent = (cpuDelta / systemDelta) * float64(nCPUS) * 100
+ // gets a ratio of container cpu usage total, and multiplies that by 100 to get a percentage
+ cpuPercent = (cpuDelta / systemDelta) * 100
}
return cpuPercent
}
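
A minimal standalone sketch (not part of the patch) of the reworked calculation above, assuming the caller stores both CPUNano and SystemNano from the previous sample, as GetContainerStats now does:

package main

import "fmt"

// cpuPercent mirrors the new calculateCPUPercent logic: the ratio of container
// CPU time to elapsed wall-clock time over the sampling window, as a percentage.
func cpuPercent(total, prevCPU, now, prevSystem uint64) float64 {
	cpuDelta := float64(total - prevCPU)
	systemDelta := float64(now - prevSystem)
	if systemDelta > 0 && cpuDelta > 0 {
		return (cpuDelta / systemDelta) * 100
	}
	return 0
}

func main() {
	// 200ms of CPU time consumed over a 1s window -> 20%
	fmt.Println(cpuPercent(1200000000, 1000000000, 2000000000, 1000000000))
}
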
diff --git a/libpod/util.go b/libpod/util.go
index 5ae5ab491..bae2f4eb8 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -10,6 +10,7 @@ import (
"strings"
"time"
+ "github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/utils"
"github.com/fsnotify/fsnotify"
@@ -19,10 +20,6 @@ import (
// Runtime API constants
const (
- // DefaultTransport is a prefix that we apply to an image name
- // to check docker hub first for the image
- DefaultTransport = "docker://"
-
unknownPackage = "Unknown"
)
@@ -190,19 +187,22 @@ func programVersion(mountProgram string) (string, error) {
return strings.TrimSuffix(output, "\n"), nil
}
+// DefaultSeccompPath returns the path to the default seccomp.json file.
+// It checks SeccompOverridePath first and falls back to SeccompDefaultPath;
+// if neither exists, it returns "".
func DefaultSeccompPath() (string, error) {
- _, err := os.Stat(SeccompOverridePath)
+ _, err := os.Stat(config.SeccompOverridePath)
if err == nil {
- return SeccompOverridePath, nil
+ return config.SeccompOverridePath, nil
}
if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "can't check if %q exists", SeccompOverridePath)
+ return "", errors.Wrapf(err, "can't check if %q exists", config.SeccompOverridePath)
}
- if _, err := os.Stat(SeccompDefaultPath); err != nil {
+ if _, err := os.Stat(config.SeccompDefaultPath); err != nil {
if !os.IsNotExist(err) {
- return "", errors.Wrapf(err, "can't check if %q exists", SeccompDefaultPath)
+ return "", errors.Wrapf(err, "can't check if %q exists", config.SeccompDefaultPath)
}
return "", nil
}
- return SeccompDefaultPath, nil
+ return config.SeccompDefaultPath, nil
}
diff --git a/logo/podman-logo-source.svg b/logo/podman-logo-source.svg
index 9e5cc6625..135624a42 100644
--- a/logo/podman-logo-source.svg
+++ b/logo/podman-logo-source.svg
@@ -251,7 +251,7 @@
ry="0.88143349"
rx="0.8483994"
cy="-8.3384542"
- cx="967.05286"
+ cx="965.94666"
id="ellipse10873"
style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.68990111;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
</g>
@@ -386,8 +386,8 @@
<ellipse
ry="0.77958798"
rx="0.75153452"
- cy="8.4197426"
- cx="954.63715"
+ cy="8.5291576"
+ cx="953.76178"
id="ellipse10927"
style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.95576686;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
</g>
@@ -512,8 +512,8 @@
<ellipse
style="fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0.79374999;stroke-linecap:square;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="ellipse10977"
- cx="974.20233"
- cy="6.3892479"
+ cx="973.4754"
+ cy="6.4801154"
rx="0.66728675"
ry="0.69219536" />
</g>
diff --git a/logo/podman-logo.png b/logo/podman-logo.png
index 70c8b1191..90acf9cbc 100644
--- a/logo/podman-logo.png
+++ b/logo/podman-logo.png
Binary files differ
diff --git a/pkg/adapter/containers.go b/pkg/adapter/containers.go
index 430b6925d..287bd8474 100644
--- a/pkg/adapter/containers.go
+++ b/pkg/adapter/containers.go
@@ -307,7 +307,11 @@ func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) er
if len(c.InputArgs) > 1 {
options.Multi = true
}
- logChannel := make(chan *logs.LogLine, int(c.Tail)*len(c.InputArgs)+1)
+ tailLen := int(c.Tail)
+ if tailLen < 0 {
+ tailLen = 0
+ }
+ logChannel := make(chan *logs.LogLine, tailLen*len(c.InputArgs)+1)
containers, err := shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
if err != nil {
return err
@@ -396,17 +400,8 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
}
}
- config, err := r.Runtime.GetConfig()
- if err != nil {
- return exitCode, err
- }
- detachKeys := c.String("detach-keys")
- if detachKeys == "" {
- detachKeys = config.DetachKeys
- }
-
// if the container was created as part of a pod, also start its dependencies, if any.
- if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, detachKeys, c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
+ if err := StartAttachCtr(ctx, ctr, outputStream, errorStream, inputStream, c.String("detach-keys"), c.Bool("sig-proxy"), true, c.IsSet("pod")); err != nil {
// We've manually detached from the container
// Do not perform cleanup, or wait for container exit code
// Just exit immediately
@@ -543,12 +538,13 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues)
)
options := libpod.ContainerCheckpointOptions{
- Keep: c.Keep,
- TCPEstablished: c.TcpEstablished,
- TargetFile: c.Import,
- Name: c.Name,
- IgnoreRootfs: c.IgnoreRootfs,
- IgnoreStaticIP: c.IgnoreStaticIP,
+ Keep: c.Keep,
+ TCPEstablished: c.TcpEstablished,
+ TargetFile: c.Import,
+ Name: c.Name,
+ IgnoreRootfs: c.IgnoreRootfs,
+ IgnoreStaticIP: c.IgnoreStaticIP,
+ IgnoreStaticMAC: c.IgnoreStaticMAC,
}
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
@@ -656,20 +652,25 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
return exitCode, nil
}
- if ctrRunning {
- fmt.Println(ctr.ID())
- continue
- }
- // Handle non-attach start
- // If the container is in a pod, also set to recursively start dependencies
- if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil {
- if lastError != nil {
- fmt.Fprintln(os.Stderr, lastError)
+ // Start the container if it's not running already.
+ if !ctrRunning {
+ // Handle non-attach start
+ // If the container is in a pod, also set to recursively start dependencies
+ if err := ctr.Start(ctx, ctr.PodID() != ""); err != nil {
+ if lastError != nil {
+ fmt.Fprintln(os.Stderr, lastError)
+ }
+ lastError = errors.Wrapf(err, "unable to start container %q", container)
+ continue
}
- lastError = errors.Wrapf(err, "unable to start container %q", container)
- continue
}
- fmt.Println(ctr.ID())
+ // Check if the container is referenced by ID or by name and print
+ // it accordingly.
+ if strings.HasPrefix(ctr.ID(), container) {
+ fmt.Println(ctr.ID())
+ } else {
+ fmt.Println(container)
+ }
}
return exitCode, lastError
}
@@ -891,7 +892,7 @@ func (r *LocalRuntime) execPS(c *libpod.Container, args []string) ([]string, err
streams := new(libpod.AttachStreams)
streams.OutputStream = wPipe
streams.ErrorStream = wPipe
- streams.InputStream = os.Stdin
+ streams.InputStream = bufio.NewReader(os.Stdin)
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
@@ -969,7 +970,7 @@ func (r *LocalRuntime) ExecContainer(ctx context.Context, cli *cliconfig.ExecVal
streams.OutputStream = os.Stdout
streams.ErrorStream = os.Stderr
if cli.Interactive {
- streams.InputStream = os.Stdin
+ streams.InputStream = bufio.NewReader(os.Stdin)
streams.AttachInput = true
}
streams.AttachOutput = true
diff --git a/pkg/adapter/pods.go b/pkg/adapter/pods.go
index d8d5b884f..85f93ed3e 100644
--- a/pkg/adapter/pods.go
+++ b/pkg/adapter/pods.go
@@ -666,10 +666,69 @@ func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
return infraPorts
}
+func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) {
+ if containerYAML.SecurityContext == nil {
+ return
+ }
+ if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
+ securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
+ }
+ if containerYAML.SecurityContext.Privileged != nil {
+ securityConfig.Privileged = *containerYAML.SecurityContext.Privileged
+ }
+
+ if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
+ securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
+ }
+
+ if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
+ if seopt.User != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
+ }
+ if seopt.Role != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
+ }
+ if seopt.Type != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
+ }
+ if seopt.Level != "" {
+ securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
+ securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
+ }
+ }
+ if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
+ for _, capability := range caps.Add {
+ securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability))
+ }
+ for _, capability := range caps.Drop {
+ securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability))
+ }
+ }
+ if containerYAML.SecurityContext.RunAsUser != nil {
+ userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
+ }
+ if containerYAML.SecurityContext.RunAsGroup != nil {
+ if userConfig.User == "" {
+ userConfig.User = "0"
+ }
+ userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup)
+ }
+}
+
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
+ pidConfig createconfig.PidConfig
+ networkConfig createconfig.NetworkConfig
+ cgroupConfig createconfig.CgroupConfig
+ utsConfig createconfig.UtsConfig
+ ipcConfig createconfig.IpcConfig
+ userConfig createconfig.UserConfig
+ securityConfig createconfig.SecurityConfig
)
// The default for MemorySwappiness is -1, not 0
@@ -685,36 +744,15 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
imageData, _ := newImage.Inspect(ctx)
- containerConfig.User = "0"
+ userConfig.User = "0"
if imageData != nil {
- containerConfig.User = imageData.Config.User
+ userConfig.User = imageData.Config.User
}
- if containerYAML.SecurityContext != nil {
- if containerConfig.SecurityOpts != nil {
- if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
- containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
- }
- if containerYAML.SecurityContext.Privileged != nil {
- containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
- }
-
- if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
- containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
- }
+ setupSecurityContext(&securityConfig, &userConfig, containerYAML)
- }
- if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
- for _, capability := range caps.Add {
- containerConfig.CapAdd = append(containerConfig.CapAdd, string(capability))
- }
- for _, capability := range caps.Drop {
- containerConfig.CapDrop = append(containerConfig.CapDrop, string(capability))
- }
- }
- }
var err error
- containerConfig.SeccompProfilePath, err = libpod.DefaultSeccompPath()
+ containerConfig.Security.SeccompProfilePath, err = libpod.DefaultSeccompPath()
if err != nil {
return nil, err
}
@@ -737,20 +775,28 @@ func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container
containerConfig.StopSignal = 15
// If the user does not pass in ID mappings, just set to basics
- if containerConfig.IDMappings == nil {
- containerConfig.IDMappings = &storage.IDMappingOptions{}
+ if userConfig.IDMappings == nil {
+ userConfig.IDMappings = &storage.IDMappingOptions{}
}
- containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
- containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
- containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
+ networkConfig.NetMode = ns.NetworkMode(namespaces["net"])
+ ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
+ utsConfig.UtsMode = ns.UTSMode(namespaces["uts"])
// disabled in code review per mheon
//containerConfig.PidMode = ns.PidMode(namespaces["pid"])
- containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
+ userConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
if len(containerConfig.WorkDir) == 0 {
containerConfig.WorkDir = "/"
}
+ containerConfig.Pid = pidConfig
+ containerConfig.Network = networkConfig
+ containerConfig.Uts = utsConfig
+ containerConfig.Ipc = ipcConfig
+ containerConfig.Cgroup = cgroupConfig
+ containerConfig.User = userConfig
+ containerConfig.Security = securityConfig
+
// Set default environment variables and incorporate data from image, if necessary
envs := shared.EnvVariablesFromData(imageData)
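
A hypothetical standalone sketch of the RunAsUser/RunAsGroup handling introduced in setupSecurityContext above; in the patch the user has already defaulted to "0" or the image's configured user before this logic runs:

package main

import "fmt"

// resolveKubeUser mirrors how the security context's RunAsUser/RunAsGroup
// fields are folded into a "uid[:gid]" string (simplified illustration).
func resolveKubeUser(current string, runAsUser, runAsGroup *int64) string {
	if runAsUser != nil {
		current = fmt.Sprintf("%d", *runAsUser)
	}
	if runAsGroup != nil {
		if current == "" {
			current = "0" // RunAsGroup without RunAsUser falls back to uid 0
		}
		current = fmt.Sprintf("%s:%d", current, *runAsGroup)
	}
	return current
}

func main() {
	gid := int64(1000)
	fmt.Println(resolveKubeUser("0", nil, &gid)) // prints "0:1000"
}
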
diff --git a/pkg/adapter/runtime.go b/pkg/adapter/runtime.go
index 4f70e90f9..81a43853c 100644
--- a/pkg/adapter/runtime.go
+++ b/pkg/adapter/runtime.go
@@ -338,7 +338,7 @@ func (r *LocalRuntime) SaveImage(ctx context.Context, c *cliconfig.SaveValues) e
return newImage.Save(ctx, source, c.Format, c.Output, additionalTags, c.Quiet, c.Compress)
}
-// LoadImage is a wrapper function for libpod PruneVolumes
+// LoadImage is a wrapper function for libpod LoadImage
func (r *LocalRuntime) LoadImage(ctx context.Context, name string, cli *cliconfig.LoadValues) (string, error) {
var (
writer io.Writer
diff --git a/pkg/adapter/terminal_linux.go b/pkg/adapter/terminal_linux.go
index 16e552802..3dc5864e2 100644
--- a/pkg/adapter/terminal_linux.go
+++ b/pkg/adapter/terminal_linux.go
@@ -1,6 +1,7 @@
package adapter
import (
+ "bufio"
"context"
"fmt"
"os"
@@ -61,7 +62,7 @@ func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr,
streams := new(libpod.AttachStreams)
streams.OutputStream = stdout
streams.ErrorStream = stderr
- streams.InputStream = stdin
+ streams.InputStream = bufio.NewReader(stdin)
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
diff --git a/pkg/cgroups/cpu.go b/pkg/cgroups/cpu.go
index 03677f1ef..a43a76b22 100644
--- a/pkg/cgroups/cpu.go
+++ b/pkg/cgroups/cpu.go
@@ -81,14 +81,14 @@ func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error {
return err
}
if val, found := values["usage_usec"]; found {
- usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 0)
+ usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 0)
if err != nil {
return err
}
usage.Kernel *= 1000
}
if val, found := values["system_usec"]; found {
- usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 0)
+ usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 0)
if err != nil {
return err
}
diff --git a/pkg/namespaces/namespaces.go b/pkg/namespaces/namespaces.go
index 9d1033b93..78b55bb2a 100644
--- a/pkg/namespaces/namespaces.go
+++ b/pkg/namespaces/namespaces.go
@@ -25,6 +25,11 @@ func (n CgroupMode) IsHost() bool {
return n == hostType
}
+// IsDefaultValue indicates whether the cgroup namespace has the default value.
+func (n CgroupMode) IsDefaultValue() bool {
+ return n == ""
+}
+
// IsNS indicates a cgroup namespace passed in by path (ns:<path>)
func (n CgroupMode) IsNS() bool {
return strings.HasPrefix(string(n), nsType)
diff --git a/pkg/rootless/rootless_linux.c b/pkg/rootless/rootless_linux.c
index 94933ddd0..9604de638 100644
--- a/pkg/rootless/rootless_linux.c
+++ b/pkg/rootless/rootless_linux.c
@@ -24,12 +24,16 @@
int renameat2 (int olddirfd, const char *oldpath, int newdirfd, const char *newpath, unsigned int flags)
{
-# ifdef __NR_renameat2
- return (int) syscall (__NR_renameat2, olddirfd, oldpath, newdirfd, newpath, flags);
+# ifdef SYS_renameat2
+ return (int) syscall (SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, flags);
# else
- /* no way to implement it atomically. */
- errno = ENOSYS;
- return -1;
+ /* This might be an issue if another process is trying to read the file while it is empty. */
+ int fd = open (newpath, O_EXCL|O_CREAT, 0700);
+ if (fd < 0)
+ return fd;
+ close (fd);
+ /* We are sure we created the file, let's overwrite it. */
+ return rename (oldpath, newpath);
# endif
}
#endif
diff --git a/pkg/spec/config_linux_cgo.go b/pkg/spec/config_linux_cgo.go
index a1527752a..c47156456 100644
--- a/pkg/spec/config_linux_cgo.go
+++ b/pkg/spec/config_linux_cgo.go
@@ -10,7 +10,7 @@ import (
seccomp "github.com/seccomp/containers-golang"
)
-func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
var seccompConfig *spec.LinuxSeccomp
var err error
diff --git a/pkg/spec/config_linux_nocgo.go b/pkg/spec/config_linux_nocgo.go
index 10329ff3b..8d720b6d4 100644
--- a/pkg/spec/config_linux_nocgo.go
+++ b/pkg/spec/config_linux_nocgo.go
@@ -6,6 +6,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
)
-func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
return nil, nil
}
diff --git a/pkg/spec/config_unsupported.go b/pkg/spec/config_unsupported.go
index 160414878..a2c7f4416 100644
--- a/pkg/spec/config_unsupported.go
+++ b/pkg/spec/config_unsupported.go
@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
)
-func getSeccompConfig(config *CreateConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
+func getSeccompConfig(config *SecurityConfig, configSpec *spec.Spec) (*spec.LinuxSeccomp, error) {
return nil, errors.New("function not supported on non-linux OS's")
}
func addDevice(g *generate.Generator, device string) error {
diff --git a/pkg/spec/createconfig.go b/pkg/spec/createconfig.go
index 2a8fe7332..244a8d1cd 100644
--- a/pkg/spec/createconfig.go
+++ b/pkg/spec/createconfig.go
@@ -1,7 +1,6 @@
package createconfig
import (
- "net"
"os"
"strconv"
"strings"
@@ -12,7 +11,6 @@ import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/namespaces"
"github.com/containers/storage"
- "github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-connections/nat"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
@@ -55,89 +53,126 @@ type CreateResourceConfig struct {
Ulimit []string //ulimit
}
-// CreateConfig is a pre OCI spec structure. It represents user input from varlink or the CLI
-type CreateConfig struct {
- Annotations map[string]string
- Args []string
+// PidConfig configures the pid namespace for the container
+type PidConfig struct {
+ PidMode namespaces.PidMode //pid
+}
+
+// IpcConfig configures the ipc namespace for the container
+type IpcConfig struct {
+ IpcMode namespaces.IpcMode //ipc
+}
+
+// CgroupConfig configures the cgroup namespace for the container
+type CgroupConfig struct {
+ Cgroups string
+ Cgroupns string
+ CgroupParent string // cgroup-parent
+ CgroupMode namespaces.CgroupMode //cgroup
+}
+
+// UserConfig configures the user namespace for the container
+type UserConfig struct {
+ GroupAdd []string // group-add
+ IDMappings *storage.IDMappingOptions
+ UsernsMode namespaces.UsernsMode //userns
+ User string //user
+}
+
+// UtsConfig configures the uts namespace for the container
+type UtsConfig struct {
+ UtsMode namespaces.UTSMode //uts
+ NoHosts bool
+ HostAdd []string //add-host
+ Hostname string
+}
+
+// NetworkConfig configures the network namespace for the container
+type NetworkConfig struct {
+ DNSOpt []string //dns-opt
+ DNSSearch []string //dns-search
+ DNSServers []string //dns
+ ExposedPorts map[nat.Port]struct{}
+ HTTPProxy bool
+ IP6Address string //ipv6
+ IPAddress string //ip
+ LinkLocalIP []string // link-local-ip
+ MacAddress string //mac-address
+ NetMode namespaces.NetworkMode //net
+ Network string //network
+ NetworkAlias []string //network-alias
+ PortBindings nat.PortMap
+ Publish []string //publish
+ PublishAll bool //publish-all
+}
+
+// SecurityConfig configures the security features for the container
+type SecurityConfig struct {
CapAdd []string // cap-add
CapDrop []string // cap-drop
- CidFile string
- ConmonPidFile string
- Cgroupns string
- Cgroups string
- CgroupParent string // cgroup-parent
- Command []string // Full command that will be used
- UserCommand []string // User-entered command (or image CMD)
- Detach bool // detach
- Devices []string // device
- DNSOpt []string //dns-opt
- DNSSearch []string //dns-search
- DNSServers []string //dns
- Entrypoint []string //entrypoint
- Env map[string]string //env
- ExposedPorts map[nat.Port]struct{}
- GroupAdd []string // group-add
- HealthCheck *manifest.Schema2HealthConfig
- NoHosts bool
- HostAdd []string //add-host
- Hostname string //hostname
- HTTPProxy bool
- Init bool // init
- InitPath string //init-path
- Image string
- ImageID string
- BuiltinImgVolumes map[string]struct{} // volumes defined in the image config
- IDMappings *storage.IDMappingOptions
- ImageVolumeType string // how to handle the image volume, either bind, tmpfs, or ignore
- Interactive bool //interactive
- IpcMode namespaces.IpcMode //ipc
- IP6Address string //ipv6
- IPAddress string //ip
- Labels map[string]string //label
- LinkLocalIP []string // link-local-ip
- LogDriver string // log-driver
- LogDriverOpt []string // log-opt
- MacAddress string //mac-address
- Name string //name
- NetMode namespaces.NetworkMode //net
- Network string //network
- NetworkAlias []string //network-alias
- PidMode namespaces.PidMode //pid
- Pod string //pod
- PodmanPath string
- CgroupMode namespaces.CgroupMode //cgroup
- PortBindings nat.PortMap
- Privileged bool //privileged
- Publish []string //publish
- PublishAll bool //publish-all
- Quiet bool //quiet
- ReadOnlyRootfs bool //read-only
- ReadOnlyTmpfs bool //read-only-tmpfs
- Resources CreateResourceConfig
- RestartPolicy string
- Rm bool //rm
- StopSignal syscall.Signal // stop-signal
- StopTimeout uint // stop-timeout
- Sysctl map[string]string //sysctl
- Systemd bool
- Tmpfs []string // tmpfs
- Tty bool //tty
- UsernsMode namespaces.UsernsMode //userns
- User string //user
- UtsMode namespaces.UTSMode //uts
- Mounts []spec.Mount
- MountsFlag []string // mounts
- NamedVolumes []*libpod.ContainerNamedVolume
- Volumes []string //volume
- VolumesFrom []string
- WorkDir string //workdir
LabelOpts []string //SecurityOpts
NoNewPrivs bool //SecurityOpts
ApparmorProfile string //SecurityOpts
SeccompProfilePath string //SecurityOpts
SecurityOpts []string
- Rootfs string
- Syslog bool // Whether to enable syslog on exit commands
+ Privileged bool //privileged
+ ReadOnlyRootfs bool //read-only
+ ReadOnlyTmpfs bool //read-only-tmpfs
+ Sysctl map[string]string //sysctl
+}
+
+// CreateConfig is a pre OCI spec structure. It represents user input from varlink or the CLI
+type CreateConfig struct {
+ Annotations map[string]string
+ Args []string
+ CidFile string
+ ConmonPidFile string
+ Command []string // Full command that will be used
+ UserCommand []string // User-entered command (or image CMD)
+ Detach bool // detach
+ Devices []string // device
+ Entrypoint []string //entrypoint
+ Env map[string]string //env
+ HealthCheck *manifest.Schema2HealthConfig
+ Init bool // init
+ InitPath string //init-path
+ Image string
+ ImageID string
+ BuiltinImgVolumes map[string]struct{} // volumes defined in the image config
+ ImageVolumeType string // how to handle the image volume, either bind, tmpfs, or ignore
+ Interactive bool //interactive
+ Labels map[string]string //label
+ LogDriver string // log-driver
+ LogDriverOpt []string // log-opt
+ Name string //name
+ PodmanPath string
+ Pod string //pod
+ Quiet bool //quiet
+ Resources CreateResourceConfig
+ RestartPolicy string
+ Rm bool //rm
+ StopSignal syscall.Signal // stop-signal
+ StopTimeout uint // stop-timeout
+ Systemd bool
+ Tmpfs []string // tmpfs
+ Tty bool //tty
+ Mounts []spec.Mount
+ MountsFlag []string // mounts
+ NamedVolumes []*libpod.ContainerNamedVolume
+ Volumes []string //volume
+ VolumesFrom []string
+ WorkDir string //workdir
+ Rootfs string
+ Security SecurityConfig
+ Syslog bool // Whether to enable syslog on exit commands
+
+ // Namespaces
+ Pid PidConfig
+ Ipc IpcConfig
+ Cgroup CgroupConfig
+ User UserConfig
+ Uts UtsConfig
+ Network NetworkConfig
}
func u32Ptr(i int64) *uint32 { u := uint32(i); return &u }
@@ -199,7 +234,6 @@ func (c *CreateConfig) createExitCommand(runtime *libpod.Runtime) ([]string, err
// GetContainerCreateOptions takes a CreateConfig and returns a slice of CtrCreateOptions
func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *libpod.Pod, mounts []spec.Mount, namedVolumes []*libpod.ContainerNamedVolume) ([]libpod.CtrCreateOption, error) {
var options []libpod.CtrCreateOption
- var portBindings []ocicni.PortMapping
var err error
if c.Interactive {
@@ -216,15 +250,6 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
logrus.Debugf("adding container to pod %s", c.Pod)
options = append(options, runtime.WithPod(pod))
}
- if c.Cgroups == "disabled" {
- options = append(options, libpod.WithNoCgroups())
- }
- if len(c.PortBindings) > 0 {
- portBindings, err = c.CreatePortBindings()
- if err != nil {
- return nil, errors.Wrapf(err, "unable to create port bindings")
- }
- }
if len(mounts) != 0 || len(namedVolumes) != 0 {
destinations := []string{}
@@ -253,179 +278,72 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
// does not have one
options = append(options, libpod.WithEntrypoint(c.Entrypoint))
- networks := make([]string, 0)
- userNetworks := c.NetMode.UserDefined()
- if IsPod(userNetworks) {
- userNetworks = ""
- }
- if userNetworks != "" {
- for _, netName := range strings.Split(userNetworks, ",") {
- if netName == "" {
- return nil, errors.Wrapf(err, "container networks %q invalid", networks)
- }
- networks = append(networks, netName)
- }
- }
-
- if c.NetMode.IsNS() {
- ns := c.NetMode.NS()
- if ns == "" {
- return nil, errors.Errorf("invalid empty user-defined network namespace")
- }
- _, err := os.Stat(ns)
- if err != nil {
- return nil, err
- }
- } else if c.NetMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.NetMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.NetMode.Container())
- }
- options = append(options, libpod.WithNetNSFrom(connectedCtr))
- } else if !c.NetMode.IsHost() && !c.NetMode.IsNone() {
- hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0
- postConfigureNetNS := hasUserns && !c.UsernsMode.IsHost()
- options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(c.NetMode), networks))
- }
+ // TODO: MNT, USER, CGROUP
+ options = append(options, libpod.WithStopSignal(c.StopSignal))
+ options = append(options, libpod.WithStopTimeout(c.StopTimeout))
- if c.CgroupMode.IsNS() {
- ns := c.CgroupMode.NS()
- if ns == "" {
- return nil, errors.Errorf("invalid empty user-defined network namespace")
- }
- _, err := os.Stat(ns)
- if err != nil {
- return nil, err
- }
- } else if c.CgroupMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.CgroupMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.CgroupMode.Container())
- }
- options = append(options, libpod.WithCgroupNSFrom(connectedCtr))
+ logPath := getLoggingPath(c.LogDriverOpt)
+ if logPath != "" {
+ options = append(options, libpod.WithLogPath(logPath))
}
- if c.UsernsMode.IsNS() {
- ns := c.UsernsMode.NS()
- if ns == "" {
- return nil, errors.Errorf("invalid empty user-defined user namespace")
- }
- _, err := os.Stat(ns)
- if err != nil {
- return nil, err
- }
- options = append(options, libpod.WithIDMappings(*c.IDMappings))
- } else if c.UsernsMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.UsernsMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.UsernsMode.Container())
- }
- options = append(options, libpod.WithUserNSFrom(connectedCtr))
- } else {
- options = append(options, libpod.WithIDMappings(*c.IDMappings))
+ if c.LogDriver != "" {
+ options = append(options, libpod.WithLogDriver(c.LogDriver))
}
- if c.PidMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.PidMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.PidMode.Container())
- }
-
- options = append(options, libpod.WithPIDNSFrom(connectedCtr))
+ secOpts, err := c.Security.ToCreateOptions()
+ if err != nil {
+ return nil, err
}
+ options = append(options, secOpts...)
- if c.IpcMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.IpcMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.IpcMode.Container())
- }
-
- options = append(options, libpod.WithIPCNSFrom(connectedCtr))
+ nsOpts, err := c.Cgroup.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- if IsPod(string(c.UtsMode)) {
- options = append(options, libpod.WithUTSNSFromPod(pod))
+ nsOpts, err = c.Ipc.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
- if c.UtsMode.IsContainer() {
- connectedCtr, err := runtime.LookupContainer(c.UtsMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.UtsMode.Container())
- }
+ options = append(options, nsOpts...)
- options = append(options, libpod.WithUTSNSFrom(connectedCtr))
+ nsOpts, err = c.Pid.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- // TODO: MNT, USER, CGROUP
- options = append(options, libpod.WithStopSignal(c.StopSignal))
- options = append(options, libpod.WithStopTimeout(c.StopTimeout))
- if len(c.DNSSearch) > 0 {
- options = append(options, libpod.WithDNSSearch(c.DNSSearch))
- }
- if len(c.DNSServers) > 0 {
- if len(c.DNSServers) == 1 && strings.ToLower(c.DNSServers[0]) == "none" {
- options = append(options, libpod.WithUseImageResolvConf())
- } else {
- options = append(options, libpod.WithDNS(c.DNSServers))
- }
- }
- if len(c.DNSOpt) > 0 {
- options = append(options, libpod.WithDNSOption(c.DNSOpt))
- }
- if c.NoHosts {
- options = append(options, libpod.WithUseImageHosts())
- }
- if len(c.HostAdd) > 0 && !c.NoHosts {
- options = append(options, libpod.WithHosts(c.HostAdd))
- }
- logPath := getLoggingPath(c.LogDriverOpt)
- if logPath != "" {
- options = append(options, libpod.WithLogPath(logPath))
+ nsOpts, err = c.Network.ToCreateOptions(runtime, &c.User)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- if c.LogDriver != "" {
- options = append(options, libpod.WithLogDriver(c.LogDriver))
+ nsOpts, err = c.Uts.ToCreateOptions(runtime, pod)
+ if err != nil {
+ return nil, err
}
+ options = append(options, nsOpts...)
- if c.IPAddress != "" {
- ip := net.ParseIP(c.IPAddress)
- if ip == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as IP address", c.IPAddress)
- } else if ip.To4() == nil {
- return nil, errors.Wrapf(define.ErrInvalidArg, "%s is not an IPv4 address", c.IPAddress)
- }
- options = append(options, libpod.WithStaticIP(ip))
+ nsOpts, err = c.User.ToCreateOptions(runtime)
+ if err != nil {
+ return nil, err
}
-
- options = append(options, libpod.WithPrivileged(c.Privileged))
+ options = append(options, nsOpts...)
useImageVolumes := c.ImageVolumeType == TypeBind
// Gather up the options for NewContainer which consist of With... funcs
options = append(options, libpod.WithRootFSFromImage(c.ImageID, c.Image, useImageVolumes))
- options = append(options, libpod.WithSecLabels(c.LabelOpts))
options = append(options, libpod.WithConmonPidFile(c.ConmonPidFile))
options = append(options, libpod.WithLabels(c.Labels))
- options = append(options, libpod.WithUser(c.User))
- if c.IpcMode.IsHost() {
- options = append(options, libpod.WithShmDir("/dev/shm"))
-
- } else if c.IpcMode.IsContainer() {
- ctr, err := runtime.LookupContainer(c.IpcMode.Container())
- if err != nil {
- return nil, errors.Wrapf(err, "container %q not found", c.IpcMode.Container())
- }
- options = append(options, libpod.WithShmDir(ctr.ShmDir()))
- }
options = append(options, libpod.WithShmSize(c.Resources.ShmSize))
- options = append(options, libpod.WithGroups(c.GroupAdd))
if c.Rootfs != "" {
options = append(options, libpod.WithRootFS(c.Rootfs))
}
// Default used if not overridden on command line
- if c.CgroupParent != "" {
- options = append(options, libpod.WithCgroupParent(c.CgroupParent))
- }
-
if c.RestartPolicy != "" {
if c.RestartPolicy == "unless-stopped" {
return nil, errors.Wrapf(define.ErrInvalidArg, "the unless-stopped restart policy is not supported")
@@ -459,38 +377,6 @@ func (c *CreateConfig) getContainerCreateOptions(runtime *libpod.Runtime, pod *l
return options, nil
}
-// CreatePortBindings iterates ports mappings and exposed ports into a format CNI understands
-func (c *CreateConfig) CreatePortBindings() ([]ocicni.PortMapping, error) {
- return NatToOCIPortBindings(c.PortBindings)
-}
-
-// NatToOCIPortBindings iterates a nat.portmap slice and creates []ocicni portmapping slice
-func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) {
- var portBindings []ocicni.PortMapping
- for containerPb, hostPb := range ports {
- var pm ocicni.PortMapping
- pm.ContainerPort = int32(containerPb.Int())
- for _, i := range hostPb {
- var hostPort int
- var err error
- pm.HostIP = i.HostIP
- if i.HostPort == "" {
- hostPort = containerPb.Int()
- } else {
- hostPort, err = strconv.Atoi(i.HostPort)
- if err != nil {
- return nil, errors.Wrapf(err, "unable to convert host port to integer")
- }
- }
-
- pm.HostPort = int32(hostPort)
- pm.Protocol = containerPb.Proto()
- portBindings = append(portBindings, pm)
- }
- }
- return portBindings, nil
-}
-
// AddPrivilegedDevices iterates through host devices and adds all
// host devices to the spec
func (c *CreateConfig) AddPrivilegedDevices(g *generate.Generator) error {
diff --git a/pkg/spec/namespaces.go b/pkg/spec/namespaces.go
new file mode 100644
index 000000000..a45137416
--- /dev/null
+++ b/pkg/spec/namespaces.go
@@ -0,0 +1,433 @@
+package createconfig
+
+import (
+ "net"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/containers/libpod/libpod/define"
+ "github.com/containers/libpod/pkg/cgroups"
+ "github.com/cri-o/ocicni/pkg/ocicni"
+ "github.com/docker/go-connections/nat"
+ spec "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+func (c *NetworkConfig) ToCreateOptions(runtime *libpod.Runtime, userns *UserConfig) ([]libpod.CtrCreateOption, error) {
+ var portBindings []ocicni.PortMapping
+ var err error
+ if len(c.PortBindings) > 0 {
+ portBindings, err = NatToOCIPortBindings(c.PortBindings)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to create port bindings")
+ }
+ }
+
+ options := make([]libpod.CtrCreateOption, 0)
+ userNetworks := c.NetMode.UserDefined()
+ networks := make([]string, 0)
+
+ if IsPod(userNetworks) {
+ userNetworks = ""
+ }
+ if userNetworks != "" {
+ for _, netName := range strings.Split(userNetworks, ",") {
+ if netName == "" {
+ return nil, errors.Errorf("container networks %q invalid", userNetworks)
+ }
+ networks = append(networks, netName)
+ }
+ }
+
+ if c.NetMode.IsNS() {
+ ns := c.NetMode.NS()
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined network namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ } else if c.NetMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.NetMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.NetMode.Container())
+ }
+ options = append(options, libpod.WithNetNSFrom(connectedCtr))
+ } else if !c.NetMode.IsHost() && !c.NetMode.IsNone() {
+ postConfigureNetNS := userns.getPostConfigureNetNS()
+ options = append(options, libpod.WithNetNS(portBindings, postConfigureNetNS, string(c.NetMode), networks))
+ }
+
+ if len(c.DNSSearch) > 0 {
+ options = append(options, libpod.WithDNSSearch(c.DNSSearch))
+ }
+ if len(c.DNSServers) > 0 {
+ if len(c.DNSServers) == 1 && strings.ToLower(c.DNSServers[0]) == "none" {
+ options = append(options, libpod.WithUseImageResolvConf())
+ } else {
+ options = append(options, libpod.WithDNS(c.DNSServers))
+ }
+ }
+ if len(c.DNSOpt) > 0 {
+ options = append(options, libpod.WithDNSOption(c.DNSOpt))
+ }
+ if c.IPAddress != "" {
+ ip := net.ParseIP(c.IPAddress)
+ if ip == nil {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as IP address", c.IPAddress)
+ } else if ip.To4() == nil {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "%s is not an IPv4 address", c.IPAddress)
+ }
+ options = append(options, libpod.WithStaticIP(ip))
+ }
+
+ if c.MacAddress != "" {
+ mac, err := net.ParseMAC(c.MacAddress)
+ if err != nil {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "cannot parse %s as MAC address: %v", c.MacAddress, err)
+ }
+ options = append(options, libpod.WithStaticMAC(mac))
+ }
+
+ return options, nil
+}
+
+func (c *NetworkConfig) ConfigureGenerator(g *generate.Generator) error {
+ netMode := c.NetMode
+ if netMode.IsHost() {
+ logrus.Debug("Using host netmode")
+ if err := g.RemoveLinuxNamespace(string(spec.NetworkNamespace)); err != nil {
+ return err
+ }
+ } else if netMode.IsNone() {
+ logrus.Debug("Using none netmode")
+ } else if netMode.IsBridge() {
+ logrus.Debug("Using bridge netmode")
+ } else if netCtr := netMode.Container(); netCtr != "" {
+ logrus.Debugf("using container %s netmode", netCtr)
+ } else if IsNS(string(netMode)) {
+ logrus.Debug("Using ns netmode")
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), NS(string(netMode))); err != nil {
+ return err
+ }
+ } else if IsPod(string(netMode)) {
+ logrus.Debug("Using pod netmode, unless pod is not sharing")
+ } else if netMode.IsSlirp4netns() {
+ logrus.Debug("Using slirp4netns netmode")
+ } else if netMode.IsUserDefined() {
+ logrus.Debug("Using user defined netmode")
+ } else {
+ return errors.Errorf("unknown network mode")
+ }
+
+ if c.HTTPProxy {
+ for _, envSpec := range []string{
+ "http_proxy",
+ "HTTP_PROXY",
+ "https_proxy",
+ "HTTPS_PROXY",
+ "ftp_proxy",
+ "FTP_PROXY",
+ "no_proxy",
+ "NO_PROXY",
+ } {
+ envVal := os.Getenv(envSpec)
+ if envVal != "" {
+ g.AddProcessEnv(envSpec, envVal)
+ }
+ }
+ }
+
+ if g.Config.Annotations == nil {
+ g.Config.Annotations = make(map[string]string)
+ }
+
+ if c.PublishAll {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseTrue
+ } else {
+ g.Config.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseFalse
+ }
+
+ return nil
+}
+
+// NatToOCIPortBindings iterates a nat.portmap slice and creates []ocicni portmapping slice
+func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) {
+ var portBindings []ocicni.PortMapping
+ for containerPb, hostPb := range ports {
+ var pm ocicni.PortMapping
+ pm.ContainerPort = int32(containerPb.Int())
+ for _, i := range hostPb {
+ var hostPort int
+ var err error
+ pm.HostIP = i.HostIP
+ if i.HostPort == "" {
+ hostPort = containerPb.Int()
+ } else {
+ hostPort, err = strconv.Atoi(i.HostPort)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to convert host port to integer")
+ }
+ }
+
+ pm.HostPort = int32(hostPort)
+ pm.Protocol = containerPb.Proto()
+ portBindings = append(portBindings, pm)
+ }
+ }
+ return portBindings, nil
+}
+
+func (c *CgroupConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.CgroupMode.IsNS() {
+ ns := c.CgroupMode.NS()
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined network namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ } else if c.CgroupMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.CgroupMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.CgroupMode.Container())
+ }
+ options = append(options, libpod.WithCgroupNSFrom(connectedCtr))
+ }
+
+ if c.CgroupParent != "" {
+ options = append(options, libpod.WithCgroupParent(c.CgroupParent))
+ }
+
+ if c.Cgroups == "disabled" {
+ options = append(options, libpod.WithNoCgroups())
+ }
+
+ return options, nil
+}
+
+func (c *UserConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.UsernsMode.IsNS() {
+ ns := c.UsernsMode.NS()
+ if ns == "" {
+ return nil, errors.Errorf("invalid empty user-defined user namespace")
+ }
+ _, err := os.Stat(ns)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, libpod.WithIDMappings(*c.IDMappings))
+ } else if c.UsernsMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.UsernsMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.UsernsMode.Container())
+ }
+ options = append(options, libpod.WithUserNSFrom(connectedCtr))
+ } else {
+ options = append(options, libpod.WithIDMappings(*c.IDMappings))
+ }
+
+ options = append(options, libpod.WithUser(c.User))
+ options = append(options, libpod.WithGroups(c.GroupAdd))
+
+ return options, nil
+}
+
+func (c *UserConfig) ConfigureGenerator(g *generate.Generator) error {
+ if IsNS(string(c.UsernsMode)) {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), NS(string(c.UsernsMode))); err != nil {
+ return err
+ }
+ // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
+ g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
+ g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
+ }
+
+ if (len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0) && !c.UsernsMode.IsHost() {
+ if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
+ return err
+ }
+ }
+ for _, uidmap := range c.IDMappings.UIDMap {
+ g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
+ }
+ for _, gidmap := range c.IDMappings.GIDMap {
+ g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
+ }
+ return nil
+}
+
+func (c *UserConfig) getPostConfigureNetNS() bool {
+ hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0
+ postConfigureNetNS := hasUserns && !c.UsernsMode.IsHost()
+ return postConfigureNetNS
+}
+
+func (c *UserConfig) InNS(isRootless bool) bool {
+ hasUserns := c.UsernsMode.IsContainer() || c.UsernsMode.IsNS() || len(c.IDMappings.UIDMap) > 0 || len(c.IDMappings.GIDMap) > 0
+ return isRootless || (hasUserns && !c.UsernsMode.IsHost())
+}
+
+func (c *IpcConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.IpcMode.IsHost() {
+ options = append(options, libpod.WithShmDir("/dev/shm"))
+ } else if c.IpcMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.IpcMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.IpcMode.Container())
+ }
+
+ options = append(options, libpod.WithIPCNSFrom(connectedCtr))
+ options = append(options, libpod.WithShmDir(connectedCtr.ShmDir()))
+ }
+
+ return options, nil
+}
+
+func (c *IpcConfig) ConfigureGenerator(g *generate.Generator) error {
+ ipcMode := c.IpcMode
+ if IsNS(string(ipcMode)) {
+ return g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), NS(string(ipcMode)))
+ }
+ if ipcMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.IPCNamespace))
+ }
+ if ipcCtr := ipcMode.Container(); ipcCtr != "" {
+ logrus.Debugf("Using container %s ipcmode", ipcCtr)
+ }
+
+ return nil
+}
+
+func (c *CgroupConfig) ConfigureGenerator(g *generate.Generator) error {
+ cgroupMode := c.CgroupMode
+ if cgroupMode.IsDefaultValue() {
+ // If the value is not specified, default to "private" on cgroups v2 and "host" on cgroups v1.
+ unified, err := cgroups.IsCgroup2UnifiedMode()
+ if err != nil {
+ return err
+ }
+ if unified {
+ cgroupMode = "private"
+ } else {
+ cgroupMode = "host"
+ }
+ }
+ if cgroupMode.IsNS() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), NS(string(cgroupMode)))
+ }
+ if cgroupMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.CgroupNamespace))
+ }
+ if cgroupMode.IsPrivate() {
+ return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), "")
+ }
+ if cgCtr := cgroupMode.Container(); cgCtr != "" {
+ logrus.Debugf("Using container %s cgroup mode", cgCtr)
+ }
+ return nil
+}
+
+func (c *PidConfig) ToCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if c.PidMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.PidMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.PidMode.Container())
+ }
+
+ options = append(options, libpod.WithPIDNSFrom(connectedCtr))
+ }
+
+ return options, nil
+}
+
+func (c *PidConfig) ConfigureGenerator(g *generate.Generator) error {
+ pidMode := c.PidMode
+ if IsNS(string(pidMode)) {
+ return g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), NS(string(pidMode)))
+ }
+ if pidMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.PIDNamespace))
+ }
+ if pidCtr := pidMode.Container(); pidCtr != "" {
+ logrus.Debugf("using container %s pidmode", pidCtr)
+ }
+ if IsPod(string(pidMode)) {
+ logrus.Debug("using pod pidmode")
+ }
+ return nil
+}
+
+func (c *UtsConfig) ToCreateOptions(runtime *libpod.Runtime, pod *libpod.Pod) ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ if IsPod(string(c.UtsMode)) {
+ options = append(options, libpod.WithUTSNSFromPod(pod))
+ }
+ if c.UtsMode.IsContainer() {
+ connectedCtr, err := runtime.LookupContainer(c.UtsMode.Container())
+ if err != nil {
+ return nil, errors.Wrapf(err, "container %q not found", c.UtsMode.Container())
+ }
+
+ options = append(options, libpod.WithUTSNSFrom(connectedCtr))
+ }
+ if c.NoHosts {
+ options = append(options, libpod.WithUseImageHosts())
+ }
+ if len(c.HostAdd) > 0 && !c.NoHosts {
+ options = append(options, libpod.WithHosts(c.HostAdd))
+ }
+
+ return options, nil
+}
+
+func (c *UtsConfig) ConfigureGenerator(g *generate.Generator, net *NetworkConfig, runtime *libpod.Runtime) error {
+ hostname := c.Hostname
+ var err error
+ if hostname == "" {
+ if utsCtrID := c.UtsMode.Container(); utsCtrID != "" {
+ utsCtr, err := runtime.GetContainer(utsCtrID)
+ if err != nil {
+ return errors.Wrapf(err, "unable to retrieve hostname from dependency container %s", utsCtrID)
+ }
+ hostname = utsCtr.Hostname()
+ } else if net.NetMode.IsHost() || c.UtsMode.IsHost() {
+ hostname, err = os.Hostname()
+ if err != nil {
+ return errors.Wrap(err, "unable to retrieve hostname of the host")
+ }
+ } else {
+ logrus.Debug("No hostname set; container's hostname will default to runtime default")
+ }
+ }
+ g.RemoveHostname()
+ if c.Hostname != "" || !c.UtsMode.IsHost() {
+ // Set the hostname in the OCI configuration only
+ // if specified by the user or if we are creating
+ // a new UTS namespace.
+ g.SetHostname(hostname)
+ }
+ g.AddProcessEnv("HOSTNAME", hostname)
+
+ utsMode := c.UtsMode
+ if IsNS(string(utsMode)) {
+ return g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), NS(string(utsMode)))
+ }
+ if utsMode.IsHost() {
+ return g.RemoveLinuxNamespace(string(spec.UTSNamespace))
+ }
+ if utsCtr := utsMode.Container(); utsCtr != "" {
+ logrus.Debugf("using container %s utsmode", utsCtr)
+ }
+ return nil
+}
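
A small sketch (not a call into the package) restating the conversion NatToOCIPortBindings performs on the nat.PortMap shape used above: each host binding of a container port becomes one CNI-style port mapping, and an empty HostPort falls back to the container port number.

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := nat.PortMap{
		"8080/tcp": {{HostIP: "127.0.0.1", HostPort: "80"}},
		"53/udp":   {{HostPort: ""}}, // host port defaults to the container port
	}
	for ctrPort, bindings := range ports {
		for _, b := range bindings {
			host := b.HostPort
			if host == "" {
				host = fmt.Sprintf("%d", ctrPort.Int())
			}
			fmt.Printf("%s:%s -> %d/%s\n", b.HostIP, host, ctrPort.Int(), ctrPort.Proto())
		}
	}
}
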
diff --git a/pkg/spec/security.go b/pkg/spec/security.go
new file mode 100644
index 000000000..05ed94e66
--- /dev/null
+++ b/pkg/spec/security.go
@@ -0,0 +1,172 @@
+package createconfig
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/libpod/libpod"
+ "github.com/docker/docker/oci/caps"
+ "github.com/opencontainers/runtime-tools/generate"
+ "github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
+)
+
+func (c *SecurityConfig) ToCreateOptions() ([]libpod.CtrCreateOption, error) {
+ options := make([]libpod.CtrCreateOption, 0)
+ options = append(options, libpod.WithSecLabels(c.LabelOpts))
+ options = append(options, libpod.WithPrivileged(c.Privileged))
+ return options, nil
+}
+
+func (c *SecurityConfig) SetLabelOpts(runtime *libpod.Runtime, pidConfig *PidConfig, ipcConfig *IpcConfig) error {
+ if c.Privileged {
+ c.LabelOpts = label.DisableSecOpt()
+ return nil
+ }
+
+ var labelOpts []string
+ if pidConfig.PidMode.IsHost() {
+ labelOpts = append(labelOpts, label.DisableSecOpt()...)
+ } else if pidConfig.PidMode.IsContainer() {
+ ctr, err := runtime.LookupContainer(pidConfig.PidMode.Container())
+ if err != nil {
+ return errors.Wrapf(err, "container %q not found", pidConfig.PidMode.Container())
+ }
+ secopts, err := label.DupSecOpt(ctr.ProcessLabel())
+ if err != nil {
+ return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ }
+ labelOpts = append(labelOpts, secopts...)
+ }
+
+ if ipcConfig.IpcMode.IsHost() {
+ labelOpts = append(labelOpts, label.DisableSecOpt()...)
+ } else if ipcConfig.IpcMode.IsContainer() {
+ ctr, err := runtime.LookupContainer(ipcConfig.IpcMode.Container())
+ if err != nil {
+ return errors.Wrapf(err, "container %q not found", ipcConfig.IpcMode.Container())
+ }
+ secopts, err := label.DupSecOpt(ctr.ProcessLabel())
+ if err != nil {
+ return errors.Wrapf(err, "failed to duplicate label %q ", ctr.ProcessLabel())
+ }
+ labelOpts = append(labelOpts, secopts...)
+ }
+
+ c.LabelOpts = append(c.LabelOpts, labelOpts...)
+ return nil
+}
+
+func (c *SecurityConfig) SetSecurityOpts(runtime *libpod.Runtime, securityOpts []string) error {
+ for _, opt := range securityOpts {
+ if opt == "no-new-privileges" {
+ c.NoNewPrivs = true
+ } else {
+ con := strings.SplitN(opt, "=", 2)
+ if len(con) != 2 {
+ return fmt.Errorf("invalid --security-opt 1: %q", opt)
+ }
+
+ switch con[0] {
+ case "label":
+ c.LabelOpts = append(c.LabelOpts, con[1])
+ case "apparmor":
+ c.ApparmorProfile = con[1]
+ case "seccomp":
+ c.SeccompProfilePath = con[1]
+ default:
+ return fmt.Errorf("invalid --security-opt 2: %q", opt)
+ }
+ }
+ }
+
+ if c.SeccompProfilePath == "" {
+ var err error
+ c.SeccompProfilePath, err = libpod.DefaultSeccompPath()
+ if err != nil {
+ return err
+ }
+ }
+ c.SecurityOpts = securityOpts
+ return nil
+}
+
+func (c *SecurityConfig) ConfigureGenerator(g *generate.Generator, user *UserConfig) error {
+ // HANDLE CAPABILITIES
+ // NOTE: Must happen before SECCOMP
+ if c.Privileged {
+ g.SetupPrivileged(true)
+ }
+
+ useNotRoot := func(user string) bool {
+ if user == "" || user == "root" || user == "0" {
+ return false
+ }
+ return true
+ }
+
+ configSpec := g.Config
+ var err error
+ var caplist []string
+ bounding := configSpec.Process.Capabilities.Bounding
+ if useNotRoot(user.User) {
+ configSpec.Process.Capabilities.Bounding = caplist
+ }
+ caplist, err = caps.TweakCapabilities(configSpec.Process.Capabilities.Bounding, c.CapAdd, c.CapDrop, nil, false)
+ if err != nil {
+ return err
+ }
+
+ configSpec.Process.Capabilities.Bounding = caplist
+ configSpec.Process.Capabilities.Permitted = caplist
+ configSpec.Process.Capabilities.Inheritable = caplist
+ configSpec.Process.Capabilities.Effective = caplist
+ configSpec.Process.Capabilities.Ambient = caplist
+ if useNotRoot(user.User) {
+ caplist, err = caps.TweakCapabilities(bounding, c.CapAdd, c.CapDrop, nil, false)
+ if err != nil {
+ return err
+ }
+ }
+ configSpec.Process.Capabilities.Bounding = caplist
+
+ // HANDLE SECCOMP
+ if c.SeccompProfilePath != "unconfined" {
+ seccompConfig, err := getSeccompConfig(c, configSpec)
+ if err != nil {
+ return err
+ }
+ configSpec.Linux.Seccomp = seccompConfig
+ }
+
+ // Clear default Seccomp profile from Generator for privileged containers
+ if c.SeccompProfilePath == "unconfined" || c.Privileged {
+ configSpec.Linux.Seccomp = nil
+ }
+
+ for _, opt := range c.SecurityOpts {
+ // Split on both : and =
+ splitOpt := strings.Split(opt, "=")
+ if len(splitOpt) == 1 {
+ splitOpt = strings.Split(opt, ":")
+ }
+ if len(splitOpt) < 2 {
+ continue
+ }
+ switch splitOpt[0] {
+ case "label":
+ configSpec.Annotations[libpod.InspectAnnotationLabel] = splitOpt[1]
+ case "seccomp":
+ configSpec.Annotations[libpod.InspectAnnotationSeccomp] = splitOpt[1]
+ case "apparmor":
+ configSpec.Annotations[libpod.InspectAnnotationApparmor] = splitOpt[1]
+ }
+ }
+
+ g.SetRootReadonly(c.ReadOnlyRootfs)
+ for sysctlKey, sysctlVal := range c.Sysctl {
+ g.AddLinuxSysctl(sysctlKey, sysctlVal)
+ }
+
+ return nil
+}
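SetSecurityOpts above treats "no-new-privileges" as a bare flag and requires every other --security-opt value to be key=value with a key of label, apparmor, or seccomp. A minimal stand-alone sketch of that parsing rule (the parseSecurityOpts helper and secOpts struct are hypothetical, not the libpod types):

    package main

    import (
        "fmt"
        "strings"
    )

    type secOpts struct {
        NoNewPrivs      bool
        LabelOpts       []string
        ApparmorProfile string
        SeccompProfile  string
    }

    func parseSecurityOpts(opts []string) (secOpts, error) {
        var out secOpts
        for _, opt := range opts {
            if opt == "no-new-privileges" {
                out.NoNewPrivs = true
                continue
            }
            kv := strings.SplitN(opt, "=", 2)
            if len(kv) != 2 {
                return out, fmt.Errorf("invalid --security-opt: %q", opt)
            }
            switch kv[0] {
            case "label":
                out.LabelOpts = append(out.LabelOpts, kv[1])
            case "apparmor":
                out.ApparmorProfile = kv[1]
            case "seccomp":
                out.SeccompProfile = kv[1]
            default:
                return out, fmt.Errorf("invalid --security-opt: %q", opt)
            }
        }
        return out, nil
    }

    func main() {
        parsed, err := parseSecurityOpts([]string{"label=disable", "seccomp=/tmp/profile.json", "no-new-privileges"})
        fmt.Println(parsed, err)
    }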
diff --git a/pkg/spec/spec.go b/pkg/spec/spec.go
index da5c14948..7a220012f 100644
--- a/pkg/spec/spec.go
+++ b/pkg/spec/spec.go
@@ -1,20 +1,19 @@
package createconfig
import (
- "os"
"strings"
"github.com/containers/libpod/libpod"
+ libpodconfig "github.com/containers/libpod/libpod/config"
+ "github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/sysinfo"
- "github.com/docker/docker/oci/caps"
"github.com/docker/go-units"
"github.com/opencontainers/runc/libcontainer/user"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const cpuPeriod = 100000
@@ -45,14 +44,13 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
canMountSys := true
isRootless := rootless.IsRootless()
- hasUserns := config.UsernsMode.IsContainer() || config.UsernsMode.IsNS() || len(config.IDMappings.UIDMap) > 0 || len(config.IDMappings.GIDMap) > 0
- inUserNS := isRootless || (hasUserns && !config.UsernsMode.IsHost())
+ inUserNS := config.User.InNS(isRootless)
- if inUserNS && config.NetMode.IsHost() {
+ if inUserNS && config.Network.NetMode.IsHost() {
canMountSys = false
}
- if config.Privileged && canMountSys {
+ if config.Security.Privileged && canMountSys {
cgroupPerm = "rw"
g.RemoveMount("/sys")
sysMnt := spec.Mount{
@@ -66,7 +64,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
addCgroup = false
g.RemoveMount("/sys")
r := "ro"
- if config.Privileged {
+ if config.Security.Privileged {
r = "rw"
}
sysMnt := spec.Mount{
@@ -76,7 +74,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"},
}
g.AddMount(sysMnt)
- if !config.Privileged && isRootless {
+ if !config.Security.Privileged && isRootless {
g.AddLinuxMaskedPaths("/sys/kernel")
}
}
@@ -90,9 +88,9 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// When using a different user namespace, check that the GID 5 is mapped inside
// the container.
- if gid5Available && len(config.IDMappings.GIDMap) > 0 {
+ if gid5Available && len(config.User.IDMappings.GIDMap) > 0 {
mappingFound := false
- for _, r := range config.IDMappings.GIDMap {
+ for _, r := range config.User.IDMappings.GIDMap {
if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
mappingFound = true
break
@@ -115,7 +113,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
g.AddMount(devPts)
}
- if inUserNS && config.IpcMode.IsHost() {
+ if inUserNS && config.Ipc.IpcMode.IsHost() {
g.RemoveMount("/dev/mqueue")
devMqueue := spec.Mount{
Destination: "/dev/mqueue",
@@ -125,7 +123,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
g.AddMount(devMqueue)
}
- if inUserNS && config.PidMode.IsHost() {
+ if inUserNS && config.Pid.PidMode.IsHost() {
g.RemoveMount("/proc")
procMount := spec.Mount{
Destination: "/proc",
@@ -152,55 +150,6 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
for key, val := range config.Annotations {
g.AddAnnotation(key, val)
}
- g.SetRootReadonly(config.ReadOnlyRootfs)
-
- if config.HTTPProxy {
- for _, envSpec := range []string{
- "http_proxy",
- "HTTP_PROXY",
- "https_proxy",
- "HTTPS_PROXY",
- "ftp_proxy",
- "FTP_PROXY",
- "no_proxy",
- "NO_PROXY",
- } {
- envVal := os.Getenv(envSpec)
- if envVal != "" {
- g.AddProcessEnv(envSpec, envVal)
- }
- }
- }
-
- hostname := config.Hostname
- if hostname == "" {
- if utsCtrID := config.UtsMode.Container(); utsCtrID != "" {
- utsCtr, err := runtime.GetContainer(utsCtrID)
- if err != nil {
- return nil, errors.Wrapf(err, "unable to retrieve hostname from dependency container %s", utsCtrID)
- }
- hostname = utsCtr.Hostname()
- } else if config.NetMode.IsHost() || config.UtsMode.IsHost() {
- hostname, err = os.Hostname()
- if err != nil {
- return nil, errors.Wrap(err, "unable to retrieve hostname of the host")
- }
- } else {
- logrus.Debug("No hostname set; container's hostname will default to runtime default")
- }
- }
- g.RemoveHostname()
- if config.Hostname != "" || !config.UtsMode.IsHost() {
- // Set the hostname in the OCI configuration only
- // if specified by the user or if we are creating
- // a new UTS namespace.
- g.SetHostname(hostname)
- }
- g.AddProcessEnv("HOSTNAME", hostname)
-
- for sysctlKey, sysctlVal := range config.Sysctl {
- g.AddLinuxSysctl(sysctlKey, sysctlVal)
- }
g.AddProcessEnv("container", "podman")
addedResources := false
@@ -270,7 +219,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
// Devices
- if config.Privileged {
+ if config.Security.Privileged {
// If privileged, we need to add all the host devices to the
// spec. We do not add the user provided ones because we are
// already adding them all.
@@ -285,22 +234,16 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
}
- for _, uidmap := range config.IDMappings.UIDMap {
- g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
- }
- for _, gidmap := range config.IDMappings.GIDMap {
- g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
- }
// SECURITY OPTS
- g.SetProcessNoNewPrivileges(config.NoNewPrivs)
+ g.SetProcessNoNewPrivileges(config.Security.NoNewPrivs)
- if !config.Privileged {
- g.SetProcessApparmorProfile(config.ApparmorProfile)
+ if !config.Security.Privileged {
+ g.SetProcessApparmorProfile(config.Security.ApparmorProfile)
}
blockAccessToKernelFilesystems(config, &g)
- var runtimeConfig *libpod.RuntimeConfig
+ var runtimeConfig *libpodconfig.Config
if runtime != nil {
runtimeConfig, err = runtime.GetConfig()
@@ -321,7 +264,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
if err != nil {
return nil, err
}
- if (!cgroup2 || (runtimeConfig != nil && runtimeConfig.CgroupManager != libpod.SystemdCgroupsManager)) && config.Resources.PidsLimit == sysinfo.GetDefaultPidsLimit() {
+ if (!cgroup2 || (runtimeConfig != nil && runtimeConfig.CgroupManager != define.SystemdCgroupsManager)) && config.Resources.PidsLimit == sysinfo.GetDefaultPidsLimit() {
setPidLimit = false
}
}
@@ -339,54 +282,35 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
return nil, err
}
- if err := addPidNS(config, &g); err != nil {
+ // NAMESPACES
+
+ if err := config.Pid.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addUserNS(config, &g); err != nil {
+ if err := config.User.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addNetNS(config, &g); err != nil {
+ if err := config.Network.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addUTSNS(config, &g); err != nil {
+ if err := config.Uts.ConfigureGenerator(&g, &config.Network, runtime); err != nil {
return nil, err
}
- if err := addIpcNS(config, &g); err != nil {
+ if err := config.Ipc.ConfigureGenerator(&g); err != nil {
return nil, err
}
- if err := addCgroupNS(config, &g); err != nil {
+ if err := config.Cgroup.ConfigureGenerator(&g); err != nil {
return nil, err
}
configSpec := g.Config
- // HANDLE CAPABILITIES
- // NOTE: Must happen before SECCOMP
- if !config.Privileged {
- if err := setupCapabilities(config, configSpec); err != nil {
- return nil, err
- }
- } else {
- g.SetupPrivileged(true)
- }
-
- // HANDLE SECCOMP
-
- if config.SeccompProfilePath != "unconfined" {
- seccompConfig, err := getSeccompConfig(config, configSpec)
- if err != nil {
- return nil, err
- }
- configSpec.Linux.Seccomp = seccompConfig
- }
-
- // Clear default Seccomp profile from Generator for privileged containers
- if config.SeccompProfilePath == "unconfined" || config.Privileged {
- configSpec.Linux.Seccomp = nil
+ if err := config.Security.ConfigureGenerator(&g, &config.User); err != nil {
+ return nil, err
}
// BIND MOUNTS
@@ -417,7 +341,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
configSpec.Linux.Resources = &spec.LinuxResources{}
}
- canUseResources := cgroup2 && runtimeConfig != nil && (runtimeConfig.CgroupManager == libpod.SystemdCgroupsManager)
+ canUseResources := cgroup2 && runtimeConfig != nil && (runtimeConfig.CgroupManager == define.SystemdCgroupsManager)
if addedResources && !canUseResources {
return nil, errors.New("invalid configuration, cannot specify resource limits without cgroups v2 and --cgroup-manager=systemd")
@@ -428,7 +352,7 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
}
}
- switch config.Cgroups {
+ switch config.Cgroup.Cgroups {
case "disabled":
if addedResources {
return nil, errors.New("cannot specify resource limits when cgroups are disabled is specified")
@@ -459,48 +383,23 @@ func (config *CreateConfig) createConfigToOCISpec(runtime *libpod.Runtime, userM
configSpec.Annotations[libpod.InspectAnnotationVolumesFrom] = strings.Join(config.VolumesFrom, ",")
}
- if config.Privileged {
+ if config.Security.Privileged {
configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseTrue
} else {
configSpec.Annotations[libpod.InspectAnnotationPrivileged] = libpod.InspectResponseFalse
}
- if config.PublishAll {
- configSpec.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseTrue
- } else {
- configSpec.Annotations[libpod.InspectAnnotationPublishAll] = libpod.InspectResponseFalse
- }
-
if config.Init {
configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseTrue
} else {
configSpec.Annotations[libpod.InspectAnnotationInit] = libpod.InspectResponseFalse
}
- for _, opt := range config.SecurityOpts {
- // Split on both : and =
- splitOpt := strings.Split(opt, "=")
- if len(splitOpt) == 1 {
- splitOpt = strings.Split(opt, ":")
- }
- if len(splitOpt) < 2 {
- continue
- }
- switch splitOpt[0] {
- case "label":
- configSpec.Annotations[libpod.InspectAnnotationLabel] = splitOpt[1]
- case "seccomp":
- configSpec.Annotations[libpod.InspectAnnotationSeccomp] = splitOpt[1]
- case "apparmor":
- configSpec.Annotations[libpod.InspectAnnotationApparmor] = splitOpt[1]
- }
- }
-
return configSpec, nil
}
func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator) {
- if !config.Privileged {
+ if !config.Security.Privileged {
for _, mp := range []string{
"/proc/acpi",
"/proc/kcore",
@@ -516,7 +415,7 @@ func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator)
g.AddLinuxMaskedPaths(mp)
}
- if config.PidMode.IsHost() && rootless.IsRootless() {
+ if config.Pid.PidMode.IsHost() && rootless.IsRootless() {
return
}
@@ -533,117 +432,6 @@ func blockAccessToKernelFilesystems(config *CreateConfig, g *generate.Generator)
}
}
-func addPidNS(config *CreateConfig, g *generate.Generator) error {
- pidMode := config.PidMode
- if IsNS(string(pidMode)) {
- return g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), NS(string(pidMode)))
- }
- if pidMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.PIDNamespace))
- }
- if pidCtr := pidMode.Container(); pidCtr != "" {
- logrus.Debugf("using container %s pidmode", pidCtr)
- }
- if IsPod(string(pidMode)) {
- logrus.Debug("using pod pidmode")
- }
- return nil
-}
-
-func addUserNS(config *CreateConfig, g *generate.Generator) error {
- if IsNS(string(config.UsernsMode)) {
- if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), NS(string(config.UsernsMode))); err != nil {
- return err
- }
- // runc complains if no mapping is specified, even if we join another ns. So provide a dummy mapping
- g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1))
- g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1))
- }
-
- if (len(config.IDMappings.UIDMap) > 0 || len(config.IDMappings.GIDMap) > 0) && !config.UsernsMode.IsHost() {
- if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
- return err
- }
- }
- return nil
-}
-
-func addNetNS(config *CreateConfig, g *generate.Generator) error {
- netMode := config.NetMode
- if netMode.IsHost() {
- logrus.Debug("Using host netmode")
- return g.RemoveLinuxNamespace(string(spec.NetworkNamespace))
- } else if netMode.IsNone() {
- logrus.Debug("Using none netmode")
- return nil
- } else if netMode.IsBridge() {
- logrus.Debug("Using bridge netmode")
- return nil
- } else if netCtr := netMode.Container(); netCtr != "" {
- logrus.Debugf("using container %s netmode", netCtr)
- return nil
- } else if IsNS(string(netMode)) {
- logrus.Debug("Using ns netmode")
- return g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), NS(string(netMode)))
- } else if IsPod(string(netMode)) {
- logrus.Debug("Using pod netmode, unless pod is not sharing")
- return nil
- } else if netMode.IsSlirp4netns() {
- logrus.Debug("Using slirp4netns netmode")
- return nil
- } else if netMode.IsUserDefined() {
- logrus.Debug("Using user defined netmode")
- return nil
- }
- return errors.Errorf("unknown network mode")
-}
-
-func addUTSNS(config *CreateConfig, g *generate.Generator) error {
- utsMode := config.UtsMode
- if IsNS(string(utsMode)) {
- return g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), NS(string(utsMode)))
- }
- if utsMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.UTSNamespace))
- }
- if utsCtr := utsMode.Container(); utsCtr != "" {
- logrus.Debugf("using container %s utsmode", utsCtr)
- }
- return nil
-}
-
-func addIpcNS(config *CreateConfig, g *generate.Generator) error {
- ipcMode := config.IpcMode
- if IsNS(string(ipcMode)) {
- return g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), NS(string(ipcMode)))
- }
- if ipcMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.IPCNamespace))
- }
- if ipcCtr := ipcMode.Container(); ipcCtr != "" {
- logrus.Debugf("Using container %s ipcmode", ipcCtr)
- }
-
- return nil
-}
-
-func addCgroupNS(config *CreateConfig, g *generate.Generator) error {
- cgroupMode := config.CgroupMode
- if cgroupMode.IsNS() {
- return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), NS(string(cgroupMode)))
- }
- if cgroupMode.IsHost() {
- return g.RemoveLinuxNamespace(string(spec.CgroupNamespace))
- }
- if cgroupMode.IsPrivate() {
- return g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), "")
- }
- if cgCtr := cgroupMode.Container(); cgCtr != "" {
- logrus.Debugf("Using container %s cgroup mode", cgCtr)
- }
- return nil
-}
-
func addRlimits(config *CreateConfig, g *generate.Generator) error {
var (
kernelMax uint64 = 1048576
@@ -687,37 +475,3 @@ func addRlimits(config *CreateConfig, g *generate.Generator) error {
return nil
}
-
-func setupCapabilities(config *CreateConfig, configSpec *spec.Spec) error {
- useNotRoot := func(user string) bool {
- if user == "" || user == "root" || user == "0" {
- return false
- }
- return true
- }
-
- var err error
- var caplist []string
- bounding := configSpec.Process.Capabilities.Bounding
- if useNotRoot(config.User) {
- configSpec.Process.Capabilities.Bounding = caplist
- }
- caplist, err = caps.TweakCapabilities(configSpec.Process.Capabilities.Bounding, config.CapAdd, config.CapDrop, nil, false)
- if err != nil {
- return err
- }
-
- configSpec.Process.Capabilities.Bounding = caplist
- configSpec.Process.Capabilities.Permitted = caplist
- configSpec.Process.Capabilities.Inheritable = caplist
- configSpec.Process.Capabilities.Effective = caplist
- configSpec.Process.Capabilities.Ambient = caplist
- if useNotRoot(config.User) {
- caplist, err = caps.TweakCapabilities(bounding, config.CapAdd, config.CapDrop, nil, false)
- if err != nil {
- return err
- }
- }
- configSpec.Process.Capabilities.Bounding = caplist
- return nil
-}
diff --git a/pkg/spec/spec_test.go b/pkg/spec/spec_test.go
index 2f91e1b21..0f63b2bbc 100644
--- a/pkg/spec/spec_test.go
+++ b/pkg/spec/spec_test.go
@@ -21,9 +21,9 @@ var (
func makeTestCreateConfig() *CreateConfig {
cc := new(CreateConfig)
cc.Resources = CreateResourceConfig{}
- cc.IDMappings = new(storage.IDMappingOptions)
- cc.IDMappings.UIDMap = []idtools.IDMap{}
- cc.IDMappings.GIDMap = []idtools.IDMap{}
+ cc.User.IDMappings = new(storage.IDMappingOptions)
+ cc.User.IDMappings.UIDMap = []idtools.IDMap{}
+ cc.User.IDMappings.GIDMap = []idtools.IDMap{}
return cc
}
diff --git a/pkg/spec/storage.go b/pkg/spec/storage.go
index 095534589..79c065b5d 100644
--- a/pkg/spec/storage.go
+++ b/pkg/spec/storage.go
@@ -160,7 +160,7 @@ func (config *CreateConfig) parseVolumes(runtime *libpod.Runtime) ([]spec.Mount,
}
// If requested, add tmpfs filesystems for read-only containers.
- if config.ReadOnlyRootfs && config.ReadOnlyTmpfs {
+ if config.Security.ReadOnlyRootfs && config.Security.ReadOnlyTmpfs {
readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"}
options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
for _, dest := range readonlyTmpfs {
@@ -514,11 +514,17 @@ func getTmpfsMount(args []string) (spec.Mount, error) {
Source: TypeTmpfs,
}
- var setDest, setRORW, setSuid, setDev, setExec bool
+ var setDest, setRORW, setSuid, setDev, setExec, setTmpcopyup bool
for _, val := range args {
kv := strings.Split(val, "=")
switch kv[0] {
+ case "tmpcopyup", "notmpcopyup":
+ if setTmpcopyup {
+ return newMount, errors.Wrapf(optionArgError, "cannot pass 'tmpcopyup' and 'notmpcopyup' options more than once")
+ }
+ setTmpcopyup = true
+ newMount.Options = append(newMount.Options, kv[0])
case "ro", "rw":
if setRORW {
return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once")
@@ -801,7 +807,7 @@ func (config *CreateConfig) addContainerInitBinary(path string) (spec.Mount, err
if path == "" {
return mount, fmt.Errorf("please specify a path to the container-init binary")
}
- if !config.PidMode.IsPrivate() {
+ if !config.Pid.PidMode.IsPrivate() {
return mount, fmt.Errorf("cannot add init binary as PID 1 (PID namespace isn't private)")
}
if config.Systemd {
diff --git a/pkg/util/mountOpts.go b/pkg/util/mountOpts.go
index 670daeaf9..d21800bc3 100644
--- a/pkg/util/mountOpts.go
+++ b/pkg/util/mountOpts.go
@@ -30,6 +30,8 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ bool
)
+ var newOptions []string
+
for _, opt := range options {
// Some options have parameters - size, mode
splitOpt := strings.SplitN(opt, "=", 2)
@@ -80,9 +82,19 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts")
}
if foundCopyUp {
- return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' option can only be set once")
+ return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once")
+ }
+ foundCopyUp = true
+ case "notmpcopyup":
+ if !isTmpfs {
+ return nil, errors.Wrapf(ErrBadMntOption, "the 'notmpcopyup' option is only allowed with tmpfs mounts")
+ }
+ if foundCopyUp {
+ return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once")
}
foundCopyUp = true
+ // do not propagate notmpcopyup to the OCI runtime
+ continue
case "bind", "rbind":
if isTmpfs {
return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts")
@@ -101,29 +113,30 @@ func ProcessOptions(options []string, isTmpfs bool, defaults *DefaultMountOption
default:
return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt)
}
+ newOptions = append(newOptions, opt)
}
if !foundWrite {
- options = append(options, "rw")
+ newOptions = append(newOptions, "rw")
}
if !foundProp {
- options = append(options, "rprivate")
+ newOptions = append(newOptions, "rprivate")
}
if !foundExec && (defaults == nil || defaults.Noexec) {
- options = append(options, "noexec")
+ newOptions = append(newOptions, "noexec")
}
if !foundSuid && (defaults == nil || defaults.Nosuid) {
- options = append(options, "nosuid")
+ newOptions = append(newOptions, "nosuid")
}
if !foundDev && (defaults == nil || defaults.Nodev) {
- options = append(options, "nodev")
+ newOptions = append(newOptions, "nodev")
}
if isTmpfs && !foundCopyUp {
- options = append(options, "tmpcopyup")
+ newOptions = append(newOptions, "tmpcopyup")
}
if !isTmpfs && !foundBind {
- options = append(options, "rbind")
+ newOptions = append(newOptions, "rbind")
}
- return options, nil
+ return newOptions, nil
}
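The rewritten ProcessOptions collects validated options into a fresh newOptions slice and consumes "notmpcopyup" itself, since only "tmpcopyup" is meaningful to the OCI runtime. A reduced sketch of that filter-while-validating pattern (the filterMountOptions helper is hypothetical, not the libpod code):

    package main

    import "fmt"

    // filterMountOptions copies options into a new slice, remembers whether a
    // copy-up choice was made, and drops podman-only options such as "notmpcopyup".
    func filterMountOptions(options []string) []string {
        var out []string
        foundCopyUp := false
        for _, opt := range options {
            switch opt {
            case "tmpcopyup":
                foundCopyUp = true
            case "notmpcopyup":
                foundCopyUp = true
                continue // consumed by podman, never forwarded to the runtime
            }
            out = append(out, opt)
        }
        if !foundCopyUp {
            out = append(out, "tmpcopyup") // tmpfs default when nothing was requested
        }
        return out
    }

    func main() {
        fmt.Println(filterMountOptions([]string{"rw", "notmpcopyup", "nosuid"}))
        // prints: [rw nosuid]
    }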
diff --git a/pkg/util/utils.go b/pkg/util/utils.go
index 71f3e26dc..633d8a124 100644
--- a/pkg/util/utils.go
+++ b/pkg/util/utils.go
@@ -3,6 +3,7 @@ package util
import (
"fmt"
"os"
+ "os/user"
"path/filepath"
"regexp"
"strings"
@@ -440,3 +441,16 @@ func ExitCode(err error) int {
return 126
}
+
+// HomeDir returns the home directory for the current user.
+func HomeDir() (string, error) {
+ home := os.Getenv("HOME")
+ if home == "" {
+ usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to resolve HOME directory")
+ }
+ home = usr.HomeDir
+ }
+ return home, nil
+}
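HomeDir prefers $HOME and only falls back to a user-database lookup (keyed on the rootless UID) when the variable is unset. A simplified sketch of the same fallback using the standard library's os/user.Current in place of the rootless-UID lookup (an assumption made for illustration, not the libpod code):

    package main

    import (
        "fmt"
        "os"
        "os/user"
    )

    func homeDir() (string, error) {
        if home := os.Getenv("HOME"); home != "" {
            return home, nil
        }
        usr, err := user.Current() // consult the user database when HOME is unset
        if err != nil {
            return "", fmt.Errorf("unable to resolve HOME directory: %w", err)
        }
        return usr.HomeDir, nil
    }

    func main() {
        home, err := homeDir()
        fmt.Println(home, err)
    }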
diff --git a/pkg/varlinkapi/attach.go b/pkg/varlinkapi/attach.go
index 37adbbf55..5beca3c6f 100644
--- a/pkg/varlinkapi/attach.go
+++ b/pkg/varlinkapi/attach.go
@@ -32,7 +32,7 @@ func setupStreams(call iopodman.VarlinkCall) (*bufio.Reader, *bufio.Writer, *io.
streams := libpod.AttachStreams{
OutputStream: stdoutWriter,
- InputStream: pr,
+ InputStream: bufio.NewReader(pr),
// Runc eats the error stream
ErrorStream: stdoutWriter,
AttachInput: true,
diff --git a/pkg/varlinkapi/containers.go b/pkg/varlinkapi/containers.go
index b471ee2cf..94726bbbd 100644
--- a/pkg/varlinkapi/containers.go
+++ b/pkg/varlinkapi/containers.go
@@ -739,7 +739,7 @@ func (i *LibpodAPI) GetContainersLogs(call iopodman.VarlinkCall, names []string,
options := logs.LogOptions{
Follow: follow,
Since: sinceTime,
- Tail: uint64(tail),
+ Tail: tail,
Timestamps: timestamps,
}
@@ -747,7 +747,11 @@ func (i *LibpodAPI) GetContainersLogs(call iopodman.VarlinkCall, names []string,
if len(names) > 1 {
options.Multi = true
}
- logChannel := make(chan *logs.LogLine, int(tail)*len(names)+1)
+ tailLen := int(tail)
+ if tailLen < 0 {
+ tailLen = 0
+ }
+ logChannel := make(chan *logs.LogLine, tailLen*len(names)+1)
containers, err := shortcuts.GetContainersByContext(false, latest, names, i.Runtime)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
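GetContainersLogs now passes tail through unconverted and clamps it before sizing the log channel, because a negative buffer length makes make() panic at runtime. A tiny stand-alone illustration (the logBuffer helper is hypothetical, not libpod code):

    package main

    import "fmt"

    // logBuffer sizes a channel from a tail value, clamping negatives first so
    // make() never sees a negative buffer length.
    func logBuffer(tail int64, numContainers int) chan string {
        tailLen := int(tail)
        if tailLen < 0 { // e.g. a negative tail meaning "no limit"
            tailLen = 0
        }
        return make(chan string, tailLen*numContainers+1)
    }

    func main() {
        ch := logBuffer(-1, 3)
        fmt.Println(cap(ch)) // 1
    }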
diff --git a/rootless.md b/rootless.md
index 8cccb86eb..4fb3c7deb 100644
--- a/rootless.md
+++ b/rootless.md
@@ -6,6 +6,7 @@ Contributors are more than welcomed to help with this work. If you decide to ca
* Podman can not create containers that bind to ports < 1024.
* The kernel does not allow processes without CAP_NET_BIND_SERVICE to bind to low ports.
+ * You can modify the `net.ipv4.ip_unprivileged_port_start` sysctl to change the lowest port. For example, `sysctl net.ipv4.ip_unprivileged_port_start=443` allows rootless Podman containers to bind to ports >= 443.
* “How To” documentation is patchy at best.
* If /etc/subuid and /etc/subgid are not setup for a user, then podman commands
can easily fail
@@ -13,11 +14,11 @@ can easily fail
* We are working to get support for NSSWITCH on the /etc/subuid and /etc/subgid files.
* No cgroup V1 Support
* cgroup V1 does not safely support cgroup delegation.
- * However, cgroup V2 provides cgroup delegation and is available on Fedora starting with version 29 and other Linux distributions.
-* Some systemd's unit configuration options do not work in the rootless container
- * systemd fails to apply several options and failures are silently ignored (e.g. CPUShares, MemoryLimit).
+ * As of Fedora 31, the default is cgroup V2, which fully supports rootless cgroup management. Note this requires setting --cgroup-manager to systemd within rootless containers, which new containers get by default.
+* Some systemd unit configuration options do not work in rootless containers
+ * systemd fails to apply several options, and the failures are silently ignored (e.g. CPUShares, MemoryLimit). These should work on cgroup V2.
* Use of certain options will cause service startup failures (e.g. PrivateNetwork).
-* Can not share container images with CRI-O or other users
+* Cannot share container images with CRI-O or other rootful users
* Difficult to use additional stores for sharing content
* Does not work on NFS or parallel filesystem homedirs (e.g. [GPFS](https://www.ibm.com/support/knowledgecenter/en/SSFKCN/gpfs_welcome.html))
* NFS and parallel filesystems enforce file creation on different UIDs on the server side and does not understand User Namespace.
@@ -35,9 +36,9 @@ can easily fail
* Requires new shadow-utils (not found in older (RHEL7/Centos7 distros) Should be fixed in RHEL7.7 release)
* A few commands do not work.
* mount/unmount (on fuse-overlay)
- * Only works if you enter the mount namespace with a tool like buildah unshare
- * podman stats (Without cgroup V2 support)
+ * Only works if you enter the mount namespace with podman unshare
+ * podman stats (Works with cgroup V2 support)
* Checkpoint and Restore (CRIU requires root)
- * Pause and Unpause (no freezer cgroup)
+ * Pause and Unpause (Works with cgroup V2 support)
* Issues with higher UIDs can cause builds to fail
* If a build is attempting to use a UID that is not mapped into the user namespace mapping for a container, then builds will not be able to put the UID in an image.
diff --git a/test/e2e/checkpoint_test.go b/test/e2e/checkpoint_test.go
index 4b43ceb30..2d3efcbef 100644
--- a/test/e2e/checkpoint_test.go
+++ b/test/e2e/checkpoint_test.go
@@ -334,6 +334,10 @@ var _ = Describe("Podman checkpoint", func() {
IPBefore.WaitWithDefaultTimeout()
Expect(IPBefore.ExitCode()).To(Equal(0))
+ MACBefore := podmanTest.Podman([]string{"inspect", "-l", "--format={{.NetworkSettings.MacAddress}}"})
+ MACBefore.WaitWithDefaultTimeout()
+ Expect(MACBefore.ExitCode()).To(Equal(0))
+
result := podmanTest.Podman([]string{"container", "checkpoint", "test_name"})
result.WaitWithDefaultTimeout()
@@ -348,9 +352,16 @@ var _ = Describe("Podman checkpoint", func() {
IPAfter.WaitWithDefaultTimeout()
Expect(IPAfter.ExitCode()).To(Equal(0))
+ MACAfter := podmanTest.Podman([]string{"inspect", "-l", "--format={{.NetworkSettings.MacAddress}}"})
+ MACAfter.WaitWithDefaultTimeout()
+ Expect(MACAfter.ExitCode()).To(Equal(0))
+
// Check that IP address did not change between checkpointing and restoring
Expect(IPBefore.OutputToString()).To(Equal(IPAfter.OutputToString()))
+ // Check that MAC address did not change between checkpointing and restoring
+ Expect(MACBefore.OutputToString()).To(Equal(MACAfter.OutputToString()))
+
Expect(result.ExitCode()).To(Equal(0))
Expect(podmanTest.NumberOfContainersRunning()).To(Equal(1))
Expect(podmanTest.GetContainerStatus()).To(ContainSubstring("Up"))
diff --git a/test/e2e/create_staticmac_test.go b/test/e2e/create_staticmac_test.go
new file mode 100644
index 000000000..6460659f7
--- /dev/null
+++ b/test/e2e/create_staticmac_test.go
@@ -0,0 +1,46 @@
+// +build !remoteclient
+
+package integration
+
+import (
+ "os"
+
+ . "github.com/containers/libpod/test/utils"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("Podman run with --mac-address flag", func() {
+ var (
+ tempdir string
+ err error
+ podmanTest *PodmanTestIntegration
+ )
+
+ BeforeEach(func() {
+ SkipIfRootless()
+ tempdir, err = CreateTempDirInTempDir()
+ if err != nil {
+ os.Exit(1)
+ }
+ podmanTest = PodmanTestCreate(tempdir)
+ podmanTest.Setup()
+ podmanTest.SeedImages()
+ // Cleanup the CNI networks used by the tests
+ os.RemoveAll("/var/lib/cni/networks/podman")
+ })
+
+ AfterEach(func() {
+ podmanTest.Cleanup()
+ f := CurrentGinkgoTestDescription()
+ processTestResult(f)
+
+ })
+
+ It("Podman run --mac-address", func() {
+ result := podmanTest.Podman([]string{"run", "--mac-address", "92:d0:c6:0a:29:34", ALPINE, "ip", "addr"})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ Expect(result.OutputToString()).To(ContainSubstring("92:d0:c6:0a:29:34"))
+ })
+})
diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go
index f5dca321c..7d977f4e3 100644
--- a/test/e2e/create_test.go
+++ b/test/e2e/create_test.go
@@ -297,4 +297,11 @@ var _ = Describe("Podman create", func() {
Expect(session.ExitCode()).To((Equal(0)))
Expect(string(session.Out.Contents())).To(ContainSubstring(ALPINEARM64DIGEST))
})
+
+ It("podman create --authfile with nonexist authfile", func() {
+ SkipIfRemote()
+ session := podmanTest.PodmanNoCache([]string{"create", "--authfile", "/tmp/nonexist", "--name=foo", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
})
diff --git a/test/e2e/generate_kube_test.go b/test/e2e/generate_kube_test.go
index 5d3b1238a..603edbe6b 100644
--- a/test/e2e/generate_kube_test.go
+++ b/test/e2e/generate_kube_test.go
@@ -208,6 +208,39 @@ var _ = Describe("Podman generate kube", func() {
Expect(psOut).To(ContainSubstring("test2"))
})
+ It("podman generate with user and reimport kube on pod", func() {
+ podName := "toppod"
+ _, rc, _ := podmanTest.CreatePod(podName)
+ Expect(rc).To(Equal(0))
+
+ session := podmanTest.Podman([]string{"create", "--pod", podName, "--name", "test1", "--user", "100:200", ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ inspect := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(Equal(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring("100:200"))
+
+ outputFile := filepath.Join(podmanTest.RunRoot, "pod.yaml")
+ kube := podmanTest.Podman([]string{"generate", "kube", "-f", outputFile, podName})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"pod", "rm", "-af"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ session = podmanTest.Podman([]string{"play", "kube", outputFile})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+
+ inspect1 := podmanTest.Podman([]string{"inspect", "--format", "{{.Config.User}}", "test1"})
+ inspect1.WaitWithDefaultTimeout()
+ Expect(inspect1.ExitCode()).To(Equal(0))
+ Expect(inspect1.OutputToString()).To(ContainSubstring(inspect.OutputToString()))
+ })
+
It("podman generate kube with volume", func() {
vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
err := os.MkdirAll(vol1, 0755)
diff --git a/test/e2e/login_logout_test.go b/test/e2e/login_logout_test.go
index 14cfed5db..c3df10f5e 100644
--- a/test/e2e/login_logout_test.go
+++ b/test/e2e/login_logout_test.go
@@ -123,6 +123,11 @@ var _ = Describe("Podman login and logout", func() {
json.Unmarshal(authInfo, &info)
fmt.Println(info)
+ // push should fail with nonexist authfile
+ session = podmanTest.Podman([]string{"push", "--authfile", "/tmp/nonexist", ALPINE, testImg})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
session = podmanTest.Podman([]string{"push", "--authfile", authFile, ALPINE, testImg})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
@@ -131,6 +136,11 @@ var _ = Describe("Podman login and logout", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ // logout should fail with nonexist authfile
+ session = podmanTest.Podman([]string{"logout", "--authfile", "/tmp/nonexist", server})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
session = podmanTest.Podman([]string{"logout", "--authfile", authFile, server})
})
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index f34d85d76..e25364695 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -57,6 +57,18 @@ var _ = Describe("Podman logs", func() {
Expect(len(results.OutputToStringArray())).To(Equal(2))
})
+ It("podman logs tail zero lines", func() {
+ logc := podmanTest.Podman([]string{"run", "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
+ logc.WaitWithDefaultTimeout()
+ Expect(logc.ExitCode()).To(Equal(0))
+ cid := logc.OutputToString()
+
+ results := podmanTest.Podman([]string{"logs", "--tail", "0", cid})
+ results.WaitWithDefaultTimeout()
+ Expect(results.ExitCode()).To(Equal(0))
+ Expect(len(results.OutputToStringArray())).To(Equal(0))
+ })
+
It("podman logs tail 99 lines", func() {
logc := podmanTest.Podman([]string{"run", "-dt", ALPINE, "sh", "-c", "echo podman; echo podman; echo podman"})
logc.WaitWithDefaultTimeout()
diff --git a/test/e2e/play_kube_test.go b/test/e2e/play_kube_test.go
index 7069e049d..416c64b5a 100644
--- a/test/e2e/play_kube_test.go
+++ b/test/e2e/play_kube_test.go
@@ -209,6 +209,16 @@ var _ = Describe("Podman generate kube", func() {
processTestResult(f)
})
+ It("podman play kube fail with nonexist authfile", func() {
+ err := generateKubeYaml(getPod(), kubeYaml)
+ Expect(err).To(BeNil())
+
+ kube := podmanTest.Podman([]string{"play", "kube", "--authfile", "/tmp/nonexist", kubeYaml})
+ kube.WaitWithDefaultTimeout()
+ Expect(kube.ExitCode()).To(Not(Equal(0)))
+
+ })
+
It("podman play kube test correct command", func() {
err := generateKubeYaml(getPod(), kubeYaml)
Expect(err).To(BeNil())
diff --git a/test/e2e/pull_test.go b/test/e2e/pull_test.go
index 5152409af..eee0b131b 100644
--- a/test/e2e/pull_test.go
+++ b/test/e2e/pull_test.go
@@ -353,4 +353,11 @@ var _ = Describe("Podman pull", func() {
rmi.WaitWithDefaultTimeout()
Expect(rmi.ExitCode()).To(Equal(0))
})
+
+ It("podman pull from docker with nonexist --authfile", func() {
+ SkipIfRemote()
+ session := podmanTest.PodmanNoCache([]string{"pull", "--authfile", "/tmp/nonexist", ALPINE})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
})
diff --git a/test/e2e/run_cgroup_parent_test.go b/test/e2e/run_cgroup_parent_test.go
index 1fb9f6871..14294eeac 100644
--- a/test/e2e/run_cgroup_parent_test.go
+++ b/test/e2e/run_cgroup_parent_test.go
@@ -40,7 +40,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
Skip("Must be containerized to run this test.")
}
cgroup := "/zzz"
- run := podmanTest.Podman([]string{"run", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/self/cgroup"})
+ run := podmanTest.Podman([]string{"run", "--cgroupns=host", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/self/cgroup"})
run.WaitWithDefaultTimeout()
Expect(run.ExitCode()).To(Equal(0))
ok, _ := run.GrepString(cgroup)
@@ -52,7 +52,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
if !Containerized() && podmanTest.CgroupManager != "cgroupfs" {
cgroup = "/machine.slice"
}
- run := podmanTest.Podman([]string{"run", fedoraMinimal, "cat", "/proc/self/cgroup"})
+ run := podmanTest.Podman([]string{"run", "--cgroupns=host", fedoraMinimal, "cat", "/proc/self/cgroup"})
run.WaitWithDefaultTimeout()
Expect(run.ExitCode()).To(Equal(0))
ok, _ := run.GrepString(cgroup)
@@ -64,7 +64,7 @@ var _ = Describe("Podman run with --cgroup-parent", func() {
Skip("Requires Systemd cgroup manager support")
}
cgroup := "aaaa.slice"
- run := podmanTest.Podman([]string{"run", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/1/cgroup"})
+ run := podmanTest.Podman([]string{"run", "--cgroupns=host", "--cgroup-parent", cgroup, fedoraMinimal, "cat", "/proc/1/cgroup"})
run.WaitWithDefaultTimeout()
Expect(run.ExitCode()).To(Equal(0))
ok, _ := run.GrepString(cgroup)
diff --git a/test/e2e/run_selinux_test.go b/test/e2e/run_selinux_test.go
index ebc36b7f1..358137aa9 100644
--- a/test/e2e/run_selinux_test.go
+++ b/test/e2e/run_selinux_test.go
@@ -170,7 +170,7 @@ var _ = Describe("Podman run", func() {
setup.WaitWithDefaultTimeout()
Expect(setup.ExitCode()).To(Equal(0))
- session := podmanTest.Podman([]string{"exec", "test1", "cat", "/proc/self/attr/current"})
+ session := podmanTest.Podman([]string{"exec", "test1", "cat", "/proc/1/attr/current"})
session.WaitWithDefaultTimeout()
session1 := podmanTest.Podman([]string{"exec", "test1", "cat", "/proc/self/attr/current"})
session1.WaitWithDefaultTimeout()
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index 874aa498e..7fc85c9ce 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -992,4 +992,11 @@ USER mail`
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
})
+
+ It("podman run should fail with nonexist authfile", func() {
+ SkipIfRemote()
+ session := podmanTest.Podman([]string{"run", "--authfile", "/tmp/nonexist", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+ })
})
diff --git a/test/e2e/run_volume_test.go b/test/e2e/run_volume_test.go
index 8e5de85e4..0c2389e40 100644
--- a/test/e2e/run_volume_test.go
+++ b/test/e2e/run_volume_test.go
@@ -116,6 +116,28 @@ var _ = Describe("Podman run with volumes", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
Expect(session.OutputToString()).To(ContainSubstring("/run/test rw,nosuid,nodev,noexec,relatime - tmpfs"))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,tmpcopyup", ALPINE, "ls", "/etc/ssl"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(ContainSubstring("certs"))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,tmpcopyup,notmpcopyup", ALPINE, "ls", "/etc/ssl"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=bind,src=/tmp,target=/tmp,tmpcopyup", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=bind,src=/tmp,target=/tmp,notmpcopyup", ALPINE, "true"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Not(Equal(0)))
+
+ session = podmanTest.Podman([]string{"run", "--rm", "--mount", "type=tmpfs,target=/etc/ssl,notmpcopyup", ALPINE, "ls", "/etc/ssl"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Not(ContainSubstring("certs")))
})
It("podman run with conflicting volumes errors", func() {
diff --git a/test/e2e/runlabel_test.go b/test/e2e/runlabel_test.go
index 52a011efb..41d61e9d9 100644
--- a/test/e2e/runlabel_test.go
+++ b/test/e2e/runlabel_test.go
@@ -98,4 +98,19 @@ var _ = Describe("podman container runlabel", func() {
result.WaitWithDefaultTimeout()
Expect(result.ExitCode()).To(Equal(0))
})
+
+ It("runlabel should fail with nonexist authfile", func() {
+ SkipIfRemote()
+ image := "podman-runlabel-test:podman"
+ podmanTest.BuildImage(PodmanDockerfile, image, "false")
+
+ // runlabel should fail with nonexist authfile
+ result := podmanTest.Podman([]string{"container", "runlabel", "--authfile", "/tmp/nonexist", "RUN", image})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Not(Equal(0)))
+
+ result = podmanTest.Podman([]string{"rmi", image})
+ result.WaitWithDefaultTimeout()
+ Expect(result.ExitCode()).To(Equal(0))
+ })
})
diff --git a/test/e2e/search_test.go b/test/e2e/search_test.go
index 9c28849f0..d88231510 100644
--- a/test/e2e/search_test.go
+++ b/test/e2e/search_test.go
@@ -391,4 +391,12 @@ registries = ['{{.Host}}:{{.Port}}']`
// cleanup
resetRegistriesConfigEnv()
})
+
+ // search should fail with nonexist authfile
+ It("podman search fail with nonexist --authfile", func() {
+ SkipIfRemote()
+ search := podmanTest.Podman([]string{"search", "--authfile", "/tmp/nonexist", ALPINE})
+ search.WaitWithDefaultTimeout()
+ Expect(search.ExitCode()).To(Not(Equal(0)))
+ })
})
diff --git a/test/e2e/start_test.go b/test/e2e/start_test.go
index da581f158..47b058845 100644
--- a/test/e2e/start_test.go
+++ b/test/e2e/start_test.go
@@ -57,15 +57,32 @@ var _ = Describe("Podman start", func() {
session = podmanTest.Podman([]string{"container", "start", cid})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Equal(cid))
+ })
+
+ It("podman container start single container by short id", func() {
+ session := podmanTest.Podman([]string{"container", "create", "-d", ALPINE, "ls"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ cid := session.OutputToString()
+ session = podmanTest.Podman([]string{"container", "start", cid[0:10]})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(Equal(0))
+ Expect(session.OutputToString()).To(Equal(cid))
})
It("podman start single container by name", func() {
- session := podmanTest.Podman([]string{"create", "-d", "--name", "foobar99", ALPINE, "ls"})
+ name := "foobar99"
+ session := podmanTest.Podman([]string{"create", "-d", "--name", name, ALPINE, "ls"})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
- session = podmanTest.Podman([]string{"start", "foobar99"})
+ session = podmanTest.Podman([]string{"start", name})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(Equal(0))
+ if podmanTest.RemoteTest {
+ Skip("Container-start name check doesn't work on remote client. It always returns the full ID.")
+ }
+ Expect(session.OutputToString()).To(Equal(name))
})
It("podman start multiple containers", func() {
diff --git a/test/e2e/test.yaml b/test/e2e/test.yaml
new file mode 100644
index 000000000..98d2c91df
--- /dev/null
+++ b/test/e2e/test.yaml
@@ -0,0 +1,37 @@
+# Save the output of this file and use kubectl create -f to import
+# it into Kubernetes.
+#
+# Created with podman-1.6.2
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: test
+ name: test
+spec:
+ containers:
+ - command:
+ - sleep
+ - "100"
+ env:
+ - name: PATH
+ value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: TERM
+ value: xterm
+ - name: container
+ value: podman
+ image: docker.io/library/fedora:latest
+ name: test
+ resources: {}
+ securityContext:
+ runAsUser: 1000
+ runAsGroup: 3000
+ fsGroup: 2000
+ allowPrivilegeEscalation: true
+ capabilities: {}
+ privileged: false
+ seLinuxOptions:
+ level: "s0:c1,c2"
+ readOnlyRootFilesystem: false
+ workingDir: /
+status: {}
diff --git a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
deleted file mode 100644
index 7647734de..000000000
--- a/vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go
+++ /dev/null
@@ -1,943 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-mksyscall_windows generates windows system call bodies
-
-It parses all files specified on command line containing function
-prototypes (like syscall_windows.go) and prints system call bodies
-to standard output.
-
-The prototypes are marked by lines beginning with "//sys" and read
-like func declarations if //sys is replaced by func, but:
-
-* The parameter lists must give a name for each argument. This
- includes return parameters.
-
-* The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
-
-* If the return parameter is an error number, it must be named err.
-
-* If go func name needs to be different from it's winapi dll name,
- the winapi name could be specified at the end, after "=" sign, like
- //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
-
-* Each function that returns err needs to supply a condition, that
- return value of winapi will be tested against to detect failure.
- This would set err to windows "last-error", otherwise it will be nil.
- The value can be provided at end of //sys declaration, like
- //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
- and is [failretval==0] by default.
-
-Usage:
- mksyscall_windows [flags] [path ...]
-
-The flags are:
- -output
- Specify output file name (outputs to console if blank).
- -trace
- Generate print statement after every syscall.
-*/
-package main
-
-import (
- "bufio"
- "bytes"
- "errors"
- "flag"
- "fmt"
- "go/format"
- "go/parser"
- "go/token"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "text/template"
-)
-
-var (
- filename = flag.String("output", "", "output file name (standard output if omitted)")
- printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
- systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
- winio = flag.Bool("winio", false, "import go-winio")
-)
-
-func trim(s string) string {
- return strings.Trim(s, " \t")
-}
-
-var packageName string
-
-func packagename() string {
- return packageName
-}
-
-func syscalldot() string {
- if packageName == "syscall" {
- return ""
- }
- return "syscall."
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
- fn *Fn
- tmpVarIdx int
-}
-
-// tmpVar returns temp variable name that will be used to represent p during syscall.
-func (p *Param) tmpVar() string {
- if p.tmpVarIdx < 0 {
- p.tmpVarIdx = p.fn.curTmpVarIdx
- p.fn.curTmpVarIdx++
- }
- return fmt.Sprintf("_p%d", p.tmpVarIdx)
-}
-
-// BoolTmpVarCode returns source code for bool temp variable.
-func (p *Param) BoolTmpVarCode() string {
- const code = `var %s uint32
- if %s {
- %s = 1
- } else {
- %s = 0
- }`
- tmp := p.tmpVar()
- return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
-}
-
-// SliceTmpVarCode returns source code for slice temp variable.
-func (p *Param) SliceTmpVarCode() string {
- const code = `var %s *%s
- if len(%s) > 0 {
- %s = &%s[0]
- }`
- tmp := p.tmpVar()
- return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
-}
-
-// StringTmpVarCode returns source code for string temp variable.
-func (p *Param) StringTmpVarCode() string {
- errvar := p.fn.Rets.ErrorVarName()
- if errvar == "" {
- errvar = "_"
- }
- tmp := p.tmpVar()
- const code = `var %s %s
- %s, %s = %s(%s)`
- s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
- if errvar == "-" {
- return s
- }
- const morecode = `
- if %s != nil {
- return
- }`
- return s + fmt.Sprintf(morecode, errvar)
-}
-
-// TmpVarCode returns source code for temp variable.
-func (p *Param) TmpVarCode() string {
- switch {
- case p.Type == "bool":
- return p.BoolTmpVarCode()
- case strings.HasPrefix(p.Type, "[]"):
- return p.SliceTmpVarCode()
- default:
- return ""
- }
-}
-
-// TmpVarHelperCode returns source code for helper's temp variable.
-func (p *Param) TmpVarHelperCode() string {
- if p.Type != "string" {
- return ""
- }
- return p.StringTmpVarCode()
-}
-
-// SyscallArgList returns source code fragments representing p parameter
-// in syscall. Slices are translated into 2 syscall parameters: pointer to
-// the first element and length.
-func (p *Param) SyscallArgList() []string {
- t := p.HelperType()
- var s string
- switch {
- case t[0] == '*':
- s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
- case t == "bool":
- s = p.tmpVar()
- case strings.HasPrefix(t, "[]"):
- return []string{
- fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
- fmt.Sprintf("uintptr(len(%s))", p.Name),
- }
- default:
- s = p.Name
- }
- return []string{fmt.Sprintf("uintptr(%s)", s)}
-}
-
-// IsError determines if p parameter is used to return error.
-func (p *Param) IsError() bool {
- return p.Name == "err" && p.Type == "error"
-}
-
-// HelperType returns type of parameter p used in helper function.
-func (p *Param) HelperType() string {
- if p.Type == "string" {
- return p.fn.StrconvType()
- }
- return p.Type
-}
-
-// join concatenates parameters ps into a string with sep separator.
-// Each parameter is converted into string by applying fn to it
-// before conversion.
-func join(ps []*Param, fn func(*Param) string, sep string) string {
- if len(ps) == 0 {
- return ""
- }
- a := make([]string, 0)
- for _, p := range ps {
- a = append(a, fn(p))
- }
- return strings.Join(a, sep)
-}
-
-// Rets describes function return parameters.
-type Rets struct {
- Name string
- Type string
- ReturnsError bool
- FailCond string
-}
-
-// ErrorVarName returns error variable name for r.
-func (r *Rets) ErrorVarName() string {
- if r.ReturnsError {
- return "err"
- }
- if r.Type == "error" {
- return r.Name
- }
- return ""
-}
-
-// ToParams converts r into slice of *Param.
-func (r *Rets) ToParams() []*Param {
- ps := make([]*Param, 0)
- if len(r.Name) > 0 {
- ps = append(ps, &Param{Name: r.Name, Type: r.Type})
- }
- if r.ReturnsError {
- ps = append(ps, &Param{Name: "err", Type: "error"})
- }
- return ps
-}
-
-// List returns source code of syscall return parameters.
-func (r *Rets) List() string {
- s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
- if len(s) > 0 {
- s = "(" + s + ")"
- }
- return s
-}
-
-// PrintList returns source code of trace printing part correspondent
-// to syscall return values.
-func (r *Rets) PrintList() string {
- return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
-}
-
-// SetReturnValuesCode returns source code that accepts syscall return values.
-func (r *Rets) SetReturnValuesCode() string {
- if r.Name == "" && !r.ReturnsError {
- return ""
- }
- retvar := "r0"
- if r.Name == "" {
- retvar = "r1"
- }
- errvar := "_"
- if r.ReturnsError {
- errvar = "e1"
- }
- return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
-}
-
-func (r *Rets) useLongHandleErrorCode(retvar string) string {
- const code = `if %s {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = %sEINVAL
- }
- }`
- cond := retvar + " == 0"
- if r.FailCond != "" {
- cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
- }
- return fmt.Sprintf(code, cond, syscalldot())
-}
-
-// SetErrorCode returns source code that sets return parameters.
-func (r *Rets) SetErrorCode() string {
- const code = `if r0 != 0 {
- %s = %sErrno(r0)
- }`
- const hrCode = `if int32(r0) < 0 {
- if r0&0x1fff0000 == 0x00070000 {
- r0 &= 0xffff
- }
- %s = %sErrno(r0)
- }`
- if r.Name == "" && !r.ReturnsError {
- return ""
- }
- if r.Name == "" {
- return r.useLongHandleErrorCode("r1")
- }
- if r.Type == "error" {
- if r.Name == "hr" {
- return fmt.Sprintf(hrCode, r.Name, syscalldot())
- } else {
- return fmt.Sprintf(code, r.Name, syscalldot())
- }
- }
- s := ""
- switch {
- case r.Type[0] == '*':
- s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
- case r.Type == "bool":
- s = fmt.Sprintf("%s = r0 != 0", r.Name)
- default:
- s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
- }
- if !r.ReturnsError {
- return s
- }
- return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
-}
-
-// Fn describes syscall function.
-type Fn struct {
- Name string
- Params []*Param
- Rets *Rets
- PrintTrace bool
- confirmproc bool
- dllname string
- dllfuncname string
- src string
- // TODO: get rid of this field and just use parameter index instead
- curTmpVarIdx int // insure tmp variables have uniq names
-}
-
-// extractParams parses s to extract function parameters.
-func extractParams(s string, f *Fn) ([]*Param, error) {
- s = trim(s)
- if s == "" {
- return nil, nil
- }
- a := strings.Split(s, ",")
- ps := make([]*Param, len(a))
- for i := range ps {
- s2 := trim(a[i])
- b := strings.Split(s2, " ")
- if len(b) != 2 {
- b = strings.Split(s2, "\t")
- if len(b) != 2 {
- return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
- }
- }
- ps[i] = &Param{
- Name: trim(b[0]),
- Type: trim(b[1]),
- fn: f,
- tmpVarIdx: -1,
- }
- }
- return ps, nil
-}
-
-// extractSection extracts text out of string s starting after start
-// and ending just before end. found return value will indicate success,
-// and prefix, body and suffix will contain correspondent parts of string s.
-func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
- s = trim(s)
- if strings.HasPrefix(s, string(start)) {
- // no prefix
- body = s[1:]
- } else {
- a := strings.SplitN(s, string(start), 2)
- if len(a) != 2 {
- return "", "", s, false
- }
- prefix = a[0]
- body = a[1]
- }
- a := strings.SplitN(body, string(end), 2)
- if len(a) != 2 {
- return "", "", "", false
- }
- return prefix, a[0], a[1], true
-}
-
-// newFn parses string s and return created function Fn.
-func newFn(s string) (*Fn, error) {
- s = trim(s)
- f := &Fn{
- Rets: &Rets{},
- src: s,
- PrintTrace: *printTraceFlag,
- }
- // function name and args
- prefix, body, s, found := extractSection(s, '(', ')')
- if !found || prefix == "" {
- return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
- }
- f.Name = prefix
- var err error
- f.Params, err = extractParams(body, f)
- if err != nil {
- return nil, err
- }
- // return values
- _, body, s, found = extractSection(s, '(', ')')
- if found {
- r, err := extractParams(body, f)
- if err != nil {
- return nil, err
- }
- switch len(r) {
- case 0:
- case 1:
- if r[0].IsError() {
- f.Rets.ReturnsError = true
- } else {
- f.Rets.Name = r[0].Name
- f.Rets.Type = r[0].Type
- }
- case 2:
- if !r[1].IsError() {
- return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
- }
- f.Rets.ReturnsError = true
- f.Rets.Name = r[0].Name
- f.Rets.Type = r[0].Type
- default:
- return nil, errors.New("Too many return values in \"" + f.src + "\"")
- }
- }
- // fail condition
- _, body, s, found = extractSection(s, '[', ']')
- if found {
- f.Rets.FailCond = body
- }
- // dll and dll function names
- s = trim(s)
- if s == "" {
- return f, nil
- }
- if !strings.HasPrefix(s, "=") {
- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
- }
- s = trim(s[1:])
- a := strings.Split(s, ".")
- switch len(a) {
- case 1:
- f.dllfuncname = a[0]
- case 2:
- f.dllname = a[0]
- f.dllfuncname = a[1]
- default:
- return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
- }
- if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
- f.confirmproc = true
- f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
- }
- return f, nil
-}
-
-// DLLName returns DLL name for function f.
-func (f *Fn) DLLName() string {
- if f.dllname == "" {
- return "kernel32"
- }
- return f.dllname
-}
-
-// DLLFuncName returns the DLL function name for function f.
-func (f *Fn) DLLFuncName() string {
- if f.dllfuncname == "" {
- return f.Name
- }
- return f.dllfuncname
-}
-
-func (f *Fn) ConfirmProc() bool {
- return f.confirmproc
-}
-
-// ParamList returns source code for function f parameters.
-func (f *Fn) ParamList() string {
- return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
-}
-
-// HelperParamList returns source code for helper function f parameters.
-func (f *Fn) HelperParamList() string {
- return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
-}
-
-// ParamPrintList returns source code of the trace-printing part corresponding
-// to syscall input parameters.
-func (f *Fn) ParamPrintList() string {
- return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
-}
-
-// ParamCount returns the number of syscall parameters for function f.
-func (f *Fn) ParamCount() int {
- n := 0
- for _, p := range f.Params {
- n += len(p.SyscallArgList())
- }
- return n
-}
-
-// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
-// to use. It returns the parameter count for the corresponding SyscallX function.
-func (f *Fn) SyscallParamCount() int {
- n := f.ParamCount()
- switch {
- case n <= 3:
- return 3
- case n <= 6:
- return 6
- case n <= 9:
- return 9
- case n <= 12:
- return 12
- case n <= 15:
- return 15
- default:
- panic("too many arguments to system call")
- }
-}
-
-// Syscall determines which SyscallX function to use for function f.
-func (f *Fn) Syscall() string {
- c := f.SyscallParamCount()
- if c == 3 {
- return syscalldot() + "Syscall"
- }
- return syscalldot() + "Syscall" + strconv.Itoa(c)
-}
-
-// SyscallParamList returns source code for SyscallX parameters for function f.
-func (f *Fn) SyscallParamList() string {
- a := make([]string, 0)
- for _, p := range f.Params {
- a = append(a, p.SyscallArgList()...)
- }
- for len(a) < f.SyscallParamCount() {
- a = append(a, "0")
- }
- return strings.Join(a, ", ")
-}
-
-// HelperCallParamList returns source code of call into function f helper.
-func (f *Fn) HelperCallParamList() string {
- a := make([]string, 0, len(f.Params))
- for _, p := range f.Params {
- s := p.Name
- if p.Type == "string" {
- s = p.tmpVar()
- }
- a = append(a, s)
- }
- return strings.Join(a, ", ")
-}
-
-// IsUTF16 is true, if f is W (utf16) function. It is false
-// for all A (ascii) functions.
-func (_ *Fn) IsUTF16() bool {
- return true
-}
-
-// StrconvFunc returns name of Go string to OS string function for f.
-func (f *Fn) StrconvFunc() string {
- if f.IsUTF16() {
- return syscalldot() + "UTF16PtrFromString"
- }
- return syscalldot() + "BytePtrFromString"
-}
-
-// StrconvType returns Go type name used for OS string for f.
-func (f *Fn) StrconvType() string {
- if f.IsUTF16() {
- return "*uint16"
- }
- return "*byte"
-}
-
-// HasStringParam is true, if f has at least one string parameter.
-// Otherwise it is false.
-func (f *Fn) HasStringParam() bool {
- for _, p := range f.Params {
- if p.Type == "string" {
- return true
- }
- }
- return false
-}
-
-var uniqDllFuncName = make(map[string]bool)
-
-// IsNotDuplicate is true if f is not a duplicated function
-func (f *Fn) IsNotDuplicate() bool {
- funcName := f.DLLFuncName()
- if uniqDllFuncName[funcName] == false {
- uniqDllFuncName[funcName] = true
- return true
- }
- return false
-}
-
-// HelperName returns name of function f helper.
-func (f *Fn) HelperName() string {
- if !f.HasStringParam() {
- return f.Name
- }
- return "_" + f.Name
-}
-
-// Source files and functions.
-type Source struct {
- Funcs []*Fn
- Files []string
- StdLibImports []string
- ExternalImports []string
-}
-
-func (src *Source) Import(pkg string) {
- src.StdLibImports = append(src.StdLibImports, pkg)
- sort.Strings(src.StdLibImports)
-}
-
-func (src *Source) ExternalImport(pkg string) {
- src.ExternalImports = append(src.ExternalImports, pkg)
- sort.Strings(src.ExternalImports)
-}
-
-// ParseFiles parses files listed in fs and extracts all syscall
-// functions listed in sys comments. It returns source files
-// and functions collection *Source if successful.
-func ParseFiles(fs []string) (*Source, error) {
- src := &Source{
- Funcs: make([]*Fn, 0),
- Files: make([]string, 0),
- StdLibImports: []string{
- "unsafe",
- },
- ExternalImports: make([]string, 0),
- }
- for _, file := range fs {
- if err := src.ParseFile(file); err != nil {
- return nil, err
- }
- }
- return src, nil
-}
-
-// DLLs returns the DLL names for a source set src.
-func (src *Source) DLLs() []string {
- uniq := make(map[string]bool)
- r := make([]string, 0)
- for _, f := range src.Funcs {
- name := f.DLLName()
- if _, found := uniq[name]; !found {
- uniq[name] = true
- r = append(r, name)
- }
- }
- return r
-}
-
-// ParseFile adds additional file path to a source set src.
-func (src *Source) ParseFile(path string) error {
- file, err := os.Open(path)
- if err != nil {
- return err
- }
- defer file.Close()
-
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := trim(s.Text())
- if len(t) < 7 {
- continue
- }
- if !strings.HasPrefix(t, "//sys") {
- continue
- }
- t = t[5:]
- if !(t[0] == ' ' || t[0] == '\t') {
- continue
- }
- f, err := newFn(t[1:])
- if err != nil {
- return err
- }
- src.Funcs = append(src.Funcs, f)
- }
- if err := s.Err(); err != nil {
- return err
- }
- src.Files = append(src.Files, path)
-
- // get package name
- fset := token.NewFileSet()
- _, err = file.Seek(0, 0)
- if err != nil {
- return err
- }
- pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
- if err != nil {
- return err
- }
- packageName = pkg.Name.Name
-
- return nil
-}
-
-// IsStdRepo returns true if src is part of standard library.
-func (src *Source) IsStdRepo() (bool, error) {
- if len(src.Files) == 0 {
- return false, errors.New("no input files provided")
- }
- abspath, err := filepath.Abs(src.Files[0])
- if err != nil {
- return false, err
- }
- goroot := runtime.GOROOT()
- if runtime.GOOS == "windows" {
- abspath = strings.ToLower(abspath)
- goroot = strings.ToLower(goroot)
- }
- sep := string(os.PathSeparator)
- if !strings.HasSuffix(goroot, sep) {
- goroot += sep
- }
- return strings.HasPrefix(abspath, goroot), nil
-}
-
-// Generate output source file from a source set src.
-func (src *Source) Generate(w io.Writer) error {
- const (
- pkgStd = iota // any package in std library
- pkgXSysWindows // x/sys/windows package
- pkgOther
- )
- isStdRepo, err := src.IsStdRepo()
- if err != nil {
- return err
- }
- var pkgtype int
- switch {
- case isStdRepo:
- pkgtype = pkgStd
- case packageName == "windows":
- // TODO: this needs better logic than just using package name
- pkgtype = pkgXSysWindows
- default:
- pkgtype = pkgOther
- }
- if *systemDLL {
- switch pkgtype {
- case pkgStd:
- src.Import("internal/syscall/windows/sysdll")
- case pkgXSysWindows:
- default:
- src.ExternalImport("golang.org/x/sys/windows")
- }
- }
- if *winio {
- src.ExternalImport("github.com/Microsoft/go-winio")
- }
- if packageName != "syscall" {
- src.Import("syscall")
- }
- funcMap := template.FuncMap{
- "packagename": packagename,
- "syscalldot": syscalldot,
- "newlazydll": func(dll string) string {
- arg := "\"" + dll + ".dll\""
- if !*systemDLL {
- return syscalldot() + "NewLazyDLL(" + arg + ")"
- }
- if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") {
- arg = strings.Replace(arg, "_", "-", -1)
- }
- switch pkgtype {
- case pkgStd:
- return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
- case pkgXSysWindows:
- return "NewLazySystemDLL(" + arg + ")"
- default:
- return "windows.NewLazySystemDLL(" + arg + ")"
- }
- },
- }
- t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
- err = t.Execute(w, src)
- if err != nil {
- return errors.New("Failed to execute template: " + err.Error())
- }
- return nil
-}
-
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
- flag.PrintDefaults()
- os.Exit(1)
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- src, err := ParseFiles(flag.Args())
- if err != nil {
- log.Fatal(err)
- }
-
- var buf bytes.Buffer
- if err := src.Generate(&buf); err != nil {
- log.Fatal(err)
- }
-
- data, err := format.Source(buf.Bytes())
- if err != nil {
- log.Fatal(err)
- }
- if *filename == "" {
- _, err = os.Stdout.Write(data)
- } else {
- err = ioutil.WriteFile(*filename, data, 0644)
- }
- if err != nil {
- log.Fatal(err)
- }
-}
-
-// TODO: use println instead to print in the following template
-const srcTemplate = `
-
-{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT
-
-package {{packagename}}
-
-import (
-{{range .StdLibImports}}"{{.}}"
-{{end}}
-
-{{range .ExternalImports}}"{{.}}"
-{{end}}
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
- errnoERROR_IO_PENDING = 997
-)
-
-var (
- errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e {{syscalldot}}Errno) error {
- switch e {
- case 0:
- return nil
- case errnoERROR_IO_PENDING:
- return errERROR_IO_PENDING
- }
- // TODO: add more here, after collecting data on the common
-	// error values seen on Windows. (perhaps when running
- // all.bat?)
- return e
-}
-
-var (
-{{template "dlls" .}}
-{{template "funcnames" .}})
-{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
-{{end}}
-
-{{/* help functions */}}
-
-{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
-{{end}}{{end}}
-
-{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
-{{end}}{{end}}
-
-{{define "helperbody"}}
-func {{.Name}}({{.ParamList}}) {{template "results" .}}{
-{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
-}
-{{end}}
-
-{{define "funcbody"}}
-func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
-{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
-{{template "seterror" .}}{{template "printtrace" .}} return
-}
-{{end}}
-
-{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
-{{end}}{{end}}{{end}}
-
-{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
-{{end}}{{end}}{{end}}
-
-{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
-
-{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
-
-{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
- return
-}
-{{end}}{{end}}
-
-
-{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
-{{end}}{{end}}
-
-{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
-{{end}}{{end}}
-
-`
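For orientation, the generator removed above turns `//sys` comment lines into syscall stubs. Below is a minimal, hypothetical Go sketch of how such a declaration splits into name, parameters, returns, and DLL, in the spirit of the extractSection/newFn helpers; the helper names and the example declaration are illustrative, not the real mksyscall_windows code.

package main

import (
	"fmt"
	"strings"
)

// splitSysLine splits a "//sys"-style declaration in the spirit of the
// extractSection/newFn helpers above. Illustrative only: the real generator
// also handles fail conditions, tabs, and many more edge cases.
func splitSysLine(s string) (name, params, rets, dll string) {
	a := strings.SplitN(s, "(", 2) // function name, then the rest
	if len(a) != 2 {
		return s, "", "", ""
	}
	name = strings.TrimSpace(a[0])
	b := strings.SplitN(a[1], ")", 2) // parameter list, then the rest
	if len(b) != 2 {
		return name, strings.TrimSpace(b[0]), "", ""
	}
	params = strings.TrimSpace(b[0])
	rest := b[1]
	if i := strings.Index(rest, "("); i >= 0 { // optional return list
		c := strings.SplitN(rest[i+1:], ")", 2)
		rets = strings.TrimSpace(c[0])
		rest = c[len(c)-1]
	}
	if i := strings.Index(rest, "="); i >= 0 { // optional "= dll.Func" suffix
		dll = strings.TrimSpace(rest[i+1:])
	}
	return name, params, rets, dll
}

func main() {
	n, p, r, d := splitSysLine("FindClose(handle Handle) (err error) = kernel32.FindClose")
	fmt.Printf("name=%q params=%q rets=%q dll=%q\n", n, p, r, d)
	// name="FindClose" params="handle Handle" rets="err error" dll="kernel32.FindClose"
}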
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
index 0f14d3427..22b111742 100644
--- a/vendor/github.com/containernetworking/cni/libcni/api.go
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -25,6 +25,7 @@ import (
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/types"
+ "github.com/containernetworking/cni/pkg/utils"
"github.com/containernetworking/cni/pkg/version"
)
@@ -32,6 +33,10 @@ var (
CacheDir = "/var/lib/cni"
)
+const (
+ CNICacheV1 = "cniCacheV1"
+)
+
// A RuntimeConf holds the arguments to one invocation of a CNI plugin
// excepting the network configuration, with the nested exception that
// the `runtimeConfig` from the network configuration is included
@@ -48,7 +53,7 @@ type RuntimeConf struct {
// to the plugin
CapabilityArgs map[string]interface{}
- // A cache directory in which to library data. Defaults to CacheDir
+ // DEPRECATED. Will be removed in a future release.
CacheDir string
}
@@ -70,19 +75,22 @@ type CNI interface {
CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+ GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+ GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
}
type CNIConfig struct {
- Path []string
- exec invoke.Exec
+ Path []string
+ exec invoke.Exec
+ cacheDir string
}
// CNIConfig implements the CNI interface
@@ -92,9 +100,18 @@ var _ CNI = &CNIConfig{}
// in the given paths and use the given exec interface to run those plugins,
// or if the exec interface is not given, will use a default exec handler.
func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+ return NewCNIConfigWithCacheDir(path, "", exec)
+}
+
+// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins
+// in the given paths use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+// The given cache directory will be used for temporary data storage when needed.
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig {
return &CNIConfig{
- Path: path,
- exec: exec,
+ Path: path,
+ cacheDir: cacheDir,
+ exec: exec,
}
}
@@ -165,33 +182,122 @@ func (c *CNIConfig) ensureExec() invoke.Exec {
return c.exec
}
-func getResultCacheFilePath(netName string, rt *RuntimeConf) string {
- cacheDir := rt.CacheDir
- if cacheDir == "" {
- cacheDir = CacheDir
+type cachedInfo struct {
+ Kind string `json:"kind"`
+ ContainerID string `json:"containerId"`
+ Config []byte `json:"config"`
+ IfName string `json:"ifName"`
+ NetworkName string `json:"networkName"`
+ CniArgs [][2]string `json:"cniArgs,omitempty"`
+ CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"`
+ RawResult map[string]interface{} `json:"result,omitempty"`
+ Result types.Result `json:"-"`
+}
+
+// getCacheDir returns the cache directory in this order:
+// 1) global cacheDir from CNIConfig object
+// 2) deprecated cacheDir from RuntimeConf object
+// 3) fall back to default cache directory
+func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string {
+ if c.cacheDir != "" {
+ return c.cacheDir
+ }
+ if rt.CacheDir != "" {
+ return rt.CacheDir
+ }
+ return CacheDir
+}
+
+func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) {
+ if netName == "" || rt.ContainerID == "" || rt.IfName == "" {
+ return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName)
}
- return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName))
+ return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil
}
-func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error {
+func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error {
+ cached := cachedInfo{
+ Kind: CNICacheV1,
+ ContainerID: rt.ContainerID,
+ Config: config,
+ IfName: rt.IfName,
+ NetworkName: netName,
+ CniArgs: rt.Args,
+ CapabilityArgs: rt.CapabilityArgs,
+ }
+
+ // We need to get type.Result into cachedInfo as JSON map
+ // Marshal to []byte, then Unmarshal into cached.RawResult
data, err := json.Marshal(result)
if err != nil {
return err
}
- fname := getResultCacheFilePath(netName, rt)
+
+ err = json.Unmarshal(data, &cached.RawResult)
+ if err != nil {
+ return err
+ }
+
+ newBytes, err := json.Marshal(&cached)
+ if err != nil {
+ return err
+ }
+
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return err
+ }
if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
return err
}
- return ioutil.WriteFile(fname, data, 0600)
+
+ return ioutil.WriteFile(fname, newBytes, 0600)
}
-func delCachedResult(netName string, rt *RuntimeConf) error {
- fname := getResultCacheFilePath(netName, rt)
+func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ // Ignore error
+ return nil
+ }
return os.Remove(fname)
}
-func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
- fname := getResultCacheFilePath(netName, rt)
+func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ var bytes []byte
+
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, nil, err
+ }
+ bytes, err = ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil, nil
+ }
+
+ unmarshaled := cachedInfo{}
+ if err := json.Unmarshal(bytes, &unmarshaled); err != nil {
+ return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %v", netName, err)
+ }
+ if unmarshaled.Kind != CNICacheV1 {
+ return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind)
+ }
+
+ newRt := *rt
+ if unmarshaled.CniArgs != nil {
+ newRt.Args = unmarshaled.CniArgs
+ }
+ newRt.CapabilityArgs = unmarshaled.CapabilityArgs
+
+ return unmarshaled.Config, &newRt, nil
+}
+
+func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, err
+ }
data, err := ioutil.ReadFile(fname)
if err != nil {
// Ignore read errors; the cached result may not exist on-disk
@@ -222,16 +328,73 @@ func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result,
return result, err
}
+func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+ fname, err := c.getCacheFilePath(netName, rt)
+ if err != nil {
+ return nil, err
+ }
+ fdata, err := ioutil.ReadFile(fname)
+ if err != nil {
+ // Ignore read errors; the cached result may not exist on-disk
+ return nil, nil
+ }
+
+ cachedInfo := cachedInfo{}
+ if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 {
+ return c.getLegacyCachedResult(netName, cniVersion, rt)
+ }
+
+ newBytes, err := json.Marshal(&cachedInfo.RawResult)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal cached network %q config: %v", netName, err)
+ }
+
+ // Read the version of the cached result
+ decoder := version.ConfigDecoder{}
+ resultCniVersion, err := decoder.Decode(newBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure we can understand the result
+ result, err := version.NewResult(resultCniVersion, newBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert to the config version to ensure plugins get prevResult
+ // in the same version as the config. The cached result version
+ // should match the config version unless the config was changed
+ // while the container was running.
+ result, err = result.GetAsVersion(cniVersion)
+ if err != nil && resultCniVersion != cniVersion {
+ return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err)
+ }
+ return result, err
+}
+
// GetNetworkListCachedResult returns the cached Result of the previous
-// previous AddNetworkList() operation for a network list, or an error.
+// AddNetworkList() operation for a network list, or an error.
func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
- return getCachedResult(list.Name, list.CNIVersion, rt)
+ return c.getCachedResult(list.Name, list.CNIVersion, rt)
}
// GetNetworkCachedResult returns the cached Result of the previous
-// previous AddNetwork() operation for a network, or an error.
+// AddNetwork() operation for a network, or an error.
func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
- return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+}
+
+// GetNetworkListCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ return c.getCachedConfig(list.Name, rt)
+}
+
+// GetNetworkCachedConfig copies the input RuntimeConf to output
+// RuntimeConf with fields updated with info from the cached Config.
+func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) {
+ return c.getCachedConfig(net.Network.Name, rt)
}
func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
@@ -240,6 +403,12 @@ func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net
if err != nil {
return nil, err
}
+ if err := utils.ValidateContainerID(rt.ContainerID); err != nil {
+ return nil, err
+ }
+ if err := utils.ValidateNetworkName(name); err != nil {
+ return nil, err
+ }
newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
if err != nil {
@@ -260,7 +429,7 @@ func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList,
}
}
- if err = setCachedResult(result, list.Name, rt); err != nil {
+ if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil {
return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err)
}
@@ -295,7 +464,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis
return nil
}
- cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
+ cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
}
@@ -332,7 +501,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
return err
} else if gtet {
- cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt)
+ cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
}
@@ -344,7 +513,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList,
return err
}
}
- _ = delCachedResult(list.Name, rt)
+ _ = c.cacheDel(list.Name, rt)
return nil
}
@@ -356,7 +525,7 @@ func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
return nil, err
}
- if err = setCachedResult(result, net.Network.Name, rt); err != nil {
+ if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil {
return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err)
}
@@ -372,7 +541,7 @@ func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru
return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion)
}
- cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
}
@@ -387,7 +556,7 @@ func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil {
return err
} else if gtet {
- cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+ cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
if err != nil {
return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err)
}
@@ -396,7 +565,7 @@ func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *Runt
if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil {
return err
}
- _ = delCachedResult(net.Network.Name, rt)
+ _ = c.cacheDel(net.Network.Name, rt)
return nil
}
@@ -455,7 +624,8 @@ func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]
// validatePlugin checks that an individual plugin's configuration is sane
func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
- pluginPath, err := invoke.FindInPath(pluginName, c.Path)
+ c.ensureExec()
+ pluginPath, err := c.exec.FindInPath(pluginName, c.Path)
if err != nil {
return err
}
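The libcni changes above replace the bare cached Result file with a versioned cniCacheV1 JSON record stored at <cacheDir>/results/<networkName>-<containerID>-<ifName>, where the cache directory is resolved from the CNIConfig first, then the deprecated RuntimeConf field, then the built-in default. A small illustrative sketch of that on-disk shape; the struct mirrors the field names from the diff, while the paths and values are made up.

package main

import (
	"encoding/json"
	"fmt"
	"path/filepath"
)

// cacheEntry mirrors the field names of the cachedInfo struct added above;
// everything else in this sketch is illustrative.
type cacheEntry struct {
	Kind           string                 `json:"kind"`
	ContainerID    string                 `json:"containerId"`
	Config         []byte                 `json:"config"`
	IfName         string                 `json:"ifName"`
	NetworkName    string                 `json:"networkName"`
	CniArgs        [][2]string            `json:"cniArgs,omitempty"`
	CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"`
	RawResult      map[string]interface{} `json:"result,omitempty"`
}

func main() {
	// Cache file layout used by getCacheFilePath:
	// <cacheDir>/results/<networkName>-<containerID>-<ifName>
	fname := filepath.Join("/var/lib/cni", "results",
		fmt.Sprintf("%s-%s-%s", "podman", "example-container-id", "eth0"))
	fmt.Println(fname)

	entry := cacheEntry{
		Kind:        "cniCacheV1",
		ContainerID: "example-container-id",
		IfName:      "eth0",
		NetworkName: "podman",
		Config:      []byte(`{"cniVersion":"0.4.0","name":"podman"}`),
	}
	data, err := json.MarshalIndent(&entry, "", "  ")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(data))
}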
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
index ea56c509d..d8920cf8c 100644
--- a/vendor/github.com/containernetworking/cni/libcni/conf.go
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -114,11 +114,11 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) {
for i, conf := range plugins {
newBytes, err := json.Marshal(conf)
if err != nil {
- return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err)
+ return nil, fmt.Errorf("failed to marshal plugin config %d: %v", i, err)
}
netConf, err := ConfFromBytes(newBytes)
if err != nil {
- return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err)
+ return nil, fmt.Errorf("failed to parse plugin config %d: %v", i, err)
}
list.Plugins = append(list.Plugins, netConf)
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
index 913528c1d..d31a44e87 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go
@@ -32,7 +32,7 @@ type inherited struct{}
var inheritArgsFromEnv inherited
-func (_ *inherited) AsEnv() []string {
+func (*inherited) AsEnv() []string {
return nil
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go
index bd8640fc9..4eac64899 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/args.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go
@@ -36,7 +36,7 @@ func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
case "0", "false":
*b = false
default:
- return fmt.Errorf("Boolean unmarshal error: invalid input %s", s)
+ return fmt.Errorf("boolean unmarshal error: invalid input %s", s)
}
return nil
}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go
index d0d11006a..3e185c1ce 100644
--- a/vendor/github.com/containernetworking/cni/pkg/types/types.go
+++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go
@@ -16,7 +16,6 @@ package types
import (
"encoding/json"
- "errors"
"fmt"
"io"
"net"
@@ -134,9 +133,16 @@ func (r *Route) String() string {
// Well known error codes
// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes
const (
- ErrUnknown uint = iota // 0
- ErrIncompatibleCNIVersion // 1
- ErrUnsupportedField // 2
+ ErrUnknown uint = iota // 0
+ ErrIncompatibleCNIVersion // 1
+ ErrUnsupportedField // 2
+ ErrUnknownContainer // 3
+ ErrInvalidEnvironmentVariables // 4
+ ErrIOFailure // 5
+ ErrDecodingFailure // 6
+ ErrInvalidNetworkConfig // 7
+ ErrTryAgainLater uint = 11
+ ErrInternal uint = 999
)
type Error struct {
@@ -145,6 +151,14 @@ type Error struct {
Details string `json:"details,omitempty"`
}
+func NewError(code uint, msg, details string) *Error {
+ return &Error{
+ Code: code,
+ Msg: msg,
+ Details: details,
+ }
+}
+
func (e *Error) Error() string {
details := ""
if e.Details != "" {
@@ -194,6 +208,3 @@ func prettyPrint(obj interface{}) error {
_, err = os.Stdout.Write(data)
return err
}
-
-// NotImplementedError is used to indicate that a method is not implemented for the given platform
-var NotImplementedError = errors.New("Not Implemented")
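The types changes above add a NewError constructor and more well-known error codes, including ErrTryAgainLater = 11 and ErrInternal = 999. A minimal sketch of the error object a plugin could emit with one of the new codes; this is a standalone mirror of the struct, not the vendored package.

package main

import (
	"encoding/json"
	"fmt"
)

// cniError mirrors the Error shape above; the two code values are the new
// well-known codes from the diff (ErrTryAgainLater = 11, ErrInternal = 999).
type cniError struct {
	Code    uint   `json:"code"`
	Msg     string `json:"msg"`
	Details string `json:"details,omitempty"`
}

const (
	errTryAgainLater uint = 11
	errInternal      uint = 999
)

func main() {
	e := cniError{
		Code:    errTryAgainLater,
		Msg:     "resources temporarily unavailable",
		Details: "address pool exhausted",
	}
	data, err := json.Marshal(&e)
	if err != nil {
		fmt.Println(err)
		return
	}
	// This is the kind of JSON object a plugin would print on stdout before
	// exiting non-zero so the runtime can decide to retry later.
	fmt.Println(string(data))
}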
diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
new file mode 100644
index 000000000..324c40dea
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go
@@ -0,0 +1,51 @@
+// Copyright 2019 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "regexp"
+
+ "github.com/containernetworking/cni/pkg/types"
+)
+
+// cniValidNameChars is the regexp used to validate valid characters in
+// containerID and networkName
+const cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]`
+
+var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`)
+
+// ValidateContainerID will validate that the supplied containerID is not empty and does not contain invalid characters
+func ValidateContainerID(containerID string) *types.Error {
+
+ if containerID == "" {
+ return types.NewError(types.ErrUnknownContainer, "missing containerID", "")
+ }
+ if !cniReg.MatchString(containerID) {
+ return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID)
+ }
+ return nil
+}
+
+// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters
+func ValidateNetworkName(networkName string) *types.Error {
+
+ if networkName == "" {
+ return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "")
+ }
+ if !cniReg.MatchString(networkName) {
+ return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName)
+ }
+ return nil
+}
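The new utils package validates container IDs and network names against a single character class. A tiny, self-contained sketch of the same check; the regexp is copied from the file above, while the sample inputs are made up.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the new pkg/utils helpers: the first character must be
// alphanumeric, the rest may also contain "_", "." and "-".
var cniName = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.\-]*$`)

func main() {
	for _, s := range []string{"good-id.1", "", "bad/id"} {
		fmt.Printf("%q valid=%v\n", s, s != "" && cniName.MatchString(s))
	}
	// "good-id.1" valid=true
	// "" valid=false
	// "bad/id" valid=false
}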
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 1f7815073..66774c226 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -29,7 +29,7 @@ env:
####
# Command to prefix every output line with a timestamp
# (can't do inline awk script, Cirrus-CI or YAML mangles quoting)
- _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
+ _TIMESTAMP: 'awk -f ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk'
_DFCMD: 'df -lhTx tmpfs'
_RAUDITCMD: 'cat /var/log/audit/audit.log'
_UAUDITCMD: 'cat /var/log/kern.log'
@@ -75,9 +75,37 @@ testing_task:
failure_journal_log_script: '${_JOURNALCMD} || true'
+# This task runs `make vendor` followed by ./hack/tree_status.sh to check
+# whether the git tree is clean. The reasoning for that is to make sure
+# that the vendor.conf, the code and the vendored packages in ./vendor are
+# in sync at all times.
+vendor_task:
+
+ only_if: $CIRRUS_CHANGE_MESSAGE !=~ '.*\*\*\*\s*CIRRUS:\s*TEST\s*IMAGES\s*\*\*\*.*'
+
+ env:
+ CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
+ GOPATH: "/go"
+ GOSRC: "/go/src/github.com/containers/buildah"
+
+ # Runs within Cirrus's "community cluster"
+ container:
+ image: docker.io/library/golang:1.13
+ cpu: 1
+ memory: 1
+
+ timeout_in: 30m
+
+ vendor_script:
+ - 'cd ${CIRRUS_WORKING_DIR} && make vendor'
+ - 'cd ${CIRRUS_WORKING_DIR} && ./hack/tree_status.sh'
+
# Update metadata on VM images referenced by this repository state
meta_task:
+ depends_on:
+ - "vendor"
+
container:
image: "quay.io/libpod/imgts:latest" # see contrib/imgts
cpu: 1
diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml
index 044bc1f1b..dde37ad79 100644
--- a/vendor/github.com/containers/buildah/.golangci.yml
+++ b/vendor/github.com/containers/buildah/.golangci.yml
@@ -2,7 +2,6 @@
run:
build-tags:
- apparmor
- - ostree
- seccomp
- selinux
concurrency: 6
diff --git a/vendor/github.com/containers/buildah/.papr.sh b/vendor/github.com/containers/buildah/.papr.sh
index 25ab4c29d..2795e9ec0 100644
--- a/vendor/github.com/containers/buildah/.papr.sh
+++ b/vendor/github.com/containers/buildah/.papr.sh
@@ -26,7 +26,6 @@ dnf install -y \
libselinux-utils \
make \
openssl \
- ostree-devel \
skopeo-containers \
which
diff --git a/vendor/github.com/containers/buildah/.papr.yml b/vendor/github.com/containers/buildah/.papr.yml
index 6eaba332c..4be12a18e 100644
--- a/vendor/github.com/containers/buildah/.papr.yml
+++ b/vendor/github.com/containers/buildah/.papr.yml
@@ -68,7 +68,6 @@ packages:
- golang
- libassuan-devel
- make
- - ostree-devel
- skopeo-containers
required: false
diff --git a/vendor/github.com/containers/buildah/.travis.yml b/vendor/github.com/containers/buildah/.travis.yml
index fbc0a7862..a74108230 100644
--- a/vendor/github.com/containers/buildah/.travis.yml
+++ b/vendor/github.com/containers/buildah/.travis.yml
@@ -2,8 +2,8 @@ language: go
dist: xenial
sudo: required
go:
- - 1.11.x
- 1.12.x
+ - 1.13.x
- tip
go_import_path: github.com/containers/buildah
@@ -40,9 +40,6 @@ matrix:
services:
- docker
before_install:
- - make vendor
- - ./hack/tree_status.sh
- - sudo apt-get update
- sudo apt-get -qq install software-properties-common
- sudo add-apt-repository -y ppa:duggan/bats
- sudo apt-get update
@@ -51,15 +48,6 @@ before_install:
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
- mkdir /home/travis/auth
- sudo mkdir -p /var/lib/containers/storage/overlay
- - >
- OSTREE_VERSION=v2019.2;
- git clone https://github.com/ostreedev/ostree &&
- pushd ostree &&
- git checkout $OSTREE_VERSION &&
- ./autogen.sh --prefix=/usr &&
- sudo make -j4 install &&
- popd &&
- sudo rm -rf ostree
install:
# Let's create a self signed certificate and get it in the right places
- hostname
@@ -98,7 +86,7 @@ script:
# Setting up Docker Registry is complete, let's do Buildah testing!
- make install.tools -j4
- make install.libseccomp.sudo all runc validate lint SECURITYTAGS="apparmor seccomp"
- - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./ostree_tag.sh` `./selinux_tag.sh`" ./cmd/buildah
+ - go test -c -tags "apparmor seccomp `./btrfs_tag.sh` `./libdm_tag.sh` `./selinux_tag.sh`" ./cmd/buildah
- tmp=`mktemp -d`; mkdir $tmp/root $tmp/runroot; sudo PATH="$PATH" ./buildah.test -test.v --root $tmp/root --runroot $tmp/runroot --storage-driver vfs --signature-policy `pwd`/tests/policy.json --registries-conf `pwd`/tests/registries.conf
- cd tests; sudo PATH="$PATH" ./test_runner.sh
- cd ..
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index c75a055a6..b41ff8350 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,28 @@
# Changelog
+## v1.11.4 (2019-10-28)
+ buildah: add a "manifest" command
+ manifests: add the module
+ pkg/supplemented: add a package for grouping images together
+ pkg/manifests: add a manifest list build/manipulation API
+ Update for ErrUnauthorizedForCredentials API change in containers/image
+ Update for manifest-lists API changes in containers/image
+ version: also note the version of containers/image
+ Move to containers/image v5.0.0
+ Enable --device directory as src device
+ Fix git build with branch specified
+ Bump github.com/openshift/imagebuilder from 1.1.0 to 1.1.1
+ Bump github.com/fsouza/go-dockerclient from 1.4.4 to 1.5.0
+ Add clarification to the Tutorial for new users
+ Silence "using cache" to ensure -q is fully quiet
+ Add OWNERS File to Buildah
+ Bump github.com/containers/storage from 1.13.4 to 1.13.5
+ Move runtime flag to bud from common
+ Commit: check for storage.ErrImageUnknown using errors.Cause()
+ Fix crash when invalid COPY --from flag is specified.
+ Bump back to v1.12.0-dev
+
## v1.11.3 (2019-10-04)
Update c/image to v4.0.1
Bump github.com/spf13/pflag from 1.0.3 to 1.0.5
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index cb0dfeb51..9d04177d0 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -2,7 +2,7 @@ export GOPROXY=https://proxy.golang.org
SELINUXTAG := $(shell ./selinux_tag.sh)
APPARMORTAG := $(shell hack/apparmor_tag.sh)
-STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./ostree_tag.sh)
+STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh)
SECURITYTAGS ?= seccomp $(SELINUXTAG) $(APPARMORTAG)
TAGS ?= $(SECURITYTAGS) $(STORAGETAGS)
BUILDTAGS += $(TAGS)
@@ -24,7 +24,7 @@ endif
GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed"))
SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
-STATIC_STORAGETAGS = "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
+STATIC_STORAGETAGS = "containers_image_openpgp exclude_graphdriver_devicemapper $(STORAGE_TAGS)"
CNI_COMMIT := $(shell sed -n 's;\tgithub.com/containernetworking/cni \([^ \n]*\).*$\;\1;p' go.mod)
#RUNC_COMMIT := $(shell sed -n 's;\tgithub.com/opencontainers/runc \([^ \n]*\).*$\;\1;p' go.mod)
@@ -130,6 +130,9 @@ test-unit: tests/testreport/testreport
mkdir -p $$tmp/root $$tmp/runroot; \
$(GO) test -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" ./cmd/buildah -args -root $$tmp/root -runroot $$tmp/runroot -storage-driver vfs -signature-policy $(shell pwd)/tests/policy.json -registries-conf $(shell pwd)/tests/registries.conf
+vendor-in-container:
+ podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.13 make vendor
+
.PHONY: vendor
vendor:
export GO111MODULE=on \
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 59b62925a..d57eea818 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -27,7 +27,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.11.4"
+ Version = "1.12.0-dev"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 6bba4a1f8..c8741b781 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -21,7 +21,7 @@ require (
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 // indirect
github.com/mattn/go-shellwords v1.0.6
github.com/morikuni/aec v1.0.0 // indirect
- github.com/onsi/ginkgo v1.10.1
+ github.com/onsi/ginkgo v1.10.2
github.com/onsi/gomega v1.7.0
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index e5ce6a290..4a6673b04 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -212,6 +212,8 @@ github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
+github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index 858364b45..f533b0fb2 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -139,7 +139,6 @@ Prior to installing Buildah, install the following packages on your Linux distro
* glib2-devel
* libassuan-devel
* libseccomp-devel
-* ostree-devel
* runc (Requires version 1.0 RC4 or higher.)
* containers-common
@@ -158,7 +157,6 @@ In Fedora, you can use this command:
gpgme-devel \
libassuan-devel \
libseccomp-devel \
- ostree-devel \
git \
bzip2 \
go-md2man \
@@ -196,7 +194,6 @@ run this command:
gpgme-devel \
libassuan-devel \
libseccomp-devel \
- ostree-devel \
git \
bzip2 \
go-md2man \
@@ -241,7 +238,7 @@ In Ubuntu zesty and xenial, you can use these commands:
add-apt-repository -y ppa:gophers/archive
apt-add-repository -y ppa:projectatomic/ppa
apt-get -y -qq update
- apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libostree-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
+ apt-get -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
apt-get -y install golang-1.10
```
Then to install Buildah on Ubuntu follow the steps in this example:
@@ -266,7 +263,7 @@ gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
apt update
-apt -y install -t stretch-backports libostree-dev golang
+apt -y install -t stretch-backports golang
apt -y install bats btrfs-tools git libapparmor-dev libdevmapper-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
```
@@ -274,7 +271,7 @@ The build steps on Debian are otherwise the same as Ubuntu, above.
## Vendoring - Dependency Management
-This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor` to synchronize the code with the go module and repopulate the `./vendor` directory.
+This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.
## Configuration files
@@ -381,7 +378,7 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a
* Enter into your sandbox `src/github.com/containers/buildah` and ensure that the GOPATH variable is set to the directory prior as noted above.
* `export GO111MODULE=on`
* Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13`
- * `make vendor`
+ * `make vendor-in-container`
* `make`
* `make install`
* Then add any updated or added files with `git add` then do a `git commit` and create a PR.
@@ -391,10 +388,10 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a
If you wish to vendor in your personal fork to try changes out (assuming containers/storage in the below example):
* `go mod edit -replace github.com/containers/storage=github.com/{mygithub_username}/storage@YOUR_BRANCH`
- * `make vendor`
+ * `make vendor-in-container`
To revert
* `go mod edit -dropreplace github.com/containers/storage`
- * `make vendor`
+ * `make vendor-in-container`
To speed up fetching dependencies, you can use a [Go Module Proxy](https://proxy.golang.org) by setting `GOPROXY=https://proxy.golang.org`.
diff --git a/vendor/github.com/containers/buildah/ostree_tag.sh b/vendor/github.com/containers/buildah/ostree_tag.sh
deleted file mode 100644
index 537c17ff4..000000000
--- a/vendor/github.com/containers/buildah/ostree_tag.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-if pkg-config ostree-1 2> /dev/null ; then
- echo containers_image_ostree
-else
- echo containers_image_ostree_stub
-fi
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index a9bf94a32..e8979cd7f 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -7,7 +7,6 @@ package cli
import (
"fmt"
"os"
- "path/filepath"
"runtime"
"strings"
@@ -270,9 +269,5 @@ func GetDefaultAuthFile() string {
if authfile != "" {
return authfile
}
- runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
- if runtimeDir != "" {
- return filepath.Join(runtimeDir, "containers/auth.json")
- }
return ""
}
diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
index 9be9bb3b6..80ca05016 100644
--- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
+++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
@@ -148,12 +148,12 @@ func getMountsMap(path string) (string, string, error) {
}
// SecretMounts copies, adds, and mounts the secrets to the container root filesystem
-func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless bool) []rspec.Mount {
- return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless)
+func SecretMounts(mountLabel, containerWorkingDir, mountFile string, rootless, disableFips bool) []rspec.Mount {
+ return SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, containerWorkingDir, 0, 0, rootless, disableFips)
}
// SecretMountsWithUIDGID specifies the uid/gid of the owner
-func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless bool) []rspec.Mount {
+func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPrefix string, uid, gid int, rootless, disableFips bool) []rspec.Mount {
var (
secretMounts []rspec.Mount
mountFiles []string
@@ -180,6 +180,10 @@ func SecretMountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPre
}
}
+ // Only add FIPS secret mount if disableFips=false
+ if disableFips {
+ return secretMounts
+ }
// Add FIPS mode secret if /etc/system-fips exists on the host
_, err := os.Stat("/etc/system-fips")
if err == nil {
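The secrets change above threads a new disableFips flag through SecretMounts and SecretMountsWithUIDGID so callers can opt out of the FIPS mode secret mount. A hypothetical call site, assuming the vendored buildah package; the paths and values are placeholders.

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/secrets"
)

func main() {
	// Hypothetical call site; passing disableFips=true (the new trailing
	// argument) skips the FIPS mode secret mount entirely.
	mounts := secrets.SecretMountsWithUIDGID(
		"",                        // mountLabel
		"/var/lib/containers/ctr", // containerWorkingDir
		"",                        // mountFile (use the defaults)
		"/var/lib/containers/ctr", // mountPrefix
		0, 0,                      // uid, gid
		false,                     // rootless
		true,                      // disableFips
	)
	for _, m := range mounts {
		fmt.Println(m.Destination, "<-", m.Source)
	}
}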
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 624da9dae..4f507d1bc 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -460,7 +460,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
}
// Get the list of secrets mounts.
- secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless())
+ secretMounts := secrets.SecretMountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, cdir, int(rootUID), int(rootGID), unshare.IsRootless(), false)
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
diff --git a/vendor/github.com/containers/buildah/troubleshooting.md b/vendor/github.com/containers/buildah/troubleshooting.md
index 96fa403f0..4ff2f06c4 100644
--- a/vendor/github.com/containers/buildah/troubleshooting.md
+++ b/vendor/github.com/containers/buildah/troubleshooting.md
@@ -82,9 +82,9 @@ the command with single quotes and use `bash -c`. The previous examples would b
changed to:
```console
-# buildah run bash -c '$whalecontainer /usr/games/fortune -a | cowsay'
-# buildah run bash -c '$newcontainer echo "daemon off;" >> /etc/nginx/nginx.conf'
-# buildah run bash -c '$newcontainer echo "nginx on Fedora" > /usr/share/nginx/html/index.html'
+# buildah run $whalecontainer bash -c '/usr/games/fortune -a | cowsay'
+# buildah run $newcontainer bash -c 'echo "daemon off;" >> /etc/nginx/nginx.conf'
+# buildah run $newcontainer bash -c 'echo "nginx on Fedora" > /usr/share/nginx/html/index.html'
```
---
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 44db18b45..b4670e41c 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -248,28 +248,36 @@ func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
- if hasher != nil && b.ContentDigester.Hash() != nil {
- hasher = io.MultiWriter(hasher, b.ContentDigester.Hash())
- }
- if hasher == nil {
- hasher = b.ContentDigester.Hash()
- }
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
if dryRun {
return func(src, dest string) error {
- if hasher == nil {
- hasher = ioutil.Discard
+ thisHasher := hasher
+ if thisHasher != nil && b.ContentDigester.Hash() != nil {
+ thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
+ }
+ if thisHasher == nil {
+ thisHasher = b.ContentDigester.Hash()
}
f, err := os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q", src)
}
defer f.Close()
- _, err = io.Copy(hasher, f)
+ _, err = io.Copy(thisHasher, f)
return err
}
}
- return chrootarchive.UntarPathAndChown(chownOpts, hasher, convertedUIDMap, convertedGIDMap)
+ return func(src, dest string) error {
+ thisHasher := hasher
+ if thisHasher != nil && b.ContentDigester.Hash() != nil {
+ thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
+ }
+ if thisHasher == nil {
+ thisHasher = b.ContentDigester.Hash()
+ }
+ untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap)
+ return untarPathAndChown(src, dest)
+ }
}
// tarPath returns a function which creates an archive of a specified location,
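The untarPath change above stops reusing a single hash writer and instead combines the caller-supplied hasher with the builder's content digester inside each returned closure. A generic sketch of that io.MultiWriter pattern, not the buildah code itself.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

// combineHashers mirrors the per-call pattern above: if both a caller-supplied
// hasher and a content digester exist, write to both; otherwise fall back to
// whichever one is non-nil.
func combineHashers(caller, digester io.Writer) io.Writer {
	if caller != nil && digester != nil {
		return io.MultiWriter(caller, digester)
	}
	if caller == nil {
		return digester
	}
	return caller
}

func main() {
	caller := sha256.New()
	digester := sha256.New()
	w := combineHashers(caller, digester)
	if _, err := io.Copy(w, strings.NewReader("layer contents")); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("caller:   %x\n", caller.Sum(nil))
	fmt.Printf("digester: %x\n", digester.Sum(nil))
}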
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
deleted file mode 100644
index 70f9c5564..000000000
--- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/containers/storage/pkg/archive"
- "github.com/sirupsen/logrus"
-)
-
-var (
- flDebug = flag.Bool("D", false, "debugging output")
- flNewDir = flag.String("newdir", "", "")
- flOldDir = flag.String("olddir", "", "")
- log = logrus.New()
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
- fmt.Printf("%s [OPTIONS]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- log.Out = os.Stderr
- if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
- logrus.SetLevel(logrus.DebugLevel)
- }
- var newDir, oldDir string
-
- if len(*flNewDir) == 0 {
- var err error
- newDir, err = ioutil.TempDir("", "storage-test-newDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(newDir)
- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
- log.Fatal(err)
- }
- } else {
- newDir = *flNewDir
- }
-
- if len(*flOldDir) == 0 {
- oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(oldDir)
- } else {
- oldDir = *flOldDir
- }
-
- changes, err := archive.ChangesDirs(newDir, oldDir)
- if err != nil {
- log.Fatal(err)
- }
-
- a, err := archive.ExportChanges(newDir, changes)
- if err != nil {
- log.Fatal(err)
- }
- defer a.Close()
-
- i, err := io.Copy(os.Stdout, a)
- if err != nil && err != io.EOF {
- log.Fatal(err)
- }
- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
- fileData := []byte("fooo")
- for n := 0; n < numberOfFiles; n++ {
- fileName := fmt.Sprintf("file-%d", n)
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
- return 0, err
- }
- if makeLinks {
- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
- return 0, err
- }
- }
- }
- totalSize := numberOfFiles * len(fileData)
- return totalSize, nil
-}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index 8743abc56..0cdbf14b7 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -2,11 +2,14 @@ package ocicni
import (
"context"
+ "encoding/json"
"errors"
"fmt"
+ "io/ioutil"
"net"
"os"
"path"
+ "path/filepath"
"sort"
"strings"
"sync"
@@ -21,10 +24,11 @@ import (
)
type cniNetworkPlugin struct {
+ cniConfig *libcni.CNIConfig
loNetwork *cniNetwork
sync.RWMutex
- defaultNetName string
+ defaultNetName netName
networks map[string]*cniNetwork
nsManager *nsManager
@@ -47,11 +51,15 @@ type cniNetworkPlugin struct {
cacheDir string
}
+type netName struct {
+ name string
+ changeable bool
+}
+
type cniNetwork struct {
- name string
- filePath string
- NetworkConfig *libcni.NetworkConfigList
- CNIConfig *libcni.CNIConfig
+ name string
+ filePath string
+ config *libcni.NetworkConfigList
}
var errMissingDefaultNetwork = errors.New("Missing CNI default network")
@@ -186,6 +194,8 @@ func (plugin *cniNetworkPlugin) monitorConfDir(start *sync.WaitGroup) {
// If defaultNetName is not empty, a CNI config with that network name will
// be used as the default CNI network, and container network operations will
// fail until that network config is present and valid.
+// If defaultNetName is empty, CNI config files should be reloaded real-time and
+// defaultNetName should be changeable and determined by file sorting.
func InitCNI(defaultNetName string, confDir string, binDirs ...string) (CNIPlugin, error) {
return initCNI(nil, "", defaultNetName, confDir, binDirs...)
}
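The comment added to InitCNI above distinguishes a fixed default network name from an empty one that stays changeable and is re-resolved from the config directory. A hypothetical caller, assuming the vendored ocicni package; the directories are placeholders.

package main

import (
	"fmt"

	"github.com/cri-o/ocicni/pkg/ocicni"
)

func main() {
	// Fixed default network: pod network operations fail until a config
	// named "podman" shows up under the conf dir.
	if _, err := ocicni.InitCNI("podman", "/etc/cni/net.d", "/usr/libexec/cni"); err != nil {
		fmt.Println("fixed default:", err)
	}

	// Empty default network: the default stays changeable and is re-picked
	// from the sorted config files found in the conf dir.
	if _, err := ocicni.InitCNI("", "/etc/cni/net.d", "/usr/libexec/cni"); err != nil {
		fmt.Println("auto default:", err)
	}
}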
@@ -198,17 +208,24 @@ func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir strin
if len(binDirs) == 0 {
binDirs = []string{DefaultBinDir}
}
+
plugin := &cniNetworkPlugin{
- defaultNetName: defaultNetName,
- networks: make(map[string]*cniNetwork),
- loNetwork: getLoNetwork(exec, binDirs),
- confDir: confDir,
- binDirs: binDirs,
- shutdownChan: make(chan struct{}),
- done: &sync.WaitGroup{},
- pods: make(map[string]*podLock),
- exec: exec,
- cacheDir: cacheDir,
+ cniConfig: libcni.NewCNIConfig(binDirs, exec),
+ defaultNetName: netName{
+ name: defaultNetName,
+ // If defaultNetName is not assigned in initialization,
+ // it should be changeable
+ changeable: defaultNetName == "",
+ },
+ networks: make(map[string]*cniNetwork),
+ loNetwork: getLoNetwork(),
+ confDir: confDir,
+ binDirs: binDirs,
+ shutdownChan: make(chan struct{}),
+ done: &sync.WaitGroup{},
+ pods: make(map[string]*podLock),
+ exec: exec,
+ cacheDir: cacheDir,
}
if exec == nil {
@@ -246,7 +263,7 @@ func (plugin *cniNetworkPlugin) Shutdown() error {
return nil
}
-func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[string]*cniNetwork, string, error) {
+func loadNetworks(confDir string, cni *libcni.CNIConfig) (map[string]*cniNetwork, string, error) {
files, err := libcni.ConfFiles(confDir, []string{".conf", ".conflist", ".json"})
if err != nil {
return nil, "", err
@@ -284,17 +301,30 @@ func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[st
logrus.Warningf("CNI config list %s has no networks, skipping", confFile)
continue
}
+
+ // Validate the CNI config up front to pre-check that the plugins
+ // it requires are present.
+ if _, err := cni.ValidateNetworkList(context.TODO(), confList); err != nil {
+ logrus.Warningf("Error validating CNI config file %s: %v", confFile, err)
+ continue
+ }
+
if confList.Name == "" {
confList.Name = path.Base(confFile)
}
+ cniNet := &cniNetwork{
+ name: confList.Name,
+ filePath: confFile,
+ config: confList,
+ }
+
logrus.Infof("Found CNI network %s (type=%v) at %s", confList.Name, confList.Plugins[0].Network.Type, confFile)
- networks[confList.Name] = &cniNetwork{
- name: confList.Name,
- filePath: confFile,
- NetworkConfig: confList,
- CNIConfig: libcni.NewCNIConfig(binDirs, exec),
+ if _, ok := networks[confList.Name]; !ok {
+ networks[confList.Name] = cniNet
+ } else {
+ logrus.Infof("Ignore CNI network %s (type=%v) at %s because already exists", confList.Name, confList.Plugins[0].Network.Type, confFile)
}
if defaultNetName == "" {
@@ -305,39 +335,49 @@ func loadNetworks(exec cniinvoke.Exec, confDir string, binDirs []string) (map[st
return networks, defaultNetName, nil
}
-func getLoNetwork(exec cniinvoke.Exec, binDirs []string) *cniNetwork {
- loConfig, err := libcni.ConfListFromBytes([]byte(`{
- "cniVersion": "0.2.0",
- "name": "cni-loopback",
+const (
+ loIfname string = "lo"
+ loNetname string = "cni-loopback"
+)
+
+func getLoNetwork() *cniNetwork {
+ loConfig, err := libcni.ConfListFromBytes([]byte(fmt.Sprintf(`{
+ "cniVersion": "0.3.1",
+ "name": "%s",
"plugins": [{
"type": "loopback"
}]
-}`))
+}`, loNetname)))
if err != nil {
// The hardcoded config above should always be valid and unit tests will
// catch this
panic(err)
}
loNetwork := &cniNetwork{
- name: "lo",
- NetworkConfig: loConfig,
- CNIConfig: libcni.NewCNIConfig(binDirs, exec),
+ name: loIfname,
+ config: loConfig,
}
return loNetwork
}
func (plugin *cniNetworkPlugin) syncNetworkConfig() error {
- networks, defaultNetName, err := loadNetworks(plugin.exec, plugin.confDir, plugin.binDirs)
+ networks, defaultNetName, err := loadNetworks(plugin.confDir, plugin.cniConfig)
if err != nil {
return err
}
plugin.Lock()
defer plugin.Unlock()
- if plugin.defaultNetName == "" {
- plugin.defaultNetName = defaultNetName
+
+ // Update defaultNetName if it is changeable
+ if plugin.defaultNetName.changeable {
+ plugin.defaultNetName.name = defaultNetName
+ logrus.Infof("Update default CNI network name to %s", defaultNetName)
+ } else {
+ logrus.Warnf("Default CNI network name %s is unchangeable", plugin.defaultNetName.name)
}
+
plugin.networks = networks
return nil
@@ -356,7 +396,7 @@ func (plugin *cniNetworkPlugin) getNetwork(name string) (*cniNetwork, error) {
func (plugin *cniNetworkPlugin) GetDefaultNetworkName() string {
plugin.RLock()
defer plugin.RUnlock()
- return plugin.defaultNetName
+ return plugin.defaultNetName.name
}
func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
@@ -382,27 +422,120 @@ func (plugin *cniNetworkPlugin) Name() string {
return CNIPluginName
}
-func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, forEachFunc func(*cniNetwork, string, *PodNetwork, RuntimeConfig) error) error {
+func (plugin *cniNetworkPlugin) loadNetworkFromCache(name string, rt *libcni.RuntimeConf) (*cniNetwork, *libcni.RuntimeConf, error) {
+ cniNet := &cniNetwork{
+ name: name,
+ config: &libcni.NetworkConfigList{
+ Name: name,
+ },
+ }
+
+ var confBytes []byte
+ var err error
+ confBytes, rt, err = plugin.cniConfig.GetNetworkListCachedConfig(cniNet.config, rt)
+ if err != nil {
+ return nil, nil, err
+ } else if confBytes == nil {
+ return nil, nil, fmt.Errorf("network %q not found in CNI cache", name)
+ }
+
+ cniNet.config, err = libcni.ConfListFromBytes(confBytes)
+ if err != nil {
+ // Might be a plain NetworkConfig
+ netConf, err := libcni.ConfFromBytes(confBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Up-convert to a NetworkConfigList
+ cniNet.config, err = libcni.ConfListFromConf(netConf)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return cniNet, rt, nil
+}
+
+type forEachNetworkFn func(*cniNetwork, *PodNetwork, *libcni.RuntimeConf) error
+
+func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache bool, actionFn forEachNetworkFn) error {
networks := podNetwork.Networks
if len(networks) == 0 {
- networks = append(networks, plugin.GetDefaultNetworkName())
+ networks = append(networks, NetAttachment{
+ Name: plugin.GetDefaultNetworkName(),
+ })
}
- for i, netName := range networks {
- // Interface names start at "eth0" and count up for each network
- ifName := fmt.Sprintf("eth%d", i)
- network, err := plugin.getNetwork(netName)
+
+ allIfNames := make(map[string]bool)
+ for _, req := range networks {
+ if req.Ifname != "" {
+ // Make sure the requested name isn't already assigned
+ if allIfNames[req.Ifname] {
+ return fmt.Errorf("network %q requested interface name %q already assigned", req.Name, req.Ifname)
+ }
+ allIfNames[req.Ifname] = true
+ }
+ }
+
+ for _, network := range networks {
+ ifName := network.Ifname
+ if ifName == "" {
+ for i := 0; i < 10000; i++ {
+ candidate := fmt.Sprintf("eth%d", i)
+ if !allIfNames[candidate] {
+ allIfNames[candidate] = true
+ ifName = candidate
+ break
+ }
+ }
+ if ifName == "" {
+ return fmt.Errorf("failed to find free interface name for network %q", network.Name)
+ }
+ }
+
+ rt, err := buildCNIRuntimeConf(plugin.cacheDir, podNetwork, ifName, podNetwork.RuntimeConfig[network.Name])
if err != nil {
- logrus.Errorf(err.Error())
+ logrus.Errorf("error building CNI runtime config: %v", err)
return err
}
- if err := forEachFunc(network, ifName, podNetwork, podNetwork.RuntimeConfig[netName]); err != nil {
+
+ var cniNet *cniNetwork
+ if fromCache {
+ var newRt *libcni.RuntimeConf
+ cniNet, newRt, err = plugin.loadNetworkFromCache(network.Name, rt)
+ if err != nil {
+ logrus.Errorf("error loading cached network config: %v", err)
+ // fall back to loading from existing plugins on disk
+ } else {
+ // Use the updated RuntimeConf
+ rt = newRt
+ }
+ }
+ if cniNet == nil {
+ cniNet, err = plugin.getNetwork(network.Name)
+ if err != nil {
+ logrus.Errorf(err.Error())
+ return err
+ }
+ }
+
+ if err := actionFn(cniNet, podNetwork, rt); err != nil {
return err
}
}
return nil
}
-func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Result, error) {
+func buildLoopbackRuntimeConf(cacheDir string, podNetwork *PodNetwork) *libcni.RuntimeConf {
+ return &libcni.RuntimeConf{
+ ContainerID: podNetwork.ID,
+ NetNS: podNetwork.NetNS,
+ CacheDir: cacheDir,
+ IfName: loIfname,
+ }
+}
+
+func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]NetResult, error) {
if err := plugin.networksAvailable(&podNetwork); err != nil {
return nil, err
}
@@ -410,20 +543,26 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- _, err := plugin.loNetwork.addToNetwork(plugin.cacheDir, &podNetwork, "lo", RuntimeConfig{})
- if err != nil {
+ loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork)
+ if _, err := plugin.loNetwork.addToNetwork(loRt, plugin.cniConfig); err != nil {
logrus.Errorf("Error while adding to cni lo network: %s", err)
return nil, err
}
- results := make([]cnitypes.Result, 0)
- if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
- result, err := network.addToNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig)
+ results := make([]NetResult, 0)
+ if err := plugin.forEachNetwork(&podNetwork, false, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
+ result, err := network.addToNetwork(rt, plugin.cniConfig)
if err != nil {
logrus.Errorf("Error while adding pod to CNI network %q: %s", network.name, err)
return err
}
- results = append(results, result)
+ results = append(results, NetResult{
+ Result: result,
+ NetAttachment: NetAttachment{
+ Name: network.name,
+ Ifname: rt.IfName,
+ },
+ })
return nil
}); err != nil {
return nil, err
@@ -432,16 +571,99 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]cnitypes.Resu
return results, nil
}
+func (plugin *cniNetworkPlugin) getCachedNetworkInfo(containerID string) ([]NetAttachment, error) {
+ cacheDir := libcni.CacheDir
+ if plugin.cacheDir != "" {
+ cacheDir = plugin.cacheDir
+ }
+
+ dirPath := filepath.Join(cacheDir, "results")
+ entries, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ fileNames := make([]string, 0, len(entries))
+ for _, e := range entries {
+ fileNames = append(fileNames, e.Name())
+ }
+ sort.Strings(fileNames)
+
+ attachments := []NetAttachment{}
+ for _, fname := range fileNames {
+ part := fmt.Sprintf("-%s-", containerID)
+ pos := strings.Index(fname, part)
+ if pos <= 0 || pos+len(part) >= len(fname) {
+ continue
+ }
+
+ cacheFile := filepath.Join(dirPath, fname)
+ bytes, err := ioutil.ReadFile(cacheFile)
+ if err != nil {
+ logrus.Warningf("failed to read CNI cache file %s: %v", cacheFile, err)
+ continue
+ }
+
+ cachedInfo := struct {
+ Kind string `json:"kind"`
+ IfName string `json:"ifName"`
+ ContainerID string `json:"containerID"`
+ NetName string `json:"networkName"`
+ }{}
+
+ if err := json.Unmarshal(bytes, &cachedInfo); err != nil {
+ logrus.Warningf("failed to unmarshal CNI cache file %s: %v", cacheFile, err)
+ continue
+ }
+ if cachedInfo.Kind != libcni.CNICacheV1 {
+ logrus.Warningf("unknown CNI cache file %s kind %q", cacheFile, cachedInfo.Kind)
+ continue
+ }
+ if cachedInfo.ContainerID != containerID {
+ continue
+ }
+ // Ignore the loopback interface; it's handled separately
+ if cachedInfo.IfName == loIfname && cachedInfo.NetName == loNetname {
+ continue
+ }
+ if cachedInfo.IfName == "" || cachedInfo.NetName == "" {
+ logrus.Warningf("missing CNI cache file %s ifname %q or netname %q", cacheFile, cachedInfo.IfName, cachedInfo.NetName)
+ continue
+ }
+
+ attachments = append(attachments, NetAttachment{
+ Name: cachedInfo.NetName,
+ Ifname: cachedInfo.IfName,
+ })
+ }
+ return attachments, nil
+}
+
+// TearDownPod tears down pod networks. Prefers cached pod attachment information
+// but falls back to given network attachment information.
func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
+ if len(podNetwork.Networks) == 0 {
+ attachments, err := plugin.getCachedNetworkInfo(podNetwork.ID)
+ if err == nil && len(attachments) > 0 {
+ podNetwork.Networks = attachments
+ }
+ }
+
if err := plugin.networksAvailable(&podNetwork); err != nil {
return err
}
+ loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork)
+ if err := plugin.loNetwork.deleteFromNetwork(loRt, plugin.cniConfig); err != nil {
+ logrus.Errorf("Error while removing pod from CNI lo network: %v", err)
+ // Loopback teardown errors are not fatal
+ }
+
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- return plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
- if err := network.deleteFromNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig); err != nil {
+ return plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
+ if err := network.deleteFromNetwork(rt, plugin.cniConfig); err != nil {
logrus.Errorf("Error while removing pod from CNI network %q: %s", network.name, err)
return err
}
@@ -451,19 +673,25 @@ func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
// GetPodNetworkStatus returns IP addressing and interface details for all
// networks attached to the pod.
-func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cnitypes.Result, error) {
+func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]NetResult, error) {
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- results := make([]cnitypes.Result, 0)
- if err := plugin.forEachNetwork(&podNetwork, func(network *cniNetwork, ifName string, podNetwork *PodNetwork, runtimeConfig RuntimeConfig) error {
- result, err := network.checkNetwork(plugin.cacheDir, podNetwork, ifName, runtimeConfig, plugin.nsManager)
+ results := make([]NetResult, 0)
+ if err := plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
+ result, err := network.checkNetwork(rt, plugin.cniConfig, plugin.nsManager, podNetwork.NetNS)
if err != nil {
logrus.Errorf("Error while checking pod to CNI network %q: %s", network.name, err)
return err
}
if result != nil {
- results = append(results, result)
+ results = append(results, NetResult{
+ Result: result,
+ NetAttachment: NetAttachment{
+ Name: network.name,
+ Ifname: rt.IfName,
+ },
+ })
}
return nil
}); err != nil {
@@ -473,16 +701,9 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]cn
return results, nil
}
-func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) (cnitypes.Result, error) {
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
- if err != nil {
- logrus.Errorf("Error adding network: %v", err)
- return nil, err
- }
-
- netconf, cninet := network.NetworkConfig, network.CNIConfig
- logrus.Infof("About to add CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
- res, err := cninet.AddNetworkList(context.Background(), netconf, rt)
+func (network *cniNetwork) addToNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) (cnitypes.Result, error) {
+ logrus.Infof("About to add CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
+ res, err := cni.AddNetworkList(context.Background(), network.config, rt)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
@@ -491,18 +712,10 @@ func (network *cniNetwork) addToNetwork(cacheDir string, podNetwork *PodNetwork,
return res, nil
}
-func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig, nsManager *nsManager) (cnitypes.Result, error) {
-
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
- if err != nil {
- logrus.Errorf("Error checking network: %v", err)
- return nil, err
- }
-
- netconf, cninet := network.NetworkConfig, network.CNIConfig
- logrus.Infof("About to check CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
+func (network *cniNetwork) checkNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig, nsManager *nsManager, netns string) (cnitypes.Result, error) {
+ logrus.Infof("About to check CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
- gtet, err := cniversion.GreaterThanOrEqualTo(netconf.CNIVersion, "0.4.0")
+ gtet, err := cniversion.GreaterThanOrEqualTo(network.config.CNIVersion, "0.4.0")
if err != nil {
return nil, err
}
@@ -511,15 +724,15 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork,
// When CNIVersion supports Check, use it. Otherwise fall back on what was done initially.
if gtet {
- err = cninet.CheckNetworkList(context.Background(), netconf, rt)
- logrus.Infof("Checking CNI network %s (config version=%v)", netconf.Name, netconf.CNIVersion)
+ err = cni.CheckNetworkList(context.Background(), network.config, rt)
+ logrus.Infof("Checking CNI network %s (config version=%v)", network.name, network.config.CNIVersion)
if err != nil {
logrus.Errorf("Error checking network: %v", err)
return nil, err
}
}
- result, err = cninet.GetNetworkListCachedResult(netconf, rt)
+ result, err = cni.GetNetworkListCachedResult(network.config, rt)
if err != nil {
logrus.Errorf("Error GetNetworkListCachedResult: %v", err)
return nil, err
@@ -528,19 +741,19 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork,
}
// result doesn't exist, create one
- logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", netconf.Name, netconf.CNIVersion, nsManager)
+ logrus.Infof("Checking CNI network %s (config version=%v) nsManager=%v", network.name, network.config.CNIVersion, nsManager)
var cniInterface *cnicurrent.Interface
ips := []*cnicurrent.IPConfig{}
errs := []error{}
for _, version := range []string{"4", "6"} {
- ip, mac, err := getContainerDetails(nsManager, podNetwork.NetNS, ifName, "-"+version)
+ ip, mac, err := getContainerDetails(nsManager, netns, rt.IfName, "-"+version)
if err == nil {
if cniInterface == nil {
cniInterface = &cnicurrent.Interface{
- Name: ifName,
+ Name: rt.IfName,
Mac: mac.String(),
- Sandbox: podNetwork.NetNS,
+ Sandbox: netns,
}
}
ips = append(ips, &cnicurrent.IPConfig{
@@ -557,25 +770,23 @@ func (network *cniNetwork) checkNetwork(cacheDir string, podNetwork *PodNetwork,
}
result = &cnicurrent.Result{
- CNIVersion: netconf.CNIVersion,
+ CNIVersion: network.config.CNIVersion,
Interfaces: []*cnicurrent.Interface{cniInterface},
IPs: ips,
}
- return result, nil
-}
-
-func (network *cniNetwork) deleteFromNetwork(cacheDir string, podNetwork *PodNetwork, ifName string, runtimeConfig RuntimeConfig) error {
- rt, err := buildCNIRuntimeConf(cacheDir, podNetwork, ifName, runtimeConfig)
+ // Result must be the same CNIVersion as the CNI config
+ converted, err := result.GetAsVersion(network.config.CNIVersion)
if err != nil {
- logrus.Errorf("Error deleting network: %v", err)
- return err
+ return nil, err
}
- netconf, cninet := network.NetworkConfig, network.CNIConfig
- logrus.Infof("About to del CNI network %s (type=%v)", netconf.Name, netconf.Plugins[0].Network.Type)
- err = cninet.DelNetworkList(context.Background(), netconf, rt)
- if err != nil {
+ return converted, nil
+}
+
+func (network *cniNetwork) deleteFromNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) error {
+ logrus.Infof("About to del CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
+ if err := cni.DelNetworkList(context.Background(), network.config, rt); err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
}
@@ -608,6 +819,16 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string,
rt.Args = append(rt.Args, [2]string{"IP", ip})
}
+ // Add the requested static MAC to CNI_ARGS
+ mac := runtimeConfig.MAC
+ if mac != "" {
+ _, err := net.ParseMAC(mac)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse MAC address %q: %v", mac, err)
+ }
+ rt.Args = append(rt.Args, [2]string{"MAC", mac})
+ }
+
// Set PortMappings in Capabilities
if len(runtimeConfig.PortMappings) != 0 {
rt.CapabilityArgs["portMappings"] = runtimeConfig.PortMappings
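To make the new CNI_ARGS handling concrete, here is a small self-contained sketch (written for this note, not taken from the patch) of the MAC validation the patch adds to buildCNIRuntimeConf; the helper name is hypothetical.

package main

import (
	"fmt"
	"net"
)

// appendStaticMAC mirrors the branch added to buildCNIRuntimeConf: an
// optional static MAC from RuntimeConfig is validated and passed to the
// plugin as a CNI_ARGS key/value pair.
func appendStaticMAC(args [][2]string, mac string) ([][2]string, error) {
	if mac == "" {
		return args, nil // no static MAC requested
	}
	if _, err := net.ParseMAC(mac); err != nil {
		return nil, fmt.Errorf("unable to parse MAC address %q: %v", mac, err)
	}
	return append(args, [2]string{"MAC", mac}), nil
}

func main() {
	args, err := appendStaticMAC(nil, "aa:bb:cc:dd:ee:ff")
	fmt.Println(args, err) // [[MAC aa:bb:cc:dd:ee:ff]] <nil>

	_, err = appendStaticMAC(nil, "not-a-mac")
	fmt.Println(err) // unable to parse MAC address "not-a-mac": ...
}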
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
index 8709711e0..717ecda33 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
@@ -44,6 +44,9 @@ type RuntimeConfig struct {
// with the hostlocal IP allocator. If left unset, an IP will be
// dynamically allocated.
IP string
+ // MAC is a static MAC address to be assigned to the network interface.
+ // If left unset, a MAC will be dynamically allocated.
+ MAC string
// PortMappings is the port mapping of the sandbox.
PortMappings []PortMapping
// Bandwidth is the bandwidth limiting of the pod
@@ -75,9 +78,10 @@ type PodNetwork struct {
// NetNS is the network namespace path of the sandbox.
NetNS string
- // Networks is a list of CNI network names to attach to the sandbox
- // Leave this list empty to attach the default network to the sandbox
- Networks []string
+ // Networks is a list of CNI network names (and optional interface
+ // names) to attach to the sandbox. Leave this list empty to attach the
+ // default network to the sandbox
+ Networks []NetAttachment
// NetworkConfig is configuration specific to a single CNI network.
// It is optional, and can be omitted for some or all specified networks
@@ -85,6 +89,24 @@ type PodNetwork struct {
RuntimeConfig map[string]RuntimeConfig
}
+// NetAttachment describes a container network attachment
+type NetAttachment struct {
+ // Name contains the name of the CNI network to which the container
+ // should be or is attached
+ Name string
+ // Ifname contains the optional interface name of the attachment
+ Ifname string
+}
+
+// NetResult contains the result of the network attachment operation
+type NetResult struct {
+ // Result is the CNI Result
+ Result types.Result
+ // NetAttachment contains the network and interface names of this
+ // network attachment
+ NetAttachment
+}
+
// CNIPlugin is the interface that needs to be implemented by a plugin
type CNIPlugin interface {
// Name returns the plugin's name. This will be used when searching
@@ -98,13 +120,13 @@ type CNIPlugin interface {
// SetUpPod is the method called after the sandbox container of
// the pod has been created but before the other containers of the
// pod are launched.
- SetUpPod(network PodNetwork) ([]types.Result, error)
+ SetUpPod(network PodNetwork) ([]NetResult, error)
// TearDownPod is the method called before a pod's sandbox container will be deleted
TearDownPod(network PodNetwork) error
// Status is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox
- GetPodNetworkStatus(network PodNetwork) ([]types.Result, error)
+ GetPodNetworkStatus(network PodNetwork) ([]NetResult, error)
// NetworkStatus returns error if the network plugin is in error state
Status() error
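A hypothetical caller-side sketch of the new types (the network names, pod identifiers, and netns path are placeholders): the pod requests two attachments, pins the interface name of the second, and reads the per-attachment results that the reworked SetUpPod returns.

package example

import (
	"log"

	"github.com/cri-o/ocicni/pkg/ocicni"
)

func setUpExample(plugin ocicni.CNIPlugin) {
	podNet := ocicni.PodNetwork{
		Name:      "mypod",
		Namespace: "default",
		ID:        "0123456789ab",
		NetNS:     "/var/run/netns/mypod",
		Networks: []ocicni.NetAttachment{
			{Name: "podman"},                      // interface name auto-assigned (eth0)
			{Name: "macvlan-net", Ifname: "net1"}, // requested interface name
		},
		RuntimeConfig: map[string]ocicni.RuntimeConfig{
			"macvlan-net": {MAC: "aa:bb:cc:dd:ee:ff"}, // uses the new static MAC field
		},
	}

	results, err := plugin.SetUpPod(podNet)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		// NetResult embeds NetAttachment, so each CNI result carries the
		// network and interface name it belongs to.
		log.Printf("network %s on %s: %v", r.Name, r.Ifname, r.Result)
	}
}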
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
deleted file mode 100644
index 495db809e..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
-
- "github.com/docker/docker/pkg/archive"
- "github.com/sirupsen/logrus"
-)
-
-var (
- flDebug = flag.Bool("D", false, "debugging output")
- flNewDir = flag.String("newdir", "", "")
- flOldDir = flag.String("olddir", "", "")
- log = logrus.New()
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
- fmt.Printf("%s [OPTIONS]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- log.Out = os.Stderr
- if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
- logrus.SetLevel(logrus.DebugLevel)
- }
- var newDir, oldDir string
-
- if len(*flNewDir) == 0 {
- var err error
- newDir, err = ioutil.TempDir("", "docker-test-newDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(newDir)
- if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
- log.Fatal(err)
- }
- } else {
- newDir = *flNewDir
- }
-
- if len(*flOldDir) == 0 {
- oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
- if err != nil {
- log.Fatal(err)
- }
- defer os.RemoveAll(oldDir)
- } else {
- oldDir = *flOldDir
- }
-
- changes, err := archive.ChangesDirs(newDir, oldDir)
- if err != nil {
- log.Fatal(err)
- }
-
- a, err := archive.ExportChanges(newDir, changes)
- if err != nil {
- log.Fatal(err)
- }
- defer a.Close()
-
- i, err := io.Copy(os.Stdout, a)
- if err != nil && err != io.EOF {
- log.Fatal(err)
- }
- fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
- fileData := []byte("fooo")
- for n := 0; n < numberOfFiles; n++ {
- fileName := fmt.Sprintf("file-%d", n)
- if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
- return 0, err
- }
- if makeLinks {
- if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
- return 0, err
- }
- }
- }
- totalSize := numberOfFiles * len(fileData)
- return totalSize, nil
-}
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
index 95ae54fbf..29b31cf78 100644
--- a/vendor/github.com/json-iterator/go/iter.go
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -74,6 +74,7 @@ type Iterator struct {
buf []byte
head int
tail int
+ depth int
captureStartedAt int
captured []byte
Error error
@@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator {
buf: nil,
head: 0,
tail: 0,
+ depth: 0,
}
}
@@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
buf: make([]byte, bufSize),
head: 0,
tail: 0,
+ depth: 0,
}
}
@@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator {
buf: input,
head: 0,
tail: len(input),
+ depth: 0,
}
}
@@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator {
iter.reader = reader
iter.head = 0
iter.tail = 0
+ iter.depth = 0
return iter
}
@@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator {
iter.buf = input
iter.head = 0
iter.tail = len(input)
+ iter.depth = 0
return iter
}
@@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} {
return nil
}
}
+
+// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
+const maxDepth = 10000
+
+func (iter *Iterator) incrementDepth() (success bool) {
+ iter.depth++
+ if iter.depth <= maxDepth {
+ return true
+ }
+ iter.ReportError("incrementDepth", "exceeded max depth")
+ return false
+}
+
+func (iter *Iterator) decrementDepth() (success bool) {
+ iter.depth--
+ if iter.depth >= 0 {
+ return true
+ }
+ iter.ReportError("decrementDepth", "unexpected negative nesting")
+ return false
+}
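A small sketch (independent of the patch) of the effect of the new depth tracking: a document nested deeper than maxDepth now fails with a clear error instead of recursing without bound.

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// 20000 levels of nesting, well past the new maxDepth of 10000.
	deep := strings.Repeat("[", 20000) + strings.Repeat("]", 20000)

	var v interface{}
	err := jsoniter.Unmarshal([]byte(deep), &v)
	// With the depth counter in place this reports an
	// "exceeded max depth" error instead of unbounded recursion.
	fmt.Println(err)
}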
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
index 6188cb457..204fe0e09 100644
--- a/vendor/github.com/json-iterator/go/iter_array.go
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) {
func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
c := iter.nextToken()
if c == '[' {
+ if !iter.incrementDepth() {
+ return false
+ }
c = iter.nextToken()
if c != ']' {
iter.unreadByte()
if !callback(iter) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
for c == ',' {
if !callback(iter) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != ']' {
iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+ iter.decrementDepth()
return false
}
- return true
+ return iter.decrementDepth()
}
- return true
+ return iter.decrementDepth()
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
index 1c5757671..b65137114 100644
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
var field string
if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
@@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
@@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadObjectCB", `object not ended with }`)
+ iter.decrementDepth()
return false
}
- return true
+ return iter.decrementDepth()
}
if c == '}' {
- return true
+ return iter.decrementDepth()
}
iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if c == 'n' {
@@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
field := iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
@@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
field = iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if !callback(iter, field) {
+ iter.decrementDepth()
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadMapCB", `object not ended with }`)
+ iter.decrementDepth()
return false
}
- return true
+ return iter.decrementDepth()
}
if c == '}' {
- return true
+ return iter.decrementDepth()
}
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ iter.decrementDepth()
return false
}
if c == 'n' {
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
index 8fcdc3b69..9303de41e 100644
--- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() {
func (iter *Iterator) skipArray() {
level := 1
+ if !iter.incrementDepth() {
+ return
+ }
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
@@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() {
i = iter.head - 1 // it will be i++ soon
case '[': // If open symbol, increase level
level++
+ if !iter.incrementDepth() {
+ return
+ }
case ']': // If close symbol, decrease level
level--
+ if !iter.decrementDepth() {
+ return
+ }
// If we have returned to the original level, we're done
if level == 0 {
@@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() {
func (iter *Iterator) skipObject() {
level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
@@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() {
i = iter.head - 1 // it will be i++ soon
case '{': // If open symbol, increase level
level++
+ if !iter.incrementDepth() {
+ return
+ }
case '}': // If close symbol, decrease level
level--
+ if !iter.decrementDepth() {
+ return
+ }
// If we have returned to the original level, we're done
if level == 0 {
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
index 4459e203f..74974ba74 100644
--- a/vendor/github.com/json-iterator/go/reflect.go
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx {
// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
+ depth := iter.depth
cacheKey := reflect2.RTypeOf(obj)
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
@@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) {
return
}
decoder.Decode(ptr, iter)
+ if iter.depth != depth {
+ iter.ReportError("ReadVal", "unexpected mismatched nesting")
+ return
+ }
}
// WriteVal copy the go interface into underlying JSON, same as json.Marshal
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
index 05e8fbf1f..e27e8d191 100644
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
- tagParts := strings.Split(tag, ",")
if tag == "-" {
continue
}
+ tagParts := strings.Split(tag, ",")
if field.Anonymous() && (tag == "" || tagParts[0] == "") {
if field.Type().Kind() == reflect.Struct {
structDescriptor := describeStruct(ctx, field.Type())
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
index 547b4421e..08e9a3912 100644
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -249,6 +249,10 @@ type mapEncoder struct {
}
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
stream.WriteObjectStart()
iter := encoder.mapType.UnsafeIterate(ptr)
for i := 0; iter.HasNext(); i++ {
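A sketch of the behavior change in mapEncoder (written for this note, not part of the patch): a nil map now encodes as null, matching encoding/json, where it previously encoded as an empty object.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var m map[string]int // nil map, never initialized

	out, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(m)
	if err != nil {
		panic(err)
	}
	// With the nil check added to mapEncoder.Encode this prints "null"
	// (as encoding/json does) rather than "{}".
	fmt.Println(string(out))
}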
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
index fea50719d..3e21f3756 100644
--- a/vendor/github.com/json-iterator/go/reflect_marshaler.go
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -3,8 +3,9 @@ package jsoniter
import (
"encoding"
"encoding/json"
- "github.com/modern-go/reflect2"
"unsafe"
+
+ "github.com/modern-go/reflect2"
)
var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
@@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteNil()
return
}
- bytes, err := json.Marshal(obj)
+ marshaler := obj.(json.Marshaler)
+ bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
+ // html escape was already done by jsoniter
+ // but the extra '\n' should be trimmed
+ l := len(bytes)
+ if l > 0 && bytes[l-1] == '\n' {
+ bytes = bytes[:l-1]
+ }
stream.Write(bytes)
}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index 932641ac4..5ad5cc561 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
var c byte
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
@@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if c != '}' {
iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
}
+ iter.decrementDepth()
}
func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
@@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
if iter.readFieldHash() == decoder.fieldHash {
decoder.fieldDecoder.Decode(ptr, iter)
@@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type twoFieldsStructDecoder struct {
@@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type threeFieldsStructDecoder struct {
@@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type fourFieldsStructDecoder struct {
@@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type fiveFieldsStructDecoder struct {
@@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type sixFieldsStructDecoder struct {
@@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type sevenFieldsStructDecoder struct {
@@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type eightFieldsStructDecoder struct {
@@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type nineFieldsStructDecoder struct {
@@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type tenFieldsStructDecoder struct {
@@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if !iter.readObjectStart() {
return
}
+ if !iter.incrementDepth() {
+ return
+ }
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
@@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
+ iter.decrementDepth()
}
type structFieldDecoder struct {
diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go
deleted file mode 100644
index 154c89a48..000000000
--- a/vendor/github.com/klauspost/compress/flate/gen.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// This program generates fixedhuff.go
-// Invoke as
-//
-// go run gen.go -output fixedhuff.go
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io/ioutil"
- "log"
-)
-
-var filename = flag.String("output", "fixedhuff.go", "output file name")
-
-const maxCodeLen = 16
-
-// Note: the definition of the huffmanDecoder struct is copied from
-// inflate.go, as it is private to the implementation.
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
- huffmanChunkBits = 9
- huffmanNumChunks = 1 << huffmanChunkBits
- huffmanCountMask = 15
- huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
- min int // the minimum code length
- chunks [huffmanNumChunks]uint32 // chunks as described above
- links [][]uint32 // overflow links
- linkMask uint32 // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(bits []int) bool {
- // Sanity enables additional runtime tests during Huffman
- // table construction. It's intended to be used during
- // development to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if h.min != 0 {
- *h = huffmanDecoder{}
- }
-
- // Count number of codes of each length,
- // compute min and max length.
- var count [maxCodeLen]int
- var min, max int
- for _, n := range bits {
- if n == 0 {
- continue
- }
- if min == 0 || n < min {
- min = n
- }
- if n > max {
- max = n
- }
- count[n]++
- }
-
- // Empty tree. The decompressor.huffSym function will fail later if the tree
- // is used. Technically, an empty tree is only valid for the HDIST tree and
- // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
- // is guaranteed to fail since it will attempt to use the tree to decode the
- // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
- // guaranteed to fail later since the compressed data section must be
- // composed of at least one symbol (the end-of-block marker).
- if max == 0 {
- return true
- }
-
- code := 0
- var nextcode [maxCodeLen]int
- for i := min; i <= max; i++ {
- code <<= 1
- nextcode[i] = code
- code += count[i]
- }
-
- // Check that the coding is complete (i.e., that we've
- // assigned all 2-to-the-max possible bit sequences).
- // Exception: To be compatible with zlib, we also need to
- // accept degenerate single-code codings. See also
- // TestDegenerateHuffmanCoding.
- if code != 1<<uint(max) && !(code == 1 && max == 1) {
- return false
- }
-
- h.min = min
- if max > huffmanChunkBits {
- numLinks := 1 << (uint(max) - huffmanChunkBits)
- h.linkMask = uint32(numLinks - 1)
-
- // create link tables
- link := nextcode[huffmanChunkBits+1] >> 1
- h.links = make([][]uint32, huffmanNumChunks-link)
- for j := uint(link); j < huffmanNumChunks; j++ {
- reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
- reverse >>= uint(16 - huffmanChunkBits)
- off := j - uint(link)
- if sanity && h.chunks[reverse] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
- h.links[off] = make([]uint32, numLinks)
- }
- }
-
- for i, n := range bits {
- if n == 0 {
- continue
- }
- code := nextcode[n]
- nextcode[n]++
- chunk := uint32(i<<huffmanValueShift | n)
- reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
- reverse >>= uint(16 - n)
- if n <= huffmanChunkBits {
- for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
- // We should never need to overwrite
- // an existing chunk. Also, 0 is
- // never a valid chunk, because the
- // lower 4 "count" bits should be
- // between 1 and 15.
- if sanity && h.chunks[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[off] = chunk
- }
- } else {
- j := reverse & (huffmanNumChunks - 1)
- if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
- // Longer codes should have been
- // associated with a link table above.
- panic("impossible: not an indirect chunk")
- }
- value := h.chunks[j] >> huffmanValueShift
- linktab := h.links[value]
- reverse >>= huffmanChunkBits
- for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
- if sanity && linktab[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- linktab[off] = chunk
- }
- }
- }
-
- if sanity {
- // Above we've sanity checked that we never overwrote
- // an existing entry. Here we additionally check that
- // we filled the tables completely.
- for i, chunk := range h.chunks {
- if chunk == 0 {
- // As an exception, in the degenerate
- // single-code case, we allow odd
- // chunks to be missing.
- if code == 1 && i%2 == 1 {
- continue
- }
- panic("impossible: missing chunk")
- }
- }
- for _, linktab := range h.links {
- for _, chunk := range linktab {
- if chunk == 0 {
- panic("impossible: missing chunk")
- }
- }
- }
- }
-
- return true
-}
-
-func main() {
- flag.Parse()
-
- var h huffmanDecoder
- var bits [288]int
- initReverseByte()
- for i := 0; i < 144; i++ {
- bits[i] = 8
- }
- for i := 144; i < 256; i++ {
- bits[i] = 9
- }
- for i := 256; i < 280; i++ {
- bits[i] = 7
- }
- for i := 280; i < 288; i++ {
- bits[i] = 8
- }
- h.init(bits[:])
- if h.links != nil {
- log.Fatal("Unexpected links table in fixed Huffman decoder")
- }
-
- var buf bytes.Buffer
-
- fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.`+"\n\n")
-
- fmt.Fprintln(&buf, "package flate")
- fmt.Fprintln(&buf)
- fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
- fmt.Fprintln(&buf)
- fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
- fmt.Fprintf(&buf, "\t%d,\n", h.min)
- fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
- for i := 0; i < huffmanNumChunks; i++ {
- if i&7 == 0 {
- fmt.Fprintf(&buf, "\t\t")
- } else {
- fmt.Fprintf(&buf, " ")
- }
- fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
- if i&7 == 7 {
- fmt.Fprintln(&buf)
- }
- }
- fmt.Fprintln(&buf, "\t},")
- fmt.Fprintln(&buf, "\tnil, 0,")
- fmt.Fprintln(&buf, "}")
-
- data, err := format.Source(buf.Bytes())
- if err != nil {
- log.Fatal(err)
- }
- err = ioutil.WriteFile(*filename, data, 0644)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-var reverseByte [256]byte
-
-func initReverseByte() {
- for x := 0; x < 256; x++ {
- var result byte
- for i := uint(0); i < 8; i++ {
- result |= byte(((x >> i) & 1) << (7 - i))
- }
- reverseByte[x] = result
- }
-}
diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go
deleted file mode 100644
index 437333d29..000000000
--- a/vendor/github.com/klauspost/cpuid/private-gen.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/printer"
- "go/token"
- "io"
- "io/ioutil"
- "log"
- "os"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-var inFiles = []string{"cpuid.go", "cpuid_test.go"}
-var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
-var fileSet = token.NewFileSet()
-var reWrites = []rewrite{
- initRewrite("CPUInfo -> cpuInfo"),
- initRewrite("Vendor -> vendor"),
- initRewrite("Flags -> flags"),
- initRewrite("Detect -> detect"),
- initRewrite("CPU -> cpu"),
-}
-var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
- // cpuid_test.go
- "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
-}
-
-var excludePrefixes = []string{"test", "benchmark"}
-
-func main() {
- Package := "private"
- parserMode := parser.ParseComments
- exported := make(map[string]rewrite)
- for _, file := range inFiles {
- in, err := os.Open(file)
- if err != nil {
- log.Fatalf("opening input", err)
- }
-
- src, err := ioutil.ReadAll(in)
- if err != nil {
- log.Fatalf("reading input", err)
- }
-
- astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
- if err != nil {
- log.Fatalf("parsing input", err)
- }
-
- for _, rw := range reWrites {
- astfile = rw(astfile)
- }
-
- // Inspect the AST and print all identifiers and literals.
- var startDecl token.Pos
- var endDecl token.Pos
- ast.Inspect(astfile, func(n ast.Node) bool {
- var s string
- switch x := n.(type) {
- case *ast.Ident:
- if x.IsExported() {
- t := strings.ToLower(x.Name)
- for _, pre := range excludePrefixes {
- if strings.HasPrefix(t, pre) {
- return true
- }
- }
- if excludeNames[t] != true {
- //if x.Pos() > startDecl && x.Pos() < endDecl {
- exported[x.Name] = initRewrite(x.Name + " -> " + t)
- }
- }
-
- case *ast.GenDecl:
- if x.Tok == token.CONST && x.Lparen > 0 {
- startDecl = x.Lparen
- endDecl = x.Rparen
- // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
- }
- }
- if s != "" {
- fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
- }
- return true
- })
-
- for _, rw := range exported {
- astfile = rw(astfile)
- }
-
- var buf bytes.Buffer
-
- printer.Fprint(&buf, fileSet, astfile)
-
- // Remove package documentation and insert information
- s := buf.String()
- ind := strings.Index(buf.String(), "\npackage cpuid")
- s = s[ind:]
- s = "// Generated, DO NOT EDIT,\n" +
- "// but copy it to your own project and rename the package.\n" +
- "// See more at http://github.com/klauspost/cpuid\n" +
- s
-
- outputName := Package + string(os.PathSeparator) + file
-
- err = ioutil.WriteFile(outputName, []byte(s), 0644)
- if err != nil {
- log.Fatalf("writing output: %s", err)
- }
- log.Println("Generated", outputName)
- }
-
- for _, file := range copyFiles {
- dst := ""
- if strings.HasPrefix(file, "cpuid") {
- dst = Package + string(os.PathSeparator) + file
- } else {
- dst = Package + string(os.PathSeparator) + "cpuid_" + file
- }
- err := copyFile(file, dst)
- if err != nil {
- log.Fatalf("copying file: %s", err)
- }
- log.Println("Copied", dst)
- }
-}
-
-// CopyFile copies a file from src to dst. If src and dst files exist, and are
-// the same, then return success. Copy the file contents from src to dst.
-func copyFile(src, dst string) (err error) {
- sfi, err := os.Stat(src)
- if err != nil {
- return
- }
- if !sfi.Mode().IsRegular() {
- // cannot copy non-regular files (e.g., directories,
- // symlinks, devices, etc.)
- return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
- }
- dfi, err := os.Stat(dst)
- if err != nil {
- if !os.IsNotExist(err) {
- return
- }
- } else {
- if !(dfi.Mode().IsRegular()) {
- return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
- }
- if os.SameFile(sfi, dfi) {
- return
- }
- }
- err = copyFileContents(src, dst)
- return
-}
-
-// copyFileContents copies the contents of the file named src to the file named
-// by dst. The file will be created if it does not already exist. If the
-// destination file exists, all it's contents will be replaced by the contents
-// of the source file.
-func copyFileContents(src, dst string) (err error) {
- in, err := os.Open(src)
- if err != nil {
- return
- }
- defer in.Close()
- out, err := os.Create(dst)
- if err != nil {
- return
- }
- defer func() {
- cerr := out.Close()
- if err == nil {
- err = cerr
- }
- }()
- if _, err = io.Copy(out, in); err != nil {
- return
- }
- err = out.Sync()
- return
-}
-
-type rewrite func(*ast.File) *ast.File
-
-// Mostly copied from gofmt
-func initRewrite(rewriteRule string) rewrite {
- f := strings.Split(rewriteRule, "->")
- if len(f) != 2 {
- fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
- os.Exit(2)
- }
- pattern := parseExpr(f[0], "pattern")
- replace := parseExpr(f[1], "replacement")
- return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
-}
-
-// parseExpr parses s as an expression.
-// It might make sense to expand this to allow statement patterns,
-// but there are problems with preserving formatting and also
-// with what a wildcard for a statement looks like.
-func parseExpr(s, what string) ast.Expr {
- x, err := parser.ParseExpr(s)
- if err != nil {
- fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
- os.Exit(2)
- }
- return x
-}
-
-// Keep this function for debugging.
-/*
-func dump(msg string, val reflect.Value) {
- fmt.Printf("%s:\n", msg)
- ast.Print(fileSet, val.Interface())
- fmt.Println()
-}
-*/
-
-// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
-func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
- cmap := ast.NewCommentMap(fileSet, p, p.Comments)
- m := make(map[string]reflect.Value)
- pat := reflect.ValueOf(pattern)
- repl := reflect.ValueOf(replace)
-
- var rewriteVal func(val reflect.Value) reflect.Value
- rewriteVal = func(val reflect.Value) reflect.Value {
- // don't bother if val is invalid to start with
- if !val.IsValid() {
- return reflect.Value{}
- }
- for k := range m {
- delete(m, k)
- }
- val = apply(rewriteVal, val)
- if match(m, pat, val) {
- val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
- }
- return val
- }
-
- r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
- r.Comments = cmap.Filter(r).Comments() // recreate comments list
- return r
-}
-
-// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
-func set(x, y reflect.Value) {
- // don't bother if x cannot be set or y is invalid
- if !x.CanSet() || !y.IsValid() {
- return
- }
- defer func() {
- if x := recover(); x != nil {
- if s, ok := x.(string); ok &&
- (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
- // x cannot be set to y - ignore this rewrite
- return
- }
- panic(x)
- }
- }()
- x.Set(y)
-}
-
-// Values/types for special cases.
-var (
- objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
- scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
-
- identType = reflect.TypeOf((*ast.Ident)(nil))
- objectPtrType = reflect.TypeOf((*ast.Object)(nil))
- positionType = reflect.TypeOf(token.NoPos)
- callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
- scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
-)
-
-// apply replaces each AST field x in val with f(x), returning val.
-// To avoid extra conversions, f operates on the reflect.Value form.
-func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
- if !val.IsValid() {
- return reflect.Value{}
- }
-
- // *ast.Objects introduce cycles and are likely incorrect after
- // rewrite; don't follow them but replace with nil instead
- if val.Type() == objectPtrType {
- return objectPtrNil
- }
-
- // similarly for scopes: they are likely incorrect after a rewrite;
- // replace them with nil
- if val.Type() == scopePtrType {
- return scopePtrNil
- }
-
- switch v := reflect.Indirect(val); v.Kind() {
- case reflect.Slice:
- for i := 0; i < v.Len(); i++ {
- e := v.Index(i)
- set(e, f(e))
- }
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- e := v.Field(i)
- set(e, f(e))
- }
- case reflect.Interface:
- e := v.Elem()
- set(v, f(e))
- }
- return val
-}
-
-func isWildcard(s string) bool {
- rune, size := utf8.DecodeRuneInString(s)
- return size == len(s) && unicode.IsLower(rune)
-}
-
-// match returns true if pattern matches val,
-// recording wildcard submatches in m.
-// If m == nil, match checks whether pattern == val.
-func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
- // Wildcard matches any expression. If it appears multiple
- // times in the pattern, it must match the same expression
- // each time.
- if m != nil && pattern.IsValid() && pattern.Type() == identType {
- name := pattern.Interface().(*ast.Ident).Name
- if isWildcard(name) && val.IsValid() {
- // wildcards only match valid (non-nil) expressions.
- if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
- if old, ok := m[name]; ok {
- return match(nil, old, val)
- }
- m[name] = val
- return true
- }
- }
- }
-
- // Otherwise, pattern and val must match recursively.
- if !pattern.IsValid() || !val.IsValid() {
- return !pattern.IsValid() && !val.IsValid()
- }
- if pattern.Type() != val.Type() {
- return false
- }
-
- // Special cases.
- switch pattern.Type() {
- case identType:
- // For identifiers, only the names need to match
- // (and none of the other *ast.Object information).
- // This is a common case, handle it all here instead
- // of recursing down any further via reflection.
- p := pattern.Interface().(*ast.Ident)
- v := val.Interface().(*ast.Ident)
- return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
- case objectPtrType, positionType:
- // object pointers and token positions always match
- return true
- case callExprType:
- // For calls, the Ellipsis fields (token.Position) must
- // match since that is how f(x) and f(x...) are different.
- // Check them here but fall through for the remaining fields.
- p := pattern.Interface().(*ast.CallExpr)
- v := val.Interface().(*ast.CallExpr)
- if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
- return false
- }
- }
-
- p := reflect.Indirect(pattern)
- v := reflect.Indirect(val)
- if !p.IsValid() || !v.IsValid() {
- return !p.IsValid() && !v.IsValid()
- }
-
- switch p.Kind() {
- case reflect.Slice:
- if p.Len() != v.Len() {
- return false
- }
- for i := 0; i < p.Len(); i++ {
- if !match(m, p.Index(i), v.Index(i)) {
- return false
- }
- }
- return true
-
- case reflect.Struct:
- for i := 0; i < p.NumField(); i++ {
- if !match(m, p.Field(i), v.Field(i)) {
- return false
- }
- }
- return true
-
- case reflect.Interface:
- return match(m, p.Elem(), v.Elem())
- }
-
- // Handle token integers, etc.
- return p.Interface() == v.Interface()
-}
-
-// subst returns a copy of pattern with values from m substituted in place
-// of wildcards and pos used as the position of tokens from the pattern.
-// if m == nil, subst returns a copy of pattern and doesn't change the line
-// number information.
-func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
- if !pattern.IsValid() {
- return reflect.Value{}
- }
-
- // Wildcard gets replaced with map value.
- if m != nil && pattern.Type() == identType {
- name := pattern.Interface().(*ast.Ident).Name
- if isWildcard(name) {
- if old, ok := m[name]; ok {
- return subst(nil, old, reflect.Value{})
- }
- }
- }
-
- if pos.IsValid() && pattern.Type() == positionType {
- // use new position only if old position was valid in the first place
- if old := pattern.Interface().(token.Pos); !old.IsValid() {
- return pattern
- }
- return pos
- }
-
- // Otherwise copy.
- switch p := pattern; p.Kind() {
- case reflect.Slice:
- v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
- for i := 0; i < p.Len(); i++ {
- v.Index(i).Set(subst(m, p.Index(i), pos))
- }
- return v
-
- case reflect.Struct:
- v := reflect.New(p.Type()).Elem()
- for i := 0; i < p.NumField(); i++ {
- v.Field(i).Set(subst(m, p.Field(i), pos))
- }
- return v
-
- case reflect.Ptr:
- v := reflect.New(p.Type()).Elem()
- if elem := p.Elem(); elem.IsValid() {
- v.Set(subst(m, elem, pos).Addr())
- }
- return v
-
- case reflect.Interface:
- v := reflect.New(p.Type()).Elem()
- if elem := p.Elem(); elem.IsValid() {
- v.Set(subst(m, elem, pos))
- }
- return v
- }
-
- return pattern
-}
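The deleted file above is a vendored copy of gofmt's AST rewriting support: a rule of the form 'pattern -> replacement' is split on "->", both halves are parsed as Go expressions, and lowercase identifiers in the pattern act as wildcards during matching. A minimal, stdlib-only sketch of the rule-parsing step (illustrative only, not the removed code; the example rule is arbitrary):

```go
package main

import (
	"fmt"
	"go/parser"
	"os"
	"strings"
)

func main() {
	// A gofmt-style rewrite rule: lowercase identifiers act as wildcards.
	rule := "a[b:len(a)] -> a[b:]"

	parts := strings.Split(rule, "->")
	if len(parts) != 2 {
		fmt.Fprintln(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'")
		os.Exit(2)
	}

	pattern, err := parser.ParseExpr(strings.TrimSpace(parts[0]))
	if err != nil {
		fmt.Fprintf(os.Stderr, "parsing pattern: %v\n", err)
		os.Exit(2)
	}
	replacement, err := parser.ParseExpr(strings.TrimSpace(parts[1]))
	if err != nil {
		fmt.Fprintf(os.Stderr, "parsing replacement: %v\n", err)
		os.Exit(2)
	}

	// Both halves are plain ast.Expr values; the removed rewriteFile walked
	// the target file and substituted wildcard matches into the replacement.
	fmt.Printf("pattern: %T, replacement: %T\n", pattern, replacement)
}
```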
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index 72e8ccf0b..9da4c5b5e 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -5,6 +5,9 @@ go:
- 1.12.x
- tip
+# allow internal package imports, necessary for forked repositories
+go_import_path: github.com/onsi/ginkgo
+
install:
- go get -v -t ./...
- go get golang.org/x/tools/cmd/cover
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index aeadb66e0..020496c9d 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,20 +1,34 @@
+## 1.10.3
+
+### Fixes
+- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
+- Add integration test [d90e0dc]
+- Fix coverage files combining [e5dde8c]
+- A new CLI option: -ginkgo.reportFile <file path> (#601) [034fd25]
+
+## 1.10.2
+
+### Fixes
+- speed up table entry generateIt() (#609) [5049dc5]
+- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
+
## 1.10.1
-## Fixes
+### Fixes
- stack backtrace: fix skipping (#600) [2a4c0bd]
## 1.10.0
-## Fixes
+### Fixes
- stack backtrace: fix alignment and skipping [66915d6]
- fix typo in documentation [8f97b93]
## 1.9.0
-## Features
+### Features
- Option to print output into report, when tests have passed [0545415]
-## Fixes
+### Fixes
- Fixed typos in comments [0ecbc58]
- gofmt code [a7f8bfb]
- Simplify code [7454d00]
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index ac55a5ad2..89ec2b29a 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.10.1"
+const VERSION = "1.10.3"
type GinkgoConfigType struct {
RandomSeed int64
@@ -53,6 +53,7 @@ type DefaultReporterConfigType struct {
Verbose bool
FullTrace bool
ReportPassed bool
+ ReportFile string
}
var DefaultReporterConfig = DefaultReporterConfigType{}
@@ -100,6 +101,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
+ flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
+
}
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
@@ -202,5 +205,9 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
}
+ if reporter.ReportFile != "" {
+ result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
+ }
+
return result
}
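A minimal sketch of how the new ReportFile field surfaces on the command line, assuming only the config API visible in this hunk plus the package-level GinkgoConfig value; the output path is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	reporter := config.DefaultReporterConfig
	reporter.ReportFile = "reports/junit.xml" // hypothetical output path

	// With ReportFile set, BuildFlagArgs now also emits
	// --ginkgo.reportFile=reports/junit.xml for child test processes.
	args := config.BuildFlagArgs("ginkgo.", config.GinkgoConfig, reporter)
	fmt.Println(args)
}
```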
diff --git a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go
index 5fa645bce..93f3bc3b0 100644
--- a/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go
+++ b/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go
@@ -22,18 +22,15 @@ func (t TableEntry) generateIt(itBody reflect.Value) {
return
}
- values := []reflect.Value{}
+ values := make([]reflect.Value, len(t.Parameters))
+ iBodyType := itBody.Type()
for i, param := range t.Parameters {
- var value reflect.Value
-
if param == nil {
- inType := itBody.Type().In(i)
- value = reflect.Zero(inType)
+ inType := iBodyType.In(i)
+ values[i] = reflect.Zero(inType)
} else {
- value = reflect.ValueOf(param)
+ values[i] = reflect.ValueOf(param)
}
-
- values = append(values, value)
}
body := func() {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
index 569b6a29c..1d06e08fd 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
@@ -5,6 +5,7 @@ import (
"fmt"
"math/rand"
"os"
+ "regexp"
"strings"
"time"
@@ -163,17 +164,27 @@ func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) erro
fmt.Println("path is " + path)
- combined, err := os.OpenFile(filepath.Join(path, r.getCoverprofile()),
- os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
+ combined, err := os.OpenFile(
+ filepath.Join(path, r.getCoverprofile()),
+ os.O_WRONLY|os.O_CREATE,
+ 0666,
+ )
if err != nil {
fmt.Printf("Unable to create combined profile, %v\n", err)
return nil // non-fatal error
}
- for _, runner := range runners {
+ modeRegex := regexp.MustCompile(`^mode: .*\n`)
+ for index, runner := range runners {
contents, err := ioutil.ReadFile(runner.CoverageFile)
+ // remove the cover mode line from every file
+ // except the first one
+ if index > 0 {
+ contents = modeRegex.ReplaceAll(contents, []byte{})
+ }
+
if err != nil {
fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
return nil // non-fatal error
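The change above strips the `mode:` header from every coverprofile except the first before concatenating them, so the combined output stays a valid profile. A standalone sketch of that stripping logic, using hypothetical profile data:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Each coverprofile starts with a "mode: <mode>" line; when profiles are
	// concatenated, only the first file may keep it.
	modeRegex := regexp.MustCompile(`^mode: .*\n`)

	profiles := [][]byte{ // hypothetical per-package profiles
		[]byte("mode: atomic\nfoo/foo.go:1.1,2.2 1 1\n"),
		[]byte("mode: atomic\nbar/bar.go:3.3,4.4 1 0\n"),
	}

	var combined []byte
	for i, p := range profiles {
		if i > 0 {
			p = modeRegex.ReplaceAll(p, []byte{})
		}
		combined = append(combined, p...)
	}
	fmt.Print(string(combined))
}
```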
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
index 8734c061d..3cbf89a35 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -199,6 +199,11 @@ type Benchmarker interface {
// ginkgo bootstrap
func RunSpecs(t GinkgoTestingT, description string) bool {
specReporters := []Reporter{buildDefaultReporter()}
+ if config.DefaultReporterConfig.ReportFile != "" {
+ reportFile := config.DefaultReporterConfig.ReportFile
+ specReporters[0] = reporters.NewJUnitReporter(reportFile)
+ return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters)
+ }
return RunSpecsWithCustomReporters(t, description, specReporters)
}
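With this change a standard Ginkgo bootstrap picks the JUnit reporter automatically whenever `-ginkgo.reportFile` is set; a sketch of such a bootstrap (suite name and report path are hypothetical):

```go
package mysuite_test // hypothetical suite

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Run as: go test -ginkgo.reportFile=reports/junit.xml
// RunSpecs then replaces the default reporter with a JUnit reporter
// writing to that path.
func TestMySuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "MySuite Suite")
}
```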
diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
index 89a7c8465..d76e2fe77 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
@@ -13,6 +13,7 @@ import (
"fmt"
"math"
"os"
+ "path/filepath"
"strings"
"github.com/onsi/ginkgo/config"
@@ -141,17 +142,29 @@ func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
reporter.suite.Failures = summary.NumberOfFailedSpecs
reporter.suite.Errors = 0
- file, err := os.Create(reporter.filename)
+ if reporter.ReporterConfig.ReportFile != "" {
+ reporter.filename = reporter.ReporterConfig.ReportFile
+ fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
+ }
+ filePath, _ := filepath.Abs(reporter.filename)
+ dirPath := filepath.Dir(filePath)
+ err := os.MkdirAll(dirPath, os.ModePerm)
+ if err != nil {
+ fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
+ }
+ file, err := os.Create(filePath)
if err != nil {
- fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
+ fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
}
defer file.Close()
file.WriteString(xml.Header)
encoder := xml.NewEncoder(file)
encoder.Indent(" ", " ")
err = encoder.Encode(reporter.suite)
- if err != nil {
- fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
+ if err == nil {
+ fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
+ } else {
+ fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error())
}
}
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index f67074016..ecbdd2734 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.7.1
+
+### Fixes
+- Bump go-yaml version to cover fixed ddos heuristic (#362) [95e431e]
+
## 1.7.0
### Features
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 65eedf696..177a541c4 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -11,5 +11,6 @@ require (
golang.org/x/text v0.3.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.1
+ gopkg.in/yaml.v2 v2.2.4
)
+
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index b23f6ef02..bbcc05d3e 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -20,5 +20,5 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index b145768cf..85505f2ec 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.7.0"
+const GOMEGA_VERSION = "1.7.1"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
index 0d7bdd9ab..e81cc8805 100644
--- a/vendor/github.com/uber/jaeger-client-go/.travis.yml
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -7,21 +7,22 @@ dist: trusty
matrix:
include:
- - go: 1.12.x
+ - go: 1.13.x
env:
- TESTS=true
- USE_DEP=true
- COVERAGE=true
- - go: 1.12.x
+ - go: 1.13.x
env:
- USE_DEP=true
- CROSSDOCK=true
- - go: 1.12.x
+ - go: 1.13.x
env:
- TESTS=true
- USE_DEP=false
- USE_GLIDE=true
- - go: 1.11.x
+ # test with previous version of Go
+ - go: 1.12.x
env:
- TESTS=true
- USE_DEP=true
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 31b22e40c..c4590bf93 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,6 +1,45 @@
Changes by Version
==================
+2.20.0 (2019-11-06)
+-------------------
+
+## New Features
+
+- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
+
+ Sampling state is shared between all spans of the trace that are still in memory.
+ This allows implementation of delayed sampling decisions (see below).
+
+- Support delayed sampling decisions (#449) -- Yuri Shkuro
+
+ This is a large structural change to how the samplers work.
+ It allows some samplers to be executed multiple times on different
+ span events (like setting a tag) and make a positive sampling decision
+ later in the span life cycle, or even based on children spans.
+ See [README](./README.md#delayed-sampling) for more details.
+
+ There is a related minor change in behavior of the adaptive (per-operation) sampler,
+ which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
+ operation used to make the sampling decision is always the one provided at span creation.
+
+- Add experimental tag matching sampler (#452) -- Yuri Shkuro
+
+ A sampler that can sample a trace based on a certain tag added to the root
+ span or one of its local (in-process) children. The sampler can be used with
+ another experimental `PrioritySampler` that allows multiple samplers to try
+ to make a sampling decision, in a certain priority order.
+
+- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
+- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
+
+## Minor patches
+
+- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
+- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
+- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
+- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
+
2.19.0 (2019-09-23)
-------------------
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
index 1ed86f4a7..5a42ebf16 100644
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -2,6 +2,14 @@
[[projects]]
+ digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
+ name = "github.com/BurntSushi/toml"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
+ version = "v0.3.1"
+
+[[projects]]
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
@@ -138,14 +146,6 @@
version = "v1.4.0"
[[projects]]
- digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81"
- name = "github.com/uber-go/atomic"
- packages = ["."]
- pruneopts = "UT"
- revision = "df976f2515e274675050de7b3f42545de80594fd"
- version = "v1.4.0"
-
-[[projects]]
digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
name = "github.com/uber/jaeger-lib"
packages = [
@@ -158,23 +158,31 @@
version = "v2.2.0"
[[projects]]
- digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81"
+ digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600"
name = "go.uber.org/atomic"
packages = ["."]
pruneopts = "UT"
- revision = "df976f2515e274675050de7b3f42545de80594fd"
- version = "v1.4.0"
+ revision = "9dc4df04d0d1c39369750a9f6c32c39560672089"
+ version = "v1.5.0"
[[projects]]
- digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a"
+ digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902"
name = "go.uber.org/multierr"
packages = ["."]
pruneopts = "UT"
- revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
- version = "v1.1.0"
+ revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27"
+ version = "v1.3.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4"
+ name = "go.uber.org/tools"
+ packages = ["update-license"]
+ pruneopts = "UT"
+ revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98"
[[projects]]
- digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e"
+ digest = "1:6be13632ab4bd5842a097abb3aabac045a8601e19a10da4239e7d8bd83d4b83c"
name = "go.uber.org/zap"
packages = [
".",
@@ -185,8 +193,19 @@
"zapcore",
]
pruneopts = "UT"
- revision = "27376062155ad36be76b0f12cf1572a221d3a48c"
- version = "v1.10.0"
+ revision = "a6015e13fab9b744d96085308ce4e8f11bad1996"
+ version = "v1.12.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
+ name = "golang.org/x/lint"
+ packages = [
+ ".",
+ "golint",
+ ]
+ pruneopts = "UT"
+ revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
[[projects]]
branch = "master"
@@ -197,23 +216,81 @@
"context/ctxhttp",
]
pruneopts = "UT"
- revision = "aa69164e4478b84860dc6769c710c699c67058a3"
+ revision = "0deb6923b6d97481cb43bc1043fe5b72a0143032"
[[projects]]
branch = "master"
- digest = "1:712252802d318c8107d8f2136b99aa10feb17eca715245ed915199fbfc260155"
+ digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a"
name = "golang.org/x/sys"
packages = ["windows"]
pruneopts = "UT"
- revision = "0a153f010e6963173baba2306531d173aa843137"
+ revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611"
+
+[[projects]]
+ branch = "master"
+ digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f"
+ name = "golang.org/x/tools"
+ packages = [
+ "go/analysis",
+ "go/analysis/passes/inspect",
+ "go/ast/astutil",
+ "go/ast/inspector",
+ "go/buildutil",
+ "go/gcexportdata",
+ "go/internal/gcimporter",
+ "go/internal/packagesdriver",
+ "go/packages",
+ "go/types/objectpath",
+ "go/types/typeutil",
+ "internal/fastwalk",
+ "internal/gopathwalk",
+ "internal/semver",
+ "internal/span",
+ ]
+ pruneopts = "UT"
+ revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6"
[[projects]]
- digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
+ digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
- revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
- version = "v2.2.2"
+ revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5"
+ version = "v2.2.4"
+
+[[projects]]
+ digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
+ name = "honnef.co/go/tools"
+ packages = [
+ "arg",
+ "cmd/staticcheck",
+ "config",
+ "deprecated",
+ "facts",
+ "functions",
+ "go/types/typeutil",
+ "internal/cache",
+ "internal/passes/buildssa",
+ "internal/renameio",
+ "internal/sharedcheck",
+ "lint",
+ "lint/lintdsl",
+ "lint/lintutil",
+ "lint/lintutil/format",
+ "loader",
+ "printf",
+ "simple",
+ "ssa",
+ "ssautil",
+ "staticcheck",
+ "staticcheck/vrp",
+ "stylecheck",
+ "unused",
+ "version",
+ ]
+ pruneopts = "UT"
+ revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
+ version = "2019.2.3"
[solve-meta]
analyzer-name = "dep"
@@ -229,10 +306,10 @@
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
- "github.com/uber-go/atomic",
"github.com/uber/jaeger-lib/metrics",
"github.com/uber/jaeger-lib/metrics/metricstest",
"github.com/uber/jaeger-lib/metrics/prometheus",
+ "go.uber.org/atomic",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
]
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
index 3e6ac35ae..1fed7f814 100644
--- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -15,7 +15,7 @@
version = "^1.1.3"
[[constraint]]
- name = "github.com/uber-go/atomic"
+ name = "go.uber.org/atomic"
version = "^1"
[[constraint]]
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index 74e11787a..0cfe6a5f6 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -1,5 +1,5 @@
PROJECT_ROOT=github.com/uber/jaeger-client-go
-PACKAGES := $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
+PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
# all .go files that don't exist in hidden directories
ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
-e ".*/\..*" \
@@ -125,3 +125,4 @@ ifeq ($(CI_SKIP_LINT),true)
else
make lint
endif
+
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 604d4b571..a3366114d 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -182,6 +182,29 @@ are available:
1. `RateLimitingSampler` can be used to allow only a certain fixed
number of traces to be sampled per second.
+#### Delayed sampling
+
+Version 2.20 introduced the ability to delay sampling decisions in the life cycle
+of the root span. It involves several features and architectural changes:
+ * **Shared sampling state**: the sampling state is shared across all local
+ (i.e. in-process) spans for a given trace.
+ * **New `SamplerV2` API** allows the sampler to be called at multiple points
+ in the life cycle of a span:
+ * on span creation
+ * on overwriting span operation name
+ * on setting span tags
+ * on finishing the span
+ * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
+ to indicate if the negative sampling decision is final or not (positive sampling
+ decisions are always final). If the decision is not final, the sampler will be
+ called again on further span life cycle events, like setting tags.
+
+These new features are used in the experimental `x.TagMatchingSampler`, which
+can sample a trace based on a certain tag added to the root
+span or one of its local (in-process) children. The sampler can be used with
+another experimental `x.PrioritySampler` that allows multiple samplers to try
+to make a sampling decision, in a certain priority order.
+
### Baggage Injection
The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
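As a rough illustration of the SamplerV2 hooks described above, the sketch below keeps every decision non-final until an "error" tag is observed; the SamplingDecision shape (Sample/Retryable fields) is assumed from the sampler changes later in this diff, and the tag key is hypothetical:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
)

// errorTagSampler is a hypothetical SamplerV2-style sampler: every decision
// stays non-final (Retryable) until an "error" tag is observed, at which
// point the trace is sampled and the decision becomes final.
type errorTagSampler struct{}

func (errorTagSampler) OnCreateSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (errorTagSampler) OnSetOperationName(span *jaeger.Span, operationName string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (errorTagSampler) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
	if key == "error" {
		// positive decisions are always final
		return jaeger.SamplingDecision{Sample: true, Retryable: false}
	}
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (errorTagSampler) OnFinishSpan(span *jaeger.Span) jaeger.SamplingDecision {
	// still undecided at finish time: leave the negative decision non-final
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (errorTagSampler) Close() {}

func main() {}
```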
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index 6bce1b3b0..965f7c3ee 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -86,6 +86,9 @@ type SamplerConfig struct {
// jaeger-agent for the appropriate sampling strategy.
// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
+
+ // Options can be used to programmatically pass additional options to the Remote sampler.
+ Options []jaeger.SamplerOption
}
// ReporterConfig configures the reporter. All fields are optional.
@@ -357,6 +360,7 @@ func (sc *SamplerConfig) NewSampler(
if sc.SamplingRefreshInterval != 0 {
options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
}
+ options = append(options, sc.Options...)
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
}
return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
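A minimal sketch of the new `Options` field, assuming the usual Configuration/InitGlobalTracer entry point of this client; the service name and refresh interval are hypothetical, and only sampler options already referenced in this hunk are used:

```go
package main

import (
	"log"
	"time"

	jaeger "github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := jaegercfg.Configuration{
		ServiceName: "podman-demo", // hypothetical
		Sampler: &jaegercfg.SamplerConfig{
			Type:  "remote",
			Param: 0.001,
			// Options (the field added above) forwards extra SamplerOptions
			// to the remotely controlled sampler built by NewSampler.
			Options: []jaeger.SamplerOption{
				jaeger.SamplerOptions.SamplingRefreshInterval(2 * time.Minute),
			},
		},
	}

	closer, err := cfg.InitGlobalTracer(cfg.ServiceName)
	if err != nil {
		log.Fatalf("tracer init failed: %v", err)
	}
	defer closer.Close()
}
```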
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
index 14d69b11d..a729bd8fe 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -52,7 +52,11 @@ const (
// FromEnv uses environment variables to set the tracer's Configuration
func FromEnv() (*Configuration, error) {
c := &Configuration{}
+ return c.FromEnv()
+}
+// FromEnv uses environment variables and overrides existing tracer's Configuration
+func (c *Configuration) FromEnv() (*Configuration, error) {
if e := os.Getenv(envServiceName); e != "" {
c.ServiceName = e
}
@@ -77,13 +81,21 @@ func FromEnv() (*Configuration, error) {
c.Tags = parseTags(e)
}
- if s, err := samplerConfigFromEnv(); err == nil {
+ if c.Sampler == nil {
+ c.Sampler = &SamplerConfig{}
+ }
+
+ if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
c.Sampler = s
} else {
return nil, errors.Wrap(err, "cannot obtain sampler config from env")
}
- if r, err := reporterConfigFromEnv(); err == nil {
+ if c.Reporter == nil {
+ c.Reporter = &ReporterConfig{}
+ }
+
+ if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
c.Reporter = r
} else {
return nil, errors.Wrap(err, "cannot obtain reporter config from env")
@@ -93,9 +105,7 @@ func FromEnv() (*Configuration, error) {
}
// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
-func samplerConfigFromEnv() (*SamplerConfig, error) {
- sc := &SamplerConfig{}
-
+func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
if e := os.Getenv(envSamplerType); e != "" {
sc.Type = e
}
@@ -135,9 +145,7 @@ func samplerConfigFromEnv() (*SamplerConfig, error) {
}
// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
-func reporterConfigFromEnv() (*ReporterConfig, error) {
- rc := &ReporterConfig{}
-
+func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
if e := os.Getenv(envReporterMaxQueueSize); e != "" {
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
rc.QueueSize = int(value)
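A short sketch of the new method form of FromEnv, which enriches an existing Configuration instead of always starting from an empty one; the default service name is hypothetical:

```go
package main

import (
	"log"

	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// Start from an explicit configuration and let the JAEGER_* environment
	// variables fill in or override the rest via (*Configuration).FromEnv.
	cfg := &jaegercfg.Configuration{
		ServiceName: "podman-demo", // hypothetical default; JAEGER_SERVICE_NAME overrides it
	}

	cfg, err := cfg.FromEnv()
	if err != nil {
		log.Fatalf("cannot read Jaeger config from env: %v", err)
	}
	log.Printf("effective sampler type: %q", cfg.Sampler.Type)
}
```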
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index e95b2ba09..0da47b02f 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@ import (
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.19.0"
+ JaegerClientVersion = "Go-2.20.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
index 6ce1caf87..f0f1afe2f 100644
--- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -35,7 +35,7 @@ func BuildJaegerThrift(span *Span) *j.Span {
SpanId: int64(span.context.spanID),
ParentSpanId: int64(span.context.parentID),
OperationName: span.operationName,
- Flags: int32(span.context.flags),
+ Flags: int32(span.context.samplingState.flags()),
StartTime: startTime,
Duration: duration,
Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
index e56db9b73..50e4e22d6 100644
--- a/vendor/github.com/uber/jaeger-client-go/metrics.go
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -26,6 +26,9 @@ type Metrics struct {
// Number of traces started by this tracer as not sampled
TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+ // Number of traces started by this tracer with delayed sampling
+ TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"`
+
// Number of externally started sampled traces this tracer joined
TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
@@ -33,13 +36,22 @@ type Metrics struct {
TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
// Number of sampled spans started by this tracer
- SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"`
+ SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
+
+ // Number of not sampled spans started by this tracer
+ SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
+
+ // Number of spans with delayed sampling started by this tracer
+ SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
- // Number of unsampled spans started by this tracer
- SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"`
+ // Number of spans finished by this tracer
+ SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
+
+ // Number of spans finished by this tracer
+ SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
// Number of spans finished by this tracer
- SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"`
+ SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
// Number of errors decoding tracing context
DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
index 5b50cfb71..42fd64b58 100644
--- a/vendor/github.com/uber/jaeger-client-go/propagation.go
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -193,7 +193,7 @@ func (p *BinaryPropagator) Inject(
if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
return err
}
- if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
+ if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
return err
}
@@ -222,6 +222,7 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
return emptyContext, opentracing.ErrInvalidCarrier
}
var ctx SpanContext
+ ctx.samplingState = &samplingState{}
if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
@@ -232,9 +233,12 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
- if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
+
+ var flags byte
+ if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
return emptyContext, opentracing.ErrSpanContextCorrupted
}
+ ctx.samplingState.setFlags(flags)
// Handle the baggage items
var numBaggage int32
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
index 27163ebe4..0b78cec20 100644
--- a/vendor/github.com/uber/jaeger-client-go/reporter.go
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -28,6 +28,8 @@ import (
// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
type Reporter interface {
// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+ // If the reporter is processing Span asynchronously then it needs to Retain() the span,
+ // and then Release() it when no longer needed, to avoid span data corruption.
Report(span *Span)
// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
index ea6984e02..6195d59c5 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -17,19 +17,14 @@ package jaeger
import (
"fmt"
"math"
- "net/url"
"sync"
- "sync/atomic"
- "time"
- "github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
"github.com/uber/jaeger-client-go/utils"
)
const (
- defaultSamplingRefreshInterval = time.Minute
- defaultMaxOperations = 2000
+ defaultMaxOperations = 2000
)
// Sampler decides whether a new trace should be sampled or not.
@@ -47,9 +42,7 @@ type Sampler interface {
// Equal checks if the `other` sampler is functionally equivalent
// to this sampler.
- // TODO remove this function. This function is used to determine if 2 samplers are equivalent
- // which does not bode well with the adaptive sampler which has to create all the composite samplers
- // for the comparison to occur. This is expensive to do if only one sampler has changed.
+ // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
Equal(other Sampler) bool
}
@@ -57,17 +50,23 @@ type Sampler interface {
// ConstSampler is a sampler that always makes the same decision.
type ConstSampler struct {
+ legacySamplerV1Base
Decision bool
tags []Tag
}
// NewConstSampler creates a ConstSampler.
-func NewConstSampler(sample bool) Sampler {
+func NewConstSampler(sample bool) *ConstSampler {
tags := []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeConst},
{key: SamplerParamTagKey, value: sample},
}
- return &ConstSampler{Decision: sample, tags: tags}
+ s := &ConstSampler{
+ Decision: sample,
+ tags: tags,
+ }
+ s.delegate = s.IsSampled
+ return s
}
// IsSampled implements IsSampled() of Sampler.
@@ -88,11 +87,17 @@ func (s *ConstSampler) Equal(other Sampler) bool {
return false
}
+// String is used to log sampler details.
+func (s *ConstSampler) String() string {
+ return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
+}
+
// -----------------------
// ProbabilisticSampler is a sampler that randomly samples a certain percentage
// of traces.
type ProbabilisticSampler struct {
+ legacySamplerV1Base
samplingRate float64
samplingBoundary uint64
tags []Tag
@@ -114,16 +119,19 @@ func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error
}
func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
- samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
- tags := []Tag{
+ s := new(ProbabilisticSampler)
+ s.delegate = s.IsSampled
+ return s.init(samplingRate)
+}
+
+func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
+ s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+ s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
+ s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
- {key: SamplerParamTagKey, value: samplingRate},
- }
- return &ProbabilisticSampler{
- samplingRate: samplingRate,
- samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
- tags: tags,
+ {key: SamplerParamTagKey, value: s.samplingRate},
}
+ return s
}
// SamplingRate returns the sampling probability this sampled was constructed with.
@@ -149,65 +157,104 @@ func (s *ProbabilisticSampler) Equal(other Sampler) bool {
return false
}
+// Update modifies in-place the sampling rate. Locking must be done externally.
+func (s *ProbabilisticSampler) Update(samplingRate float64) error {
+ if samplingRate < 0.0 || samplingRate > 1.0 {
+ return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+ }
+ s.init(samplingRate)
+ return nil
+}
+
+// String is used to log sampler details.
+func (s *ProbabilisticSampler) String() string {
+ return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
+}
+
// -----------------------
-type rateLimitingSampler struct {
+// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
+// burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
+// number of sequential requests can be sampled each second.
+type RateLimitingSampler struct {
+ legacySamplerV1Base
maxTracesPerSecond float64
- rateLimiter utils.RateLimiter
+ rateLimiter *utils.ReconfigurableRateLimiter
tags []Tag
}
-// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
-// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those
-// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
-// sequential requests can be sampled each second.
-func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
- tags := []Tag{
+// NewRateLimitingSampler creates new RateLimitingSampler.
+func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
+ s := new(RateLimitingSampler)
+ s.delegate = s.IsSampled
+ return s.init(maxTracesPerSecond)
+}
+
+func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
+ if s.rateLimiter == nil {
+ s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+ } else {
+ s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+ }
+ s.maxTracesPerSecond = maxTracesPerSecond
+ s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
{key: SamplerParamTagKey, value: maxTracesPerSecond},
}
- return &rateLimitingSampler{
- maxTracesPerSecond: maxTracesPerSecond,
- rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
- tags: tags,
- }
+ return s
}
// IsSampled implements IsSampled() of Sampler.
-func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
return s.rateLimiter.CheckCredit(1.0), s.tags
}
-func (s *rateLimitingSampler) Close() {
+// Update reconfigures the rate limiter, while preserving its accumulated balance.
+// Locking must be done externally.
+func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
+ if s.maxTracesPerSecond != maxTracesPerSecond {
+ s.init(maxTracesPerSecond)
+ }
+}
+
+// Close does nothing.
+func (s *RateLimitingSampler) Close() {
// nothing to do
}
-func (s *rateLimitingSampler) Equal(other Sampler) bool {
- if o, ok := other.(*rateLimitingSampler); ok {
+// Equal compares with another sampler.
+func (s *RateLimitingSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*RateLimitingSampler); ok {
return s.maxTracesPerSecond == o.maxTracesPerSecond
}
return false
}
+// String is used to log sampler details.
+func (s *RateLimitingSampler) String() string {
+ return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
+}
+
// -----------------------
-// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
-// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
+// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound
// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
//
-// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
-// samplers return true, the tags for probabilisticSampler will be used.
+// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
+// samplers return true, the tags for ProbabilisticSampler will be used.
type GuaranteedThroughputProbabilisticSampler struct {
probabilisticSampler *ProbabilisticSampler
- lowerBoundSampler Sampler
+ lowerBoundSampler *RateLimitingSampler
tags []Tag
samplingRate float64
lowerBound float64
}
// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
-// probabilisticSampler and rateLimitingSampler.
+// ProbabilisticSampler and RateLimitingSampler.
func NewGuaranteedThroughputProbabilisticSampler(
lowerBound, samplingRate float64,
) (*GuaranteedThroughputProbabilisticSampler, error) {
@@ -224,8 +271,14 @@ func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float6
}
func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
- if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
+ if s.probabilisticSampler == nil {
s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+ } else if s.samplingRate != samplingRate {
+ s.probabilisticSampler.init(samplingRate)
+ }
+ // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
+ samplingRate = s.probabilisticSampler.SamplingRate()
+ if s.samplingRate != samplingRate || s.tags == nil {
s.samplingRate = s.probabilisticSampler.SamplingRate()
s.tags = []Tag{
{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
@@ -252,7 +305,7 @@ func (s *GuaranteedThroughputProbabilisticSampler) Close() {
// Equal implements Equal() of Sampler.
func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
+ // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
// more information.
return false
}
@@ -261,52 +314,116 @@ func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
s.setProbabilisticSampler(samplingRate)
if s.lowerBound != lowerBound {
- s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
+ s.lowerBoundSampler.Update(lowerBound)
s.lowerBound = lowerBound
}
}
// -----------------------
-type adaptiveSampler struct {
+// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
+// on a per-operation basis.
+type PerOperationSampler struct {
sync.RWMutex
samplers map[string]*GuaranteedThroughputProbabilisticSampler
defaultSampler *ProbabilisticSampler
lowerBound float64
maxOperations int
+
+ // see description in PerOperationSamplerParams
+ operationNameLateBinding bool
}
-// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
-// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
-// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
-func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
- return newAdaptiveSampler(strategies, maxOperations), nil
+// NewAdaptiveSampler returns a new PerOperationSampler.
+// Deprecated: please use NewPerOperationSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
+ return NewPerOperationSampler(PerOperationSamplerParams{
+ MaxOperations: maxOperations,
+ Strategies: strategies,
+ }), nil
}
-func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
+// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
+type PerOperationSamplerParams struct {
+ // Max number of operations that will be tracked. Other operations will be given default strategy.
+ MaxOperations int
+
+ // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
+ // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
+ // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
+ // in applications that always provide the correct span name on trace creation.
+ //
+ // For backwards compatibility this option is off by default.
+ OperationNameLateBinding bool
+
+ // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
+ Strategies *sampling.PerOperationSamplingStrategies
+}
+
+// NewPerOperationSampler returns a new PerOperationSampler.
+func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
- for _, strategy := range strategies.PerOperationStrategies {
+ for _, strategy := range params.Strategies.PerOperationStrategies {
sampler := newGuaranteedThroughputProbabilisticSampler(
- strategies.DefaultLowerBoundTracesPerSecond,
+ params.Strategies.DefaultLowerBoundTracesPerSecond,
strategy.ProbabilisticSampling.SamplingRate,
)
samplers[strategy.Operation] = sampler
}
- return &adaptiveSampler{
- samplers: samplers,
- defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
- lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
- maxOperations: maxOperations,
+ return &PerOperationSampler{
+ samplers: samplers,
+ defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
+ lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
+ maxOperations: params.MaxOperations,
+ operationNameLateBinding: params.OperationNameLateBinding,
}
}
-func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+// IsSampled is not used and only exists to match Sampler V1 API.
+// TODO (breaking change) remove when upgrading everything to SamplerV2
+func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return false, nil
+}
+
+func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
+ samplerV1 := s.getSamplerForOperation(operationName)
+ var sampled bool
+ var tags []Tag
+ if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
+ sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
+ }
+ return sampled, tags
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
+ sampled, tags := s.trySampling(span, span.OperationName())
+ return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ sampled, tags := s.trySampling(span, operationName)
+ return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
s.RLock()
sampler, ok := s.samplers[operation]
if ok {
defer s.RUnlock()
- return sampler.IsSampled(id, operation)
+ return sampler
}
s.RUnlock()
s.Lock()
@@ -315,18 +432,19 @@ func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag)
// Check if sampler has already been created
sampler, ok = s.samplers[operation]
if ok {
- return sampler.IsSampled(id, operation)
+ return sampler
}
// Store only up to maxOperations of unique ops.
if len(s.samplers) >= s.maxOperations {
- return s.defaultSampler.IsSampled(id, operation)
+ return s.defaultSampler
}
newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
s.samplers[operation] = newSampler
- return newSampler.IsSampled(id, operation)
+ return newSampler
}
-func (s *adaptiveSampler) Close() {
+// Close invokes Close on all underlying samplers.
+func (s *PerOperationSampler) Close() {
s.Lock()
defer s.Unlock()
for _, sampler := range s.samplers {
@@ -335,16 +453,18 @@ func (s *adaptiveSampler) Close() {
s.defaultSampler.Close()
}
-func (s *adaptiveSampler) Equal(other Sampler) bool {
- // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
+// Equal is not used.
+// TODO (breaking change) remove this in the future
+func (s *PerOperationSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
// samplers which all need to be initialized before this function can be called for a comparison.
- // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
+ // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
// changing. Hence this function always returns false so that the update function can be called.
// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
return false
}
-func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
s.Lock()
defer s.Unlock()
newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
@@ -369,191 +489,3 @@ func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrate
}
s.samplers = newSamplers
}
-
-// -----------------------
-
-// RemotelyControlledSampler is a delegating sampler that polls a remote server
-// for the appropriate sampling strategy, constructs a corresponding sampler and
-// delegates to it for sampling decisions.
-type RemotelyControlledSampler struct {
- // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
- // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
- closed int64 // 0 - not closed, 1 - closed
-
- sync.RWMutex
- samplerOptions
-
- serviceName string
- manager sampling.SamplingManager
- doneChan chan *sync.WaitGroup
-}
-
-type httpSamplingManager struct {
- serverURL string
-}
-
-func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
- var out sampling.SamplingStrategyResponse
- v := url.Values{}
- v.Set("service", serviceName)
- if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
- return nil, err
- }
- return &out, nil
-}
-
-// NewRemotelyControlledSampler creates a sampler that periodically pulls
-// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
-func NewRemotelyControlledSampler(
- serviceName string,
- opts ...SamplerOption,
-) *RemotelyControlledSampler {
- options := applySamplerOptions(opts...)
- sampler := &RemotelyControlledSampler{
- samplerOptions: options,
- serviceName: serviceName,
- manager: &httpSamplingManager{serverURL: options.samplingServerURL},
- doneChan: make(chan *sync.WaitGroup),
- }
- go sampler.pollController()
- return sampler
-}
-
-func applySamplerOptions(opts ...SamplerOption) samplerOptions {
- options := samplerOptions{}
- for _, option := range opts {
- option(&options)
- }
- if options.sampler == nil {
- options.sampler = newProbabilisticSampler(0.001)
- }
- if options.logger == nil {
- options.logger = log.NullLogger
- }
- if options.maxOperations <= 0 {
- options.maxOperations = defaultMaxOperations
- }
- if options.samplingServerURL == "" {
- options.samplingServerURL = DefaultSamplingServerURL
- }
- if options.metrics == nil {
- options.metrics = NewNullMetrics()
- }
- if options.samplingRefreshInterval <= 0 {
- options.samplingRefreshInterval = defaultSamplingRefreshInterval
- }
- return options
-}
-
-// IsSampled implements IsSampled() of Sampler.
-func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
- s.RLock()
- defer s.RUnlock()
- return s.sampler.IsSampled(id, operation)
-}
-
-// Close implements Close() of Sampler.
-func (s *RemotelyControlledSampler) Close() {
- if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
- s.logger.Error("Repeated attempt to close the sampler is ignored")
- return
- }
-
- var wg sync.WaitGroup
- wg.Add(1)
- s.doneChan <- &wg
- wg.Wait()
-}
-
-// Equal implements Equal() of Sampler.
-func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
- // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
- // more information.
- if o, ok := other.(*RemotelyControlledSampler); ok {
- s.RLock()
- o.RLock()
- defer s.RUnlock()
- defer o.RUnlock()
- return s.sampler.Equal(o.sampler)
- }
- return false
-}
-
-func (s *RemotelyControlledSampler) pollController() {
- ticker := time.NewTicker(s.samplingRefreshInterval)
- defer ticker.Stop()
- s.pollControllerWithTicker(ticker)
-}
-
-func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
- for {
- select {
- case <-ticker.C:
- s.updateSampler()
- case wg := <-s.doneChan:
- wg.Done()
- return
- }
- }
-}
-
-func (s *RemotelyControlledSampler) getSampler() Sampler {
- s.Lock()
- defer s.Unlock()
- return s.sampler
-}
-
-func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
- s.Lock()
- defer s.Unlock()
- s.sampler = sampler
-}
-
-func (s *RemotelyControlledSampler) updateSampler() {
- res, err := s.manager.GetSamplingStrategy(s.serviceName)
- if err != nil {
- s.metrics.SamplerQueryFailure.Inc(1)
- s.logger.Infof("Unable to query sampling strategy: %v", err)
- return
- }
- s.Lock()
- defer s.Unlock()
-
- s.metrics.SamplerRetrieved.Inc(1)
- if strategies := res.GetOperationSampling(); strategies != nil {
- s.updateAdaptiveSampler(strategies)
- } else {
- err = s.updateRateLimitingOrProbabilisticSampler(res)
- }
- if err != nil {
- s.metrics.SamplerUpdateFailure.Inc(1)
- s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
- return
- }
- s.metrics.SamplerUpdated.Inc(1)
-}
-
-// NB: this function should only be called while holding a Write lock
-func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
- if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
- adaptiveSampler.update(strategies)
- } else {
- s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
- }
-}
-
-// NB: this function should only be called while holding a Write lock
-func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
- var newSampler Sampler
- if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
- newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
- } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
- newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
- } else {
- return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
- }
- if !s.sampler.Equal(newSampler) {
- s.sampler = newSampler
- }
- return nil
-}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
new file mode 100644
index 000000000..9bd0c9822
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -0,0 +1,334 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/uber/jaeger-client-go/thrift-gen/sampling"
+)
+
+const (
+ defaultSamplingRefreshInterval = time.Minute
+)
+
+// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
+type SamplingStrategyFetcher interface {
+ Fetch(service string) ([]byte, error)
+}
+
+// SamplingStrategyParser is used to parse sampling strategy updates. The output object
+// should be of the type that is recognized by the SamplerUpdaters.
+type SamplingStrategyParser interface {
+ Parse(response []byte) (interface{}, error)
+}
+
+// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
+// retrieved from a remote config server, to the current sampler. The updater can modify
+// the sampler in-place if the sampler supports it, or create a new one.
+//
+// If the strategy does not contain configuration for the sampler in question,
+// updater must return modifiedSampler=nil to give other updaters a chance to inspect
+// the sampling strategy response.
+//
+// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
+type SamplerUpdater interface {
+ Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
+}
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ closed int64 // 0 - not closed, 1 - closed
+
+ sync.RWMutex
+ samplerOptions
+
+ serviceName string
+ doneChan chan *sync.WaitGroup
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+ serviceName string,
+ opts ...SamplerOption,
+) *RemotelyControlledSampler {
+ options := new(samplerOptions).applyOptionsAndDefaults(opts...)
+ sampler := &RemotelyControlledSampler{
+ samplerOptions: *options,
+ serviceName: serviceName,
+ doneChan: make(chan *sync.WaitGroup),
+ }
+ go sampler.pollController()
+ return sampler
+}
+
+// IsSampled implements IsSampled() of Sampler.
+// TODO (breaking change) remove when Sampler V1 is removed
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return false, nil
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+ return s.sampler.OnCreateSpan(span)
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ return s.sampler.OnSetOperationName(span, operationName)
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return s.sampler.OnSetTag(span, key, value)
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+ return s.sampler.OnFinishSpan(span)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+ s.logger.Error("Repeated attempt to close the sampler is ignored")
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ s.doneChan <- &wg
+ wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+ // more information.
+ return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+ ticker := time.NewTicker(s.samplingRefreshInterval)
+ defer ticker.Stop()
+ s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+ for {
+ select {
+ case <-ticker.C:
+ s.UpdateSampler()
+ case wg := <-s.doneChan:
+ wg.Done()
+ return
+ }
+ }
+}
+
+// Sampler returns the currently active sampler.
+func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
+ s.Lock()
+ defer s.Unlock()
+ return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
+ s.Lock()
+ defer s.Unlock()
+ s.sampler = sampler
+}
+
+// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
+// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
+func (s *RemotelyControlledSampler) UpdateSampler() {
+ res, err := s.samplingFetcher.Fetch(s.serviceName)
+ if err != nil {
+ s.metrics.SamplerQueryFailure.Inc(1)
+ s.logger.Infof("failed to fetch sampling strategy: %v", err)
+ return
+ }
+ strategy, err := s.samplingParser.Parse(res)
+ if err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("failed to parse sampling strategy response: %v", err)
+ return
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ s.metrics.SamplerRetrieved.Inc(1)
+ if err := s.updateSamplerViaUpdaters(strategy); err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
+ return
+ }
+ s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
+ for _, updater := range s.updaters {
+ sampler, err := updater.Update(s.sampler, strategy)
+ if err != nil {
+ return err
+ }
+ if sampler != nil {
+ s.sampler = sampler
+ return nil
+ }
+ }
+ return fmt.Errorf("unsupported sampling strategy %+v", strategy)
+}
+
+// -----------------------
+
+// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type ProbabilisticSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if resp, ok := strategy.(response); ok {
+ if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
+ if ps, ok := sampler.(*ProbabilisticSampler); ok {
+ if err := ps.Update(probabilistic.SamplingRate); err != nil {
+ return nil, err
+ }
+ return sampler, nil
+ }
+ return newProbabilisticSampler(probabilistic.SamplingRate), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type RateLimitingSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if resp, ok := strategy.(response); ok {
+ if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
+ rateLimit := float64(rateLimiting.MaxTracesPerSecond)
+ if rl, ok := sampler.(*RateLimitingSampler); ok {
+ rl.Update(rateLimit)
+ return rl, nil
+ }
+ return NewRateLimitingSampler(rateLimit), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type AdaptiveSamplerUpdater struct {
+ MaxOperations int // required
+ OperationNameLateBinding bool
+}
+
+// Update implements Update of SamplerUpdater.
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetOperationSampling() *sampling.PerOperationSamplingStrategies
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if p, ok := strategy.(response); ok {
+ if operations := p.GetOperationSampling(); operations != nil {
+ if as, ok := sampler.(*PerOperationSampler); ok {
+ as.update(operations)
+ return as, nil
+ }
+ return NewPerOperationSampler(PerOperationSamplerParams{
+ MaxOperations: u.MaxOperations,
+ OperationNameLateBinding: u.OperationNameLateBinding,
+ Strategies: operations,
+ }), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+type httpSamplingStrategyFetcher struct {
+ serverURL string
+ logger Logger
+}
+
+func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
+ v := url.Values{}
+ v.Set("service", serviceName)
+ uri := f.serverURL + "?" + v.Encode()
+
+ // TODO create and reuse http.Client with proper timeout settings, etc.
+ resp, err := http.Get(uri)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err := resp.Body.Close(); err != nil {
+ f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
+ }
+ }()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+ }
+
+ return body, nil
+}
+
+// -----------------------
+
+type samplingStrategyParser struct{}
+
+func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
+ strategy := new(sampling.SamplingStrategyResponse)
+ if err := json.Unmarshal(response, strategy); err != nil {
+ return nil, err
+ }
+ return strategy, nil
+}
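
A quick usage sketch of the new remote sampler, assuming only the constructor and methods defined in sampler_remote.go above (the service name and refresh interval are illustrative):

```go
package main

import (
	"time"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// Poll the sampling endpoint (jaeger-agent by default) for strategy updates.
	sampler := jaeger.NewRemotelyControlledSampler(
		"my-service", // hypothetical service name
		jaeger.SamplerOptions.SamplingRefreshInterval(10*time.Second),
	)
	defer sampler.Close()

	// Force an immediate fetch instead of waiting for the ticker, then look at
	// the currently active delegate sampler.
	sampler.UpdateSampler()
	_ = sampler.Sampler()
}
```
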
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
index 75d28a561..7a292effc 100644
--- a/vendor/github.com/uber/jaeger-client-go/sampler_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
@@ -16,6 +16,8 @@ package jaeger
import (
"time"
+
+ "github.com/uber/jaeger-client-go/log"
)
// SamplerOption is a function that sets some option on the sampler
@@ -27,10 +29,13 @@ var SamplerOptions samplerOptions
type samplerOptions struct {
metrics *Metrics
maxOperations int
- sampler Sampler
+ sampler SamplerV2
logger Logger
samplingServerURL string
samplingRefreshInterval time.Duration
+ samplingFetcher SamplingStrategyFetcher
+ samplingParser SamplingStrategyParser
+ updaters []SamplerUpdater
}
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
@@ -53,7 +58,7 @@ func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
// to use before a remote sampler is created and used.
func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
return func(o *samplerOptions) {
- o.sampler = sampler
+ o.sampler = samplerV1toV2(sampler)
}
}
@@ -79,3 +84,65 @@ func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Durat
o.samplingRefreshInterval = samplingRefreshInterval
}
}
+
+// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
+func (samplerOptions) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingFetcher = fetcher
+ }
+}
+
+// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
+func (samplerOptions) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingParser = parser
+ }
+}
+
+// Updaters creates a SamplerOption that initializes sampler updaters.
+func (samplerOptions) Updaters(updaters ...SamplerUpdater) SamplerOption {
+ return func(o *samplerOptions) {
+ o.updaters = updaters
+ }
+}
+
+func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
+ for _, option := range opts {
+ option(o)
+ }
+ if o.sampler == nil {
+ o.sampler = newProbabilisticSampler(0.001)
+ }
+ if o.logger == nil {
+ o.logger = log.NullLogger
+ }
+ if o.maxOperations <= 0 {
+ o.maxOperations = defaultMaxOperations
+ }
+ if o.samplingServerURL == "" {
+ o.samplingServerURL = DefaultSamplingServerURL
+ }
+ if o.metrics == nil {
+ o.metrics = NewNullMetrics()
+ }
+ if o.samplingRefreshInterval <= 0 {
+ o.samplingRefreshInterval = defaultSamplingRefreshInterval
+ }
+ if o.samplingFetcher == nil {
+ o.samplingFetcher = &httpSamplingStrategyFetcher{
+ serverURL: o.samplingServerURL,
+ logger: o.logger,
+ }
+ }
+ if o.samplingParser == nil {
+ o.samplingParser = new(samplingStrategyParser)
+ }
+ if o.updaters == nil {
+ o.updaters = []SamplerUpdater{
+ &AdaptiveSamplerUpdater{MaxOperations: o.maxOperations},
+ new(ProbabilisticSamplerUpdater),
+ new(RateLimitingSamplerUpdater),
+ }
+ }
+ return o
+}
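
The options above make every stage of the remote sampler replaceable. A hedged sketch of wiring in a custom updater chain; the noopUpdater type is made up for illustration, only the option and updater names come from the files above:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
)

// noopUpdater is a hypothetical SamplerUpdater that never matches a strategy,
// so the remaining updaters in the chain get a chance to handle the response.
type noopUpdater struct{}

func (noopUpdater) Update(sampler jaeger.SamplerV2, strategy interface{}) (jaeger.SamplerV2, error) {
	return nil, nil // nil, nil means "not handled here, try the next updater"
}

func main() {
	sampler := jaeger.NewRemotelyControlledSampler(
		"my-service", // hypothetical service name
		jaeger.SamplerOptions.Updaters(
			noopUpdater{},
			&jaeger.AdaptiveSamplerUpdater{MaxOperations: 500}, // 500 is illustrative
			new(jaeger.ProbabilisticSamplerUpdater),
			new(jaeger.RateLimitingSamplerUpdater),
		),
	)
	defer sampler.Close()
}
```
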
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
new file mode 100644
index 000000000..a50671a23
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// SamplingDecision is returned by the V2 samplers.
+type SamplingDecision struct {
+ Sample bool
+ Retryable bool
+ Tags []Tag
+}
+
+// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
+// be made at different points of the span lifecycle.
+type SamplerV2 interface {
+ OnCreateSpan(span *Span) SamplingDecision
+ OnSetOperationName(span *Span, operationName string) SamplingDecision
+ OnSetTag(span *Span, key string, value interface{}) SamplingDecision
+ OnFinishSpan(span *Span) SamplingDecision
+
+ // Close does a clean shutdown of the sampler, stopping any background
+ // go-routines it may have started.
+ Close()
+}
+
+// samplerV1toV2 wraps a legacy V1 sampler into an adapter that makes it look like V2.
+func samplerV1toV2(s Sampler) SamplerV2 {
+ if s2, ok := s.(SamplerV2); ok {
+ return s2
+ }
+ type legacySamplerV1toV2Adapter struct {
+ legacySamplerV1Base
+ }
+ return &legacySamplerV1toV2Adapter{
+ legacySamplerV1Base: legacySamplerV1Base{
+ delegate: s.IsSampled,
+ },
+ }
+}
+
+// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
+// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
+// for backwards compatibility reasons.
+// TODO (breaking change) remove this in the next major release
+type SamplerV2Base struct{}
+
+// IsSampled implements IsSampled of Sampler.
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
+ return false, nil
+}
+
+// Close implements Close of Sampler.
+func (SamplerV2Base) Close() {}
+
+// Equal implements Equal of Sampler.
+func (SamplerV2Base) Equal(other Sampler) bool { return false }
+
+// legacySamplerV1Base is used as a base for simple samplers that only implement
+// the legacy isSampled() function that is not sensitive to its arguments.
+type legacySamplerV1Base struct {
+ delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
+}
+
+func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
+ isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+ return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+ return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) Close() {}
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
index 9df8b6017..bbf6fb068 100644
--- a/vendor/github.com/uber/jaeger-client-go/span.go
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -34,6 +34,7 @@ type Span struct {
tracer *Tracer
+ // TODO: (breaking change) change to use a pointer
context SpanContext
// The name of the "operation" this span is an instance of.
@@ -65,18 +66,26 @@ type Span struct {
}
// Tag is a simple key value wrapper.
-// TODO deprecate in the next major release, use opentracing.Tag instead.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
type Tag struct {
key string
value interface{}
}
+// NewTag creates a new Tag.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+func NewTag(key string, value interface{}) Tag {
+ return Tag{key: key, value: value}
+}
+
// SetOperationName sets or changes the operation name.
func (s *Span) SetOperationName(operationName string) opentracing.Span {
s.Lock()
- defer s.Unlock()
- if s.context.IsSampled() {
- s.operationName = operationName
+ s.operationName = operationName
+ s.Unlock()
+ if !s.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnSetOperationName(s, operationName)
+ s.applySamplingDecision(decision, true)
}
s.observer.OnSetOperationName(operationName)
return s
@@ -84,14 +93,24 @@ func (s *Span) SetOperationName(operationName string) opentracing.Span {
// SetTag implements SetTag() of opentracing.Span
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+ return s.setTagInternal(key, value, true)
+}
+
+func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
s.observer.OnSetTag(key, value)
if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
return s
}
- s.Lock()
- defer s.Unlock()
- if s.context.IsSampled() {
- s.setTagNoLocking(key, value)
+ if !s.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnSetTag(s, key, value)
+ s.applySamplingDecision(decision, lock)
+ }
+ if s.isWriteable() {
+ if lock {
+ s.Lock()
+ defer s.Unlock()
+ }
+ s.appendTagNoLocking(key, value)
}
return s
}
@@ -121,14 +140,38 @@ func (s *Span) Duration() time.Duration {
func (s *Span) Tags() opentracing.Tags {
s.Lock()
defer s.Unlock()
- var result = make(opentracing.Tags)
+ var result = make(opentracing.Tags, len(s.tags))
for _, tag := range s.tags {
result[tag.key] = tag.value
}
return result
}
-func (s *Span) setTagNoLocking(key string, value interface{}) {
+// Logs returns micro logs for span
+func (s *Span) Logs() []opentracing.LogRecord {
+ s.Lock()
+ defer s.Unlock()
+
+ return append([]opentracing.LogRecord(nil), s.logs...)
+}
+
+// References returns references for this span
+func (s *Span) References() []opentracing.SpanReference {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.references == nil || len(s.references) == 0 {
+ return nil
+ }
+
+ result := make([]opentracing.SpanReference, len(s.references))
+ for i, r := range s.references {
+ result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
+ }
+ return result
+}
+
+func (s *Span) appendTagNoLocking(key string, value interface{}) {
s.tags = append(s.tags, Tag{key: key, value: value})
}
@@ -148,7 +191,7 @@ func (s *Span) logFieldsNoLocking(fields ...log.Field) {
Fields: fields,
Timestamp: time.Now(),
}
- s.appendLog(lr)
+ s.appendLogNoLocking(lr)
}
// LogKV implements opentracing.Span API
@@ -185,12 +228,12 @@ func (s *Span) Log(ld opentracing.LogData) {
if ld.Timestamp.IsZero() {
ld.Timestamp = s.tracer.timeNow()
}
- s.appendLog(ld.ToLogRecord())
+ s.appendLogNoLocking(ld.ToLogRecord())
}
}
// this function should only be called while holding a Write lock
-func (s *Span) appendLog(lr opentracing.LogRecord) {
+func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
// TODO add logic to limit number of logs per span (issue #46)
s.logs = append(s.logs, lr)
}
@@ -224,17 +267,25 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
}
s.observer.OnFinish(options)
s.Lock()
+ s.duration = options.FinishTime.Sub(s.startTime)
+ s.Unlock()
+ if !s.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnFinishSpan(s)
+ s.applySamplingDecision(decision, true)
+ }
if s.context.IsSampled() {
- s.duration = options.FinishTime.Sub(s.startTime)
- // Note: bulk logs are not subject to maxLogsPerSpan limit
- if options.LogRecords != nil {
- s.logs = append(s.logs, options.LogRecords...)
- }
- for _, ld := range options.BulkLogData {
- s.logs = append(s.logs, ld.ToLogRecord())
+ if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
+ s.Lock()
+ // Note: bulk logs are not subject to maxLogsPerSpan limit
+ if options.LogRecords != nil {
+ s.logs = append(s.logs, options.LogRecords...)
+ }
+ for _, ld := range options.BulkLogData {
+ s.logs = append(s.logs, ld.ToLogRecord())
+ }
+ s.Unlock()
}
}
- s.Unlock()
// call reportSpan even for non-sampled traces, to return span to the pool
// and update metrics counter
s.tracer.reportSpan(s)
@@ -300,23 +351,62 @@ func (s *Span) serviceName() string {
return s.tracer.serviceName
}
+func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
+ if !decision.Retryable {
+ s.context.samplingState.setFinal()
+ }
+ if decision.Sample {
+ s.context.samplingState.setSampled()
+ if len(decision.Tags) > 0 {
+ if lock {
+ s.Lock()
+ defer s.Unlock()
+ }
+ for _, tag := range decision.Tags {
+ s.appendTagNoLocking(tag.key, tag.value)
+ }
+ }
+ }
+}
+
+// Span can be written to if it is sampled or the sampling decision has not been finalized.
+func (s *Span) isWriteable() bool {
+ state := s.context.samplingState
+ return !state.isFinal() || state.isSampled()
+}
+
+func (s *Span) isSamplingFinalized() bool {
+ return s.context.samplingState.isFinal()
+}
+
// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+// The behavior of setSamplingPriority is surprising
+// If noDebugFlagOnForcedSampling is set
+// setSamplingPriority(span, 1) always sets only flagSampled
+// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes
+// setSamplingPriority(span, 1) sets both flagSampled and flagDebug
+// However,
+// setSamplingPriority(span, 0) always only resets flagSampled
+//
+// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can
+// leave flagDebug set
func setSamplingPriority(s *Span, value interface{}) bool {
val, ok := value.(uint16)
if !ok {
return false
}
- s.Lock()
- defer s.Unlock()
if val == 0 {
- s.context.flags = s.context.flags & (^flagSampled)
+ s.context.samplingState.unsetSampled()
+ s.context.samplingState.setFinal()
return true
}
if s.tracer.options.noDebugFlagOnForcedSampling {
- s.context.flags = s.context.flags | flagSampled
+ s.context.samplingState.setSampled()
+ s.context.samplingState.setFinal()
return true
} else if s.tracer.isDebugAllowed(s.operationName) {
- s.context.flags = s.context.flags | flagDebug | flagSampled
+ s.context.samplingState.setDebugAndSampled()
+ s.context.samplingState.setFinal()
return true
}
return false
@@ -326,5 +416,5 @@ func setSamplingPriority(s *Span, value interface{}) bool {
func EnableFirehose(s *Span) {
s.Lock()
defer s.Unlock()
- s.context.flags |= flagFirehose
+ s.context.samplingState.setFirehose()
}
diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
index 43553655a..b7230abfe 100644
--- a/vendor/github.com/uber/jaeger-client-go/context.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -19,12 +19,15 @@ import (
"fmt"
"strconv"
"strings"
+ "sync"
+
+ "go.uber.org/atomic"
)
const (
- flagSampled = byte(1)
- flagDebug = byte(2)
- flagFirehose = byte(8)
+ flagSampled = 1
+ flagDebug = 2
+ flagFirehose = 8
)
var (
@@ -56,9 +59,6 @@ type SpanContext struct {
// Should be 0 if the current span is a root span.
parentID SpanID
- // flags is a bitmap containing such bits as 'sampled' and 'debug'.
- flags byte
-
// Distributed Context baggage. The is a snapshot in time.
baggage map[string]string
@@ -67,6 +67,102 @@ type SpanContext struct {
//
// See JaegerDebugHeader in constants.go
debugID string
+
+ // samplingState is shared across all spans
+ samplingState *samplingState
+
+ // remote indicates that span context represents a remote parent
+ remote bool
+}
+
+type samplingState struct {
+ // Span context's state flags that are propagated across processes. Only lower 8 bits are used.
+ // We use an int32 instead of byte to be able to use CAS operations.
+ stateFlags atomic.Int32
+
+ // When state is not final, sampling will be retried on other span write operations,
+ // like SetOperationName / SetTag, and the spans will remain writable.
+ final atomic.Bool
+
+ // localRootSpan stores the SpanID of the first span created in this process for a given trace.
+ localRootSpan SpanID
+
+ // extendedState allows samplers to keep intermediate state.
+ // The keys and values in this map are completely opaque: interface{} -> interface{}.
+ extendedState sync.Map
+}
+
+func (s *samplingState) isLocalRootSpan(id SpanID) bool {
+ return id == s.localRootSpan
+}
+
+func (s *samplingState) setFlag(newFlag int32) {
+ swapped := false
+ for !swapped {
+ old := s.stateFlags.Load()
+ swapped = s.stateFlags.CAS(old, old|newFlag)
+ }
+}
+
+func (s *samplingState) unsetFlag(newFlag int32) {
+ swapped := false
+ for !swapped {
+ old := s.stateFlags.Load()
+ swapped = s.stateFlags.CAS(old, old&^newFlag)
+ }
+}
+
+func (s *samplingState) setSampled() {
+ s.setFlag(flagSampled)
+}
+
+func (s *samplingState) unsetSampled() {
+ s.unsetFlag(flagSampled)
+}
+
+func (s *samplingState) setDebugAndSampled() {
+ s.setFlag(flagDebug | flagSampled)
+}
+
+func (s *samplingState) setFirehose() {
+ s.setFlag(flagFirehose)
+}
+
+func (s *samplingState) setFlags(flags byte) {
+ s.stateFlags.Store(int32(flags))
+}
+
+func (s *samplingState) setFinal() {
+ s.final.Store(true)
+}
+
+func (s *samplingState) flags() byte {
+ return byte(s.stateFlags.Load())
+}
+
+func (s *samplingState) isSampled() bool {
+ return s.stateFlags.Load()&flagSampled == flagSampled
+}
+
+func (s *samplingState) isDebug() bool {
+ return s.stateFlags.Load()&flagDebug == flagDebug
+}
+
+func (s *samplingState) isFirehose() bool {
+ return s.stateFlags.Load()&flagFirehose == flagFirehose
+}
+
+func (s *samplingState) isFinal() bool {
+ return s.final.Load()
+}
+
+func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
+ if value, ok := s.extendedState.Load(key); ok {
+ return value
+ }
+ value := initValue()
+ value, _ = s.extendedState.LoadOrStore(key, value)
+ return value
}
// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
@@ -81,17 +177,28 @@ func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
// IsSampled returns whether this trace was chosen for permanent storage
// by the sampling mechanism of the tracer.
func (c SpanContext) IsSampled() bool {
- return (c.flags & flagSampled) == flagSampled
+ return c.samplingState.isSampled()
}
// IsDebug indicates whether sampling was explicitly requested by the service.
func (c SpanContext) IsDebug() bool {
- return (c.flags & flagDebug) == flagDebug
+ return c.samplingState.isDebug()
+}
+
+// IsSamplingFinalized indicates whether the sampling decision has been finalized.
+func (c SpanContext) IsSamplingFinalized() bool {
+ return c.samplingState.isFinal()
}
// IsFirehose indicates whether the firehose flag was set
func (c SpanContext) IsFirehose() bool {
- return (c.flags & flagFirehose) == flagFirehose
+ return c.samplingState.isFirehose()
+}
+
+// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
+// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
+func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
+ return c.samplingState.extendedStateForKey(key, initValue)
}
// IsValid indicates whether this context actually represents a valid trace.
@@ -99,11 +206,16 @@ func (c SpanContext) IsValid() bool {
return c.traceID.IsValid() && c.spanID != 0
}
+// SetFirehose enables firehose mode for this trace.
+func (c SpanContext) SetFirehose() {
+ c.samplingState.setFirehose()
+}
+
func (c SpanContext) String() string {
if c.traceID.High == 0 {
- return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+ return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
}
- return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
+ return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
}
// ContextFromString reconstructs the Context encoded in a string
@@ -130,7 +242,8 @@ func ContextFromString(value string) (SpanContext, error) {
if err != nil {
return emptyContext, err
}
- context.flags = byte(flags)
+ context.samplingState = &samplingState{}
+ context.samplingState.setFlags(byte(flags))
return context, nil
}
@@ -149,18 +262,24 @@ func (c SpanContext) ParentID() SpanID {
return c.parentID
}
+// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
+func (c SpanContext) Flags() byte {
+ return c.samplingState.flags()
+}
+
// NewSpanContext creates a new instance of SpanContext
func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
- flags := byte(0)
+ samplingState := &samplingState{}
if sampled {
- flags = flagSampled
+ samplingState.setSampled()
}
+
return SpanContext{
- traceID: traceID,
- spanID: spanID,
- parentID: parentID,
- flags: flags,
- baggage: baggage}
+ traceID: traceID,
+ spanID: spanID,
+ parentID: parentID,
+ samplingState: samplingState,
+ baggage: baggage}
}
// CopyFrom copies data from ctx into this context, including span identity and baggage.
@@ -169,7 +288,7 @@ func (c *SpanContext) CopyFrom(ctx *SpanContext) {
c.traceID = ctx.traceID
c.spanID = ctx.spanID
c.parentID = ctx.parentID
- c.flags = ctx.flags
+ c.samplingState = ctx.samplingState
if l := len(ctx.baggage); l > 0 {
c.baggage = make(map[string]string, l)
for k, v := range ctx.baggage {
@@ -193,7 +312,7 @@ func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
newBaggage[key] = value
}
// Use positional parameters so the compiler will help catch new fields.
- return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
+ return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
}
// isDebugIDContainerOnly returns true when the instance of the context is only
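
ExtendedSamplingState gives custom samplers a place to keep per-trace state; because samplingState is shared, every span of the trace sees the same value. A hedged sketch, where the key type and counter are made up for illustration (the counter uses the go.uber.org/atomic package vendored in this same change):

```go
// Package tracestate is a hypothetical example package.
package tracestate

import (
	jaeger "github.com/uber/jaeger-client-go"
	"go.uber.org/atomic"
)

// counterKey is a made-up key type; any comparable value can serve as a key.
type counterKey struct{}

// samplingAttempts returns a per-trace counter, creating it on first use.
func samplingAttempts(ctx jaeger.SpanContext) *atomic.Int32 {
	return ctx.ExtendedSamplingState(counterKey{}, func() interface{} {
		return atomic.NewInt32(0)
	}).(*atomic.Int32)
}
```
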
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
index 745a0c38a..f03372dc7 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -38,7 +38,7 @@ type Tracer struct {
serviceName string
hostIPv4 uint32 // this is for zipkin endpoint conversion
- sampler Sampler
+ sampler SamplerV2
reporter Reporter
metrics Metrics
logger log.Logger
@@ -74,6 +74,7 @@ type Tracer struct {
// NewTracer creates Tracer implementation that reports tracing to Jaeger.
// The returned io.Closer can be used in shutdown hooks to ensure that the internal
// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+// TODO (breaking change) return *Tracer only, without closer.
func NewTracer(
serviceName string,
sampler Sampler,
@@ -82,7 +83,7 @@ func NewTracer(
) (opentracing.Tracer, io.Closer) {
t := &Tracer{
serviceName: serviceName,
- sampler: sampler,
+ sampler: samplerV1toV2(sampler),
reporter: reporter,
injectors: make(map[interface{}]Injector),
extractors: make(map[interface{}]Extractor),
@@ -261,7 +262,7 @@ func (t *Tracer) startSpanWithOptions(
rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
}
- var samplerTags []Tag
+ var internalTags []Tag
newTrace := false
if !isSelfRef {
if !hasParent || !parent.IsValid() {
@@ -272,13 +273,12 @@ func (t *Tracer) startSpanWithOptions(
}
ctx.spanID = SpanID(ctx.traceID.Low)
ctx.parentID = 0
- ctx.flags = byte(0)
+ ctx.samplingState = &samplingState{
+ localRootSpan: ctx.spanID,
+ }
if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
- ctx.flags |= (flagSampled | flagDebug)
- samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}}
- } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled {
- ctx.flags |= flagSampled
- samplerTags = tags
+ ctx.samplingState.setDebugAndSampled()
+ internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
}
} else {
ctx.traceID = parent.traceID
@@ -290,7 +290,11 @@ func (t *Tracer) startSpanWithOptions(
ctx.spanID = SpanID(t.randomID())
ctx.parentID = parent.spanID
}
- ctx.flags = parent.flags
+ ctx.samplingState = parent.samplingState
+ if parent.remote {
+ ctx.samplingState.setFinal()
+ ctx.samplingState.localRootSpan = ctx.spanID
+ }
}
if hasParent {
// copy baggage items
@@ -305,17 +309,30 @@ func (t *Tracer) startSpanWithOptions(
sp := t.newSpan()
sp.context = ctx
+ sp.tracer = t
+ sp.operationName = operationName
+ sp.startTime = options.StartTime
+ sp.duration = 0
+ sp.references = references
+ sp.firstInProcess = rpcServer || sp.context.parentID == 0
+
+ if !sp.isSamplingFinalized() {
+ decision := t.sampler.OnCreateSpan(sp)
+ sp.applySamplingDecision(decision, false)
+ }
sp.observer = t.observer.OnStartSpan(sp, operationName, options)
- return t.startSpanInternal(
- sp,
- operationName,
- options.StartTime,
- samplerTags,
- options.Tags,
- newTrace,
- rpcServer,
- references,
- )
+
+ if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
+ if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
+ sp.tags = make([]Tag, 0, tagsTotalLength)
+ }
+ sp.tags = append(sp.tags, internalTags...)
+ for k, v := range options.Tags {
+ sp.setTagInternal(k, v, false)
+ }
+ }
+ t.emitNewSpanMetrics(sp, newTrace)
+ return sp
}
// Inject implements Inject() method of opentracing.Tracer
@@ -340,6 +357,7 @@ func (t *Tracer) Extract(
if err != nil {
return nil, err // ensure returned spanCtx is nil
}
+ spanCtx.remote = true
return spanCtx, nil
}
return nil, opentracing.ErrUnsupportedFormat
@@ -350,10 +368,10 @@ func (t *Tracer) Close() error {
t.reporter.Close()
t.sampler.Close()
if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
- mgr.Close()
+ _ = mgr.Close()
}
if throttler, ok := t.debugThrottler.(io.Closer); ok {
- throttler.Close()
+ _ = throttler.Close()
}
return nil
}
@@ -368,6 +386,7 @@ func (t *Tracer) Tags() []opentracing.Tag {
}
// getTag returns the value of specific tag, if not exists, return nil.
+// TODO only used by tests, move there.
func (t *Tracer) getTag(key string) (interface{}, bool) {
for _, tag := range t.tags {
if tag.key == key {
@@ -383,41 +402,21 @@ func (t *Tracer) newSpan() *Span {
return t.spanAllocator.Get()
}
-func (t *Tracer) startSpanInternal(
- sp *Span,
- operationName string,
- startTime time.Time,
- internalTags []Tag,
- tags opentracing.Tags,
- newTrace bool,
- rpcServer bool,
- references []Reference,
-) *Span {
- sp.tracer = t
- sp.operationName = operationName
- sp.startTime = startTime
- sp.duration = 0
- sp.references = references
- sp.firstInProcess = rpcServer || sp.context.parentID == 0
- if len(tags) > 0 || len(internalTags) > 0 {
- sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags))
- copy(sp.tags, internalTags)
- for k, v := range tags {
- sp.observer.OnSetTag(k, v)
- if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) {
- continue
- }
- sp.setTagNoLocking(k, v)
+// emitNewSpanMetrics generates metrics on the number of started spans and traces.
+// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the
+// server-side RPC span has the exact same trace/span/parent IDs as the
+// calling client-side span, but obviously the server side span is
+// no longer a root span of the trace.
+func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
+ if !sp.isSamplingFinalized() {
+ t.metrics.SpansStartedDelayedSampling.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedDelayedSampling.Inc(1)
}
- }
- // emit metrics
- if sp.context.IsSampled() {
+ // joining a trace is not possible, because sampling decision inherited from upstream is final
+ } else if sp.context.IsSampled() {
t.metrics.SpansStartedSampled.Inc(1)
if newTrace {
- // We cannot simply check for parentID==0 because in Zipkin model the
- // server-side RPC span has the exact same trace/span/parent IDs as the
- // calling client-side span, but obviously the server side span is
- // no longer a root span of the trace.
t.metrics.TracesStartedSampled.Inc(1)
} else if sp.firstInProcess {
t.metrics.TracesJoinedSampled.Inc(1)
@@ -430,15 +429,20 @@ func (t *Tracer) startSpanInternal(
t.metrics.TracesJoinedNotSampled.Inc(1)
}
}
- return sp
}
func (t *Tracer) reportSpan(sp *Span) {
- t.metrics.SpansFinished.Inc(1)
+ if !sp.isSamplingFinalized() {
+ t.metrics.SpansFinishedDelayedSampling.Inc(1)
+ } else if sp.context.IsSampled() {
+ t.metrics.SpansFinishedSampled.Inc(1)
+ } else {
+ t.metrics.SpansFinishedNotSampled.Inc(1)
+ }
- // Note: if the reporter is processing Span asynchronously need to Retain() it
- // otherwise, in the racing condition will be rewritten span data before it will be sent
- // * To remove object use method span.Release()
+ // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
+ // and then Release() it when no longer needed.
+ // Otherwise, the span may be reused for another trace and its data may be overwritten.
if sp.context.IsSampled() {
t.reporter.Report(sp)
}
@@ -466,6 +470,11 @@ func (t *Tracer) isDebugAllowed(operation string) bool {
return t.debugThrottler.IsAllowed(operation)
}
+// Sampler returns the sampler given to the tracer at creation.
+func (t *Tracer) Sampler() SamplerV2 {
+ return t.sampler
+}
+
// SelfRef creates an opentracing compliant SpanReference from a jaeger
// SpanContext. This is a factory function in order to encapsulate jaeger specific
// types.
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
index 1b8db9758..bf2f13165 100644
--- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -20,22 +20,15 @@ import (
)
// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
+//
+// TODO (breaking change) remove this interface in favor of public struct below
+//
+// Deprecated, use ReconfigurableRateLimiter.
type RateLimiter interface {
CheckCredit(itemCost float64) bool
}
-type rateLimiter struct {
- sync.Mutex
-
- creditsPerSecond float64
- balance float64
- maxBalance float64
- lastTick time.Time
-
- timeNow func() time.Time
-}
-
-// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a
+// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a
// credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional
// to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost
// of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased"
@@ -47,31 +40,73 @@ type rateLimiter struct {
//
// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
// as bytes/second, and calling CheckCredit() with the actual message size.
-func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter {
- return &rateLimiter{
+//
+// TODO (breaking change) rename to RateLimiter once the interface is removed
+type ReconfigurableRateLimiter struct {
+ lock sync.Mutex
+
+ creditsPerSecond float64
+ balance float64
+ maxBalance float64
+ lastTick time.Time
+
+ timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new ReconfigurableRateLimiter.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+ return &ReconfigurableRateLimiter{
creditsPerSecond: creditsPerSecond,
balance: maxBalance,
maxBalance: maxBalance,
lastTick: time.Now(),
- timeNow: time.Now}
+ timeNow: time.Now,
+ }
}
-func (b *rateLimiter) CheckCredit(itemCost float64) bool {
- b.Lock()
- defer b.Unlock()
- // calculate how much time passed since the last tick, and update current tick
- currentTime := b.timeNow()
- elapsedTime := currentTime.Sub(b.lastTick)
- b.lastTick = currentTime
- // calculate how much credit have we accumulated since the last tick
- b.balance += elapsedTime.Seconds() * b.creditsPerSecond
- if b.balance > b.maxBalance {
- b.balance = b.maxBalance
- }
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not less than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+
// if we have enough credits to pay for current item, then reduce balance and allow
- if b.balance >= itemCost {
- b.balance -= itemCost
+ if rl.balance >= itemCost {
+ rl.balance -= itemCost
+ return true
+ }
+ // otherwise check if balance can be increased due to time elapsed, and try again
+ rl.updateBalance()
+ if rl.balance >= itemCost {
+ rl.balance -= itemCost
return true
}
return false
}
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+ // calculate how much time passed since the last tick, and update current tick
+ currentTime := rl.timeNow()
+ elapsedTime := currentTime.Sub(rl.lastTick)
+ rl.lastTick = currentTime
+ // calculate how much credit have we accumulated since the last tick
+ rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+ if rl.balance > rl.maxBalance {
+ rl.balance = rl.maxBalance
+ }
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps to avoid thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+
+ rl.updateBalance() // get up to date balance
+ rl.balance = rl.balance * maxBalance / rl.maxBalance
+ rl.creditsPerSecond = creditsPerSecond
+ rl.maxBalance = maxBalance
+}
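
A short usage sketch of the reworked rate limiter, assuming only the exported API above; the credit numbers are illustrative:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// Allow roughly 2 items per second with a burst capacity of 2.
	limiter := utils.NewRateLimiter(2, 2)

	if limiter.CheckCredit(1) {
		fmt.Println("item allowed")
	}

	// When a new sampling strategy arrives, raise the limit in place; the
	// accumulated balance is pro-rated instead of resetting to a full bucket,
	// which is what avoids the thundering herd mentioned above.
	limiter.Update(10, 10)
}
```
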
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
index 636952b7f..98cab4b6e 100644
--- a/vendor/github.com/uber/jaeger-client-go/zipkin.go
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -55,7 +55,7 @@ func (p *zipkinPropagator) Inject(
carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
carrier.SetSpanID(uint64(ctx.SpanID()))
carrier.SetParentID(uint64(ctx.ParentID()))
- carrier.SetFlags(ctx.flags)
+ carrier.SetFlags(ctx.samplingState.flags())
return nil
}
@@ -71,6 +71,7 @@ func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
ctx.traceID.Low = carrier.TraceID()
ctx.spanID = SpanID(carrier.SpanID())
ctx.parentID = SpanID(carrier.ParentID())
- ctx.flags = carrier.Flags()
+ ctx.samplingState = &samplingState{}
+ ctx.samplingState.setFlags(carrier.Flags())
return ctx, nil
}
diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go
deleted file mode 100644
index 855e60aee..000000000
--- a/vendor/github.com/ulikunitz/xz/example.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "io"
- "log"
- "os"
-
- "github.com/ulikunitz/xz"
-)
-
-func main() {
- const text = "The quick brown fox jumps over the lazy dog.\n"
- var buf bytes.Buffer
- // compress text
- w, err := xz.NewWriter(&buf)
- if err != nil {
- log.Fatalf("xz.NewWriter error %s", err)
- }
- if _, err := io.WriteString(w, text); err != nil {
- log.Fatalf("WriteString error %s", err)
- }
- if err := w.Close(); err != nil {
- log.Fatalf("w.Close error %s", err)
- }
- // decompress buffer and write output to stdout
- r, err := xz.NewReader(&buf)
- if err != nil {
- log.Fatalf("NewReader error %s", err)
- }
- if _, err = io.Copy(os.Stdout, r); err != nil {
- log.Fatalf("io.Copy error %s", err)
- }
-}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 000000000..6d4d1be7b
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 000000000..0a4504f11
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,11 @@
+.DS_Store
+/vendor
+/cover
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 000000000..0f3769e5f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,27 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+go:
+ - 1.11.x
+ - 1.12.x
+
+matrix:
+ include:
+ - go: 1.12.x
+ env: NO_TEST=yes LINT=yes
+
+cache:
+ directories:
+ - vendor
+
+install:
+ - make install_ci
+
+script:
+ - test -n "$NO_TEST" || make test_ci
+ - test -n "$NO_TEST" || scripts/test-ubergo.sh
+ - test -z "$LINT" || make install_lint lint
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 000000000..8765c9fbc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 000000000..1ef263075
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,51 @@
+# Many Go tools take file globs or directories as arguments instead of packages.
+PACKAGE_FILES ?= *.go
+
+# For pre go1.6
+export GO15VENDOREXPERIMENT=1
+
+
+.PHONY: build
+build:
+ go build -i ./...
+
+
+.PHONY: install
+install:
+ glide --version || go get github.com/Masterminds/glide
+ glide install
+
+
+.PHONY: test
+test:
+ go test -cover -race ./...
+
+
+.PHONY: install_ci
+install_ci: install
+ go get github.com/wadey/gocovmerge
+ go get github.com/mattn/goveralls
+ go get golang.org/x/tools/cmd/cover
+
+.PHONY: install_lint
+install_lint:
+ go get golang.org/x/lint/golint
+
+
+.PHONY: lint
+lint:
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @go vet ./... 2>&1 | tee -a lint.log
+ @echo "Checking lint..."
+ @golint $$(go list ./...) 2>&1 | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log
+ @[ ! -s lint.log ]
+
+
+.PHONY: test_ci
+test_ci: install_ci build
+ ./scripts/cover.sh $(shell go list $(PACKAGES))
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 000000000..62eb8e576
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,36 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+`go get -u go.uber.org/atomic`
+
+## Usage
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+Stable.
+
+___
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go
new file mode 100644
index 000000000..1db6849fc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/atomic.go
@@ -0,0 +1,351 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+// Int32 is an atomic wrapper around an int32.
+type Int32 struct{ v int32 }
+
+// NewInt32 creates an Int32.
+func NewInt32(i int32) *Int32 {
+ return &Int32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+ return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+ return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+ atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+ return atomic.SwapInt32(&i.v, n)
+}
+
+// Int64 is an atomic wrapper around an int64.
+type Int64 struct{ v int64 }
+
+// NewInt64 creates an Int64.
+func NewInt64(i int64) *Int64 {
+ return &Int64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+ return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+ return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+ atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+ return atomic.SwapInt64(&i.v, n)
+}
+
+// Uint32 is an atomic wrapper around a uint32.
+type Uint32 struct{ v uint32 }
+
+// NewUint32 creates a Uint32.
+func NewUint32(i uint32) *Uint32 {
+ return &Uint32{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+ atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+ return atomic.SwapUint32(&i.v, n)
+}
+
+// Uint64 is an atomic wrapper around a uint64.
+type Uint64 struct{ v uint64 }
+
+// NewUint64 creates a Uint64.
+func NewUint64(i uint64) *Uint64 {
+ return &Uint64{i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+ atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+ return atomic.SwapUint64(&i.v, n)
+}
+
+// Bool is an atomic Boolean.
+type Bool struct{ v uint32 }
+
+// NewBool creates a Bool.
+func NewBool(initial bool) *Bool {
+ return &Bool{boolToInt(initial)}
+}
+
+// Load atomically loads the Boolean.
+func (b *Bool) Load() bool {
+ return truthy(atomic.LoadUint32(&b.v))
+}
+
+// CAS is an atomic compare-and-swap.
+func (b *Bool) CAS(old, new bool) bool {
+ return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
+}
+
+// Store atomically stores the passed value.
+func (b *Bool) Store(new bool) {
+ atomic.StoreUint32(&b.v, boolToInt(new))
+}
+
+// Swap sets the given value and returns the previous value.
+func (b *Bool) Swap(new bool) bool {
+ return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
+ return truthy(atomic.AddUint32(&b.v, 1) - 1)
+}
+
+func truthy(n uint32) bool {
+ return n&1 == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Float64 is an atomic wrapper around float64.
+type Float64 struct {
+ v uint64
+}
+
+// NewFloat64 creates a Float64.
+func NewFloat64(f float64) *Float64 {
+ return &Float64{math.Float64bits(f)}
+}
+
+// Load atomically loads the wrapped value.
+func (f *Float64) Load() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&f.v))
+}
+
+// Store atomically stores the passed value.
+func (f *Float64) Store(s float64) {
+ atomic.StoreUint64(&f.v, math.Float64bits(s))
+}
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
+ for {
+ old := f.Load()
+ new := old + s
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+ return f.Add(-s)
+}
+
+// CAS is an atomic compare-and-swap.
+func (f *Float64) CAS(old, new float64) bool {
+ return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new))
+}
+
+// Duration is an atomic wrapper around time.Duration
+// https://godoc.org/time#Duration
+type Duration struct {
+ v Int64
+}
+
+// NewDuration creates a Duration.
+func NewDuration(d time.Duration) *Duration {
+ return &Duration{v: *NewInt64(int64(d))}
+}
+
+// Load atomically loads the wrapped value.
+func (d *Duration) Load() time.Duration {
+ return time.Duration(d.v.Load())
+}
+
+// Store atomically stores the passed value.
+func (d *Duration) Store(n time.Duration) {
+ d.v.Store(int64(n))
+}
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(n time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(n)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(n time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(n)))
+}
+
+// Swap atomically swaps the wrapped time.Duration and returns the old value.
+func (d *Duration) Swap(n time.Duration) time.Duration {
+ return time.Duration(d.v.Swap(int64(n)))
+}
+
+// CAS is an atomic compare-and-swap.
+func (d *Duration) CAS(old, new time.Duration) bool {
+ return d.v.CAS(int64(old), int64(new))
+}
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct{ atomic.Value }
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 000000000..0489d19ba
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper around Value for errors
+type Error struct{ v Value }
+
+// errorHolder is a non-nil holder for an error object.
+// atomic.Value panics when storing a nil object, so the error needs to be
+// wrapped in a valid (non-nil) object first.
+type errorHolder struct{ err error }
+
+// NewError creates a new atomic error object.
+func NewError(err error) *Error {
+ e := &Error{}
+ if err != nil {
+ e.Store(err)
+ }
+ return e
+}
+
+// Load atomically loads the wrapped error
+func (e *Error) Load() error {
+ v := e.v.Load()
+ if v == nil {
+ return nil
+ }
+
+ eh := v.(errorHolder)
+ return eh.err
+}
+
+// Store atomically stores error.
+// NOTE: a holder object is allocated on each Store call.
+func (e *Error) Store(err error) {
+ e.v.Store(errorHolder{err: err})
+}
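For orientation only (not part of this change): a small sketch of the Error wrapper above, showing why the errorHolder indirection matters. `sync/atomic.Value` cannot store nil, but `Error.Store(nil)` is fine because the nil error is wrapped in a non-nil holder. The error text is made up.

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	lastErr := atomic.NewError(nil) // nil is not stored; Load simply returns nil
	fmt.Println(lastErr.Load())     // <nil>

	lastErr.Store(errors.New("backend unreachable"))
	fmt.Println(lastErr.Load()) // backend unreachable

	lastErr.Store(nil)          // fine: nil gets wrapped in a non-nil errorHolder
	fmt.Println(lastErr.Load()) // <nil>
}
```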
diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock
new file mode 100644
index 000000000..3c72c5997
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.lock
@@ -0,0 +1,17 @@
+hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53
+updated: 2016-10-27T00:10:51.16960137-07:00
+imports: []
+testImports:
+- name: github.com/davecgh/go-spew
+ version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
+ subpackages:
+ - spew
+- name: github.com/pmezard/go-difflib
+ version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
+ subpackages:
+ - difflib
+- name: github.com/stretchr/testify
+ version: d77da356e56a7428ad25149ca77381849a6a5232
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml
new file mode 100644
index 000000000..4cf608ec0
--- /dev/null
+++ b/vendor/go.uber.org/atomic/glide.yaml
@@ -0,0 +1,6 @@
+package: go.uber.org/atomic
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 000000000..ede8136fa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper around Value for strings.
+type String struct{ v Value }
+
+// NewString creates a String.
+func NewString(str string) *String {
+ s := &String{}
+ if str != "" {
+ s.Store(str)
+ }
+ return s
+}
+
+// Load atomically loads the wrapped string.
+func (s *String) Load() string {
+ v := s.v.Load()
+ if v == nil {
+ return ""
+ }
+ return v.(string)
+}
+
+// Store atomically stores the passed string.
+// Note: Converting the string to an interface{} to store in the Value
+// requires an allocation.
+func (s *String) Store(str string) {
+ s.v.Store(str)
+}
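For orientation only (not part of this change): the String wrapper above in use. The address value is made up; as the comment above notes, each Store allocates because the string is boxed into an interface{} for the underlying Value.

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	endpoint := atomic.NewString("") // empty string: nothing is stored yet
	fmt.Printf("%q\n", endpoint.Load())

	endpoint.Store("10.0.0.1:8080") // each Store allocates (string -> interface{})
	fmt.Printf("%q\n", endpoint.Load())
}
```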
diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go
deleted file mode 100644
index 5d052781b..000000000
--- a/vendor/golang.org/x/net/html/atom/gen.go
+++ /dev/null
@@ -1,712 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-//go:generate go run gen.go
-//go:generate go run gen.go -test
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io/ioutil"
- "math/rand"
- "os"
- "sort"
- "strings"
-)
-
-// identifier converts s to a Go exported identifier.
-// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
-func identifier(s string) string {
- b := make([]byte, 0, len(s))
- cap := true
- for _, c := range s {
- if c == '-' {
- cap = true
- continue
- }
- if cap && 'a' <= c && c <= 'z' {
- c -= 'a' - 'A'
- }
- cap = false
- b = append(b, byte(c))
- }
- return string(b)
-}
-
-var test = flag.Bool("test", false, "generate table_test.go")
-
-func genFile(name string, buf *bytes.Buffer) {
- b, err := format.Source(buf.Bytes())
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- if err := ioutil.WriteFile(name, b, 0644); err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
-
-func main() {
- flag.Parse()
-
- var all []string
- all = append(all, elements...)
- all = append(all, attributes...)
- all = append(all, eventHandlers...)
- all = append(all, extra...)
- sort.Strings(all)
-
- // uniq - lists have dups
- w := 0
- for _, s := range all {
- if w == 0 || all[w-1] != s {
- all[w] = s
- w++
- }
- }
- all = all[:w]
-
- if *test {
- var buf bytes.Buffer
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
- fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
- fmt.Fprintln(&buf, "package atom\n")
- fmt.Fprintln(&buf, "var testAtomList = []string{")
- for _, s := range all {
- fmt.Fprintf(&buf, "\t%q,\n", s)
- }
- fmt.Fprintln(&buf, "}")
-
- genFile("table_test.go", &buf)
- return
- }
-
- // Find hash that minimizes table size.
- var best *table
- for i := 0; i < 1000000; i++ {
- if best != nil && 1<<(best.k-1) < len(all) {
- break
- }
- h := rand.Uint32()
- for k := uint(0); k <= 16; k++ {
- if best != nil && k >= best.k {
- break
- }
- var t table
- if t.init(h, k, all) {
- best = &t
- break
- }
- }
- }
- if best == nil {
- fmt.Fprintf(os.Stderr, "failed to construct string table\n")
- os.Exit(1)
- }
-
- // Lay out strings, using overlaps when possible.
- layout := append([]string{}, all...)
-
- // Remove strings that are substrings of other strings
- for changed := true; changed; {
- changed = false
- for i, s := range layout {
- if s == "" {
- continue
- }
- for j, t := range layout {
- if i != j && t != "" && strings.Contains(s, t) {
- changed = true
- layout[j] = ""
- }
- }
- }
- }
-
- // Join strings where one suffix matches another prefix.
- for {
- // Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
- // maximizing overlap length k.
- besti := -1
- bestj := -1
- bestk := 0
- for i, s := range layout {
- if s == "" {
- continue
- }
- for j, t := range layout {
- if i == j {
- continue
- }
- for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
- if s[len(s)-k:] == t[:k] {
- besti = i
- bestj = j
- bestk = k
- }
- }
- }
- }
- if bestk > 0 {
- layout[besti] += layout[bestj][bestk:]
- layout[bestj] = ""
- continue
- }
- break
- }
-
- text := strings.Join(layout, "")
-
- atom := map[string]uint32{}
- for _, s := range all {
- off := strings.Index(text, s)
- if off < 0 {
- panic("lost string " + s)
- }
- atom[s] = uint32(off<<8 | len(s))
- }
-
- var buf bytes.Buffer
- // Generate the Go code.
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
- fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
- fmt.Fprintln(&buf, "package atom\n\nconst (")
-
- // compute max len
- maxLen := 0
- for _, s := range all {
- if maxLen < len(s) {
- maxLen = len(s)
- }
- fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
- }
- fmt.Fprintln(&buf, ")\n")
-
- fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
- fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
-
- fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
- for i, s := range best.tab {
- if s == "" {
- continue
- }
- fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
- }
- fmt.Fprintf(&buf, "}\n")
- datasize := (1 << best.k) * 4
-
- fmt.Fprintln(&buf, "const atomText =")
- textsize := len(text)
- for len(text) > 60 {
- fmt.Fprintf(&buf, "\t%q +\n", text[:60])
- text = text[60:]
- }
- fmt.Fprintf(&buf, "\t%q\n\n", text)
-
- genFile("table.go", &buf)
-
- fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
-}
-
-type byLen []string
-
-func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
-func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x byLen) Len() int { return len(x) }
-
-// fnv computes the FNV hash with an arbitrary starting value h.
-func fnv(h uint32, s string) uint32 {
- for i := 0; i < len(s); i++ {
- h ^= uint32(s[i])
- h *= 16777619
- }
- return h
-}
-
-// A table represents an attempt at constructing the lookup table.
-// The lookup table uses cuckoo hashing, meaning that each string
-// can be found in one of two positions.
-type table struct {
- h0 uint32
- k uint
- mask uint32
- tab []string
-}
-
-// hash returns the two hashes for s.
-func (t *table) hash(s string) (h1, h2 uint32) {
- h := fnv(t.h0, s)
- h1 = h & t.mask
- h2 = (h >> 16) & t.mask
- return
-}
-
-// init initializes the table with the given parameters.
-// h0 is the initial hash value,
-// k is the number of bits of hash value to use, and
-// x is the list of strings to store in the table.
-// init returns false if the table cannot be constructed.
-func (t *table) init(h0 uint32, k uint, x []string) bool {
- t.h0 = h0
- t.k = k
- t.tab = make([]string, 1<<k)
- t.mask = 1<<k - 1
- for _, s := range x {
- if !t.insert(s) {
- return false
- }
- }
- return true
-}
-
-// insert inserts s in the table.
-func (t *table) insert(s string) bool {
- h1, h2 := t.hash(s)
- if t.tab[h1] == "" {
- t.tab[h1] = s
- return true
- }
- if t.tab[h2] == "" {
- t.tab[h2] = s
- return true
- }
- if t.push(h1, 0) {
- t.tab[h1] = s
- return true
- }
- if t.push(h2, 0) {
- t.tab[h2] = s
- return true
- }
- return false
-}
-
-// push attempts to push aside the entry in slot i.
-func (t *table) push(i uint32, depth int) bool {
- if depth > len(t.tab) {
- return false
- }
- s := t.tab[i]
- h1, h2 := t.hash(s)
- j := h1 + h2 - i
- if t.tab[j] != "" && !t.push(j, depth+1) {
- return false
- }
- t.tab[j] = s
- return true
-}
-
-// The lists of element names and attribute keys were taken from
-// https://html.spec.whatwg.org/multipage/indices.html#index
-// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
-
-// "command", "keygen" and "menuitem" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var elements = []string{
- "a",
- "abbr",
- "address",
- "area",
- "article",
- "aside",
- "audio",
- "b",
- "base",
- "bdi",
- "bdo",
- "blockquote",
- "body",
- "br",
- "button",
- "canvas",
- "caption",
- "cite",
- "code",
- "col",
- "colgroup",
- "command",
- "data",
- "datalist",
- "dd",
- "del",
- "details",
- "dfn",
- "dialog",
- "div",
- "dl",
- "dt",
- "em",
- "embed",
- "fieldset",
- "figcaption",
- "figure",
- "footer",
- "form",
- "h1",
- "h2",
- "h3",
- "h4",
- "h5",
- "h6",
- "head",
- "header",
- "hgroup",
- "hr",
- "html",
- "i",
- "iframe",
- "img",
- "input",
- "ins",
- "kbd",
- "keygen",
- "label",
- "legend",
- "li",
- "link",
- "main",
- "map",
- "mark",
- "menu",
- "menuitem",
- "meta",
- "meter",
- "nav",
- "noscript",
- "object",
- "ol",
- "optgroup",
- "option",
- "output",
- "p",
- "param",
- "picture",
- "pre",
- "progress",
- "q",
- "rp",
- "rt",
- "ruby",
- "s",
- "samp",
- "script",
- "section",
- "select",
- "slot",
- "small",
- "source",
- "span",
- "strong",
- "style",
- "sub",
- "summary",
- "sup",
- "table",
- "tbody",
- "td",
- "template",
- "textarea",
- "tfoot",
- "th",
- "thead",
- "time",
- "title",
- "tr",
- "track",
- "u",
- "ul",
- "var",
- "video",
- "wbr",
-}
-
-// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
-//
-// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
-// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var attributes = []string{
- "abbr",
- "accept",
- "accept-charset",
- "accesskey",
- "action",
- "allowfullscreen",
- "allowpaymentrequest",
- "allowusermedia",
- "alt",
- "as",
- "async",
- "autocomplete",
- "autofocus",
- "autoplay",
- "challenge",
- "charset",
- "checked",
- "cite",
- "class",
- "color",
- "cols",
- "colspan",
- "command",
- "content",
- "contenteditable",
- "contextmenu",
- "controls",
- "coords",
- "crossorigin",
- "data",
- "datetime",
- "default",
- "defer",
- "dir",
- "dirname",
- "disabled",
- "download",
- "draggable",
- "dropzone",
- "enctype",
- "for",
- "form",
- "formaction",
- "formenctype",
- "formmethod",
- "formnovalidate",
- "formtarget",
- "headers",
- "height",
- "hidden",
- "high",
- "href",
- "hreflang",
- "http-equiv",
- "icon",
- "id",
- "inputmode",
- "integrity",
- "is",
- "ismap",
- "itemid",
- "itemprop",
- "itemref",
- "itemscope",
- "itemtype",
- "keytype",
- "kind",
- "label",
- "lang",
- "list",
- "loop",
- "low",
- "manifest",
- "max",
- "maxlength",
- "media",
- "mediagroup",
- "method",
- "min",
- "minlength",
- "multiple",
- "muted",
- "name",
- "nomodule",
- "nonce",
- "novalidate",
- "open",
- "optimum",
- "pattern",
- "ping",
- "placeholder",
- "playsinline",
- "poster",
- "preload",
- "radiogroup",
- "readonly",
- "referrerpolicy",
- "rel",
- "required",
- "reversed",
- "rows",
- "rowspan",
- "sandbox",
- "spellcheck",
- "scope",
- "scoped",
- "seamless",
- "selected",
- "shape",
- "size",
- "sizes",
- "sortable",
- "sorted",
- "slot",
- "span",
- "spellcheck",
- "src",
- "srcdoc",
- "srclang",
- "srcset",
- "start",
- "step",
- "style",
- "tabindex",
- "target",
- "title",
- "translate",
- "type",
- "typemustmatch",
- "updateviacache",
- "usemap",
- "value",
- "width",
- "workertype",
- "wrap",
-}
-
-// "onautocomplete", "onautocompleteerror", "onmousewheel",
-// "onshow" and "onsort" have been removed from the spec,
-// but are kept here for backwards compatibility.
-var eventHandlers = []string{
- "onabort",
- "onautocomplete",
- "onautocompleteerror",
- "onauxclick",
- "onafterprint",
- "onbeforeprint",
- "onbeforeunload",
- "onblur",
- "oncancel",
- "oncanplay",
- "oncanplaythrough",
- "onchange",
- "onclick",
- "onclose",
- "oncontextmenu",
- "oncopy",
- "oncuechange",
- "oncut",
- "ondblclick",
- "ondrag",
- "ondragend",
- "ondragenter",
- "ondragexit",
- "ondragleave",
- "ondragover",
- "ondragstart",
- "ondrop",
- "ondurationchange",
- "onemptied",
- "onended",
- "onerror",
- "onfocus",
- "onhashchange",
- "oninput",
- "oninvalid",
- "onkeydown",
- "onkeypress",
- "onkeyup",
- "onlanguagechange",
- "onload",
- "onloadeddata",
- "onloadedmetadata",
- "onloadend",
- "onloadstart",
- "onmessage",
- "onmessageerror",
- "onmousedown",
- "onmouseenter",
- "onmouseleave",
- "onmousemove",
- "onmouseout",
- "onmouseover",
- "onmouseup",
- "onmousewheel",
- "onwheel",
- "onoffline",
- "ononline",
- "onpagehide",
- "onpageshow",
- "onpaste",
- "onpause",
- "onplay",
- "onplaying",
- "onpopstate",
- "onprogress",
- "onratechange",
- "onreset",
- "onresize",
- "onrejectionhandled",
- "onscroll",
- "onsecuritypolicyviolation",
- "onseeked",
- "onseeking",
- "onselect",
- "onshow",
- "onsort",
- "onstalled",
- "onstorage",
- "onsubmit",
- "onsuspend",
- "ontimeupdate",
- "ontoggle",
- "onunhandledrejection",
- "onunload",
- "onvolumechange",
- "onwaiting",
-}
-
-// extra are ad-hoc values not covered by any of the lists above.
-var extra = []string{
- "acronym",
- "align",
- "annotation",
- "annotation-xml",
- "applet",
- "basefont",
- "bgsound",
- "big",
- "blink",
- "center",
- "color",
- "desc",
- "face",
- "font",
- "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
- "foreignobject",
- "frame",
- "frameset",
- "image",
- "isindex",
- "listing",
- "malignmark",
- "marquee",
- "math",
- "mglyph",
- "mi",
- "mn",
- "mo",
- "ms",
- "mtext",
- "nobr",
- "noembed",
- "noframes",
- "plaintext",
- "prompt",
- "public",
- "rb",
- "rtc",
- "spacer",
- "strike",
- "svg",
- "system",
- "tt",
- "xmp",
-}
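Although gen.go is dropped from the vendor tree here, the encoding it emits is worth a note: each Atom packs the byte offset of its string within a shared text blob in the high bits and the string length in the low 8 bits (`off<<8 | len(s)` above). A toy sketch, with a made-up two-entry blob standing in for the generated atomText constant:

```go
package main

import "fmt"

// Atom mirrors the generated type: high bits are an offset into atomText,
// the low 8 bits are the string length (off<<8 | len(s), as in gen.go above).
type Atom uint32

// Made-up two-entry blob standing in for the generated atomText constant.
const atomText = "divabbr"

const (
	Div  Atom = 0<<8 | 3 // "div" at offset 0, length 3
	Abbr Atom = 3<<8 | 4 // "abbr" at offset 3, length 4
)

func (a Atom) String() string {
	start := a >> 8
	n := a & 0xff
	return atomText[start : start+n]
}

func main() {
	fmt.Println(Div, Abbr) // div abbr
}
```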
diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go
deleted file mode 100644
index 4548b993d..000000000
--- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
-//This program must be run after mksyscall.go.
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-)
-
-func main() {
- in1, err := ioutil.ReadFile("syscall_darwin.go")
- if err != nil {
- log.Fatalf("can't open syscall_darwin.go: %s", err)
- }
- arch := os.Args[1]
- in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
- if err != nil {
- log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
- }
- in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
- if err != nil {
- log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
- }
- in := string(in1) + string(in2) + string(in3)
-
- trampolines := map[string]bool{}
-
- var out bytes.Buffer
-
- fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
- fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
- fmt.Fprintf(&out, "\n")
- fmt.Fprintf(&out, "// +build go1.12\n")
- fmt.Fprintf(&out, "\n")
- fmt.Fprintf(&out, "#include \"textflag.h\"\n")
- for _, line := range strings.Split(in, "\n") {
- if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
- continue
- }
- fn := line[5 : len(line)-13]
- if !trampolines[fn] {
- trampolines[fn] = true
- fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
- fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
- }
- }
- err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644)
- if err != nil {
- log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err)
- }
-}
diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go
deleted file mode 100644
index eb4332059..000000000
--- a/vendor/golang.org/x/sys/unix/mkpost.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// mkpost processes the output of cgo -godefs to
-// modify the generated types. It is used to clean up
-// the sys API in an architecture specific manner.
-//
-// mkpost is run after cgo -godefs; see README.md.
-package main
-
-import (
- "bytes"
- "fmt"
- "go/format"
- "io/ioutil"
- "log"
- "os"
- "regexp"
-)
-
-func main() {
- // Get the OS and architecture (using GOARCH_TARGET if it exists)
- goos := os.Getenv("GOOS")
- goarch := os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check that we are using the Docker-based build system if we should be.
- if goos == "linux" {
- if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
- os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n")
- os.Stderr.WriteString("See README.md\n")
- os.Exit(1)
- }
- }
-
- b, err := ioutil.ReadAll(os.Stdin)
- if err != nil {
- log.Fatal(err)
- }
-
- if goos == "aix" {
- // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
- // to avoid having both StTimespec and Timespec.
- sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
- b = sttimespec.ReplaceAll(b, []byte("Timespec"))
- }
-
- // Intentionally export __val fields in Fsid and Sigset_t
- valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
- b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
-
- // Intentionally export __fds_bits field in FdSet
- fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
- b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}"))
-
- // If we have empty Ptrace structs, we should delete them. Only s390x emits
- // nonempty Ptrace structs.
- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`)
- b = ptraceRexexp.ReplaceAll(b, nil)
-
- // Replace the control_regs union with a blank identifier for now.
- controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`)
- b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64"))
-
- // Remove fields that are added by glibc
- // Note that this is unstable as the identifers are private.
- removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Convert [65]int8 to [65]byte in Utsname members to simplify
- // conversion to string; see golang.org/issue/20753
- convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`)
- b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte"))
-
- // Convert [1024]int8 to [1024]byte in Ptmget members
- convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`)
- b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte"))
-
- // Remove spare fields (e.g. in Statx_t)
- spareFieldsRegex := regexp.MustCompile(`X__spare\S*`)
- b = spareFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove cgo padding fields
- removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`)
- b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove padding, hidden, or unused fields
- removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`)
- b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
-
- // Remove the first line of warning from cgo
- b = b[bytes.IndexByte(b, '\n')+1:]
- // Modify the command in the header to include:
- // mkpost, our own warning, and a build tag.
- replacement := fmt.Sprintf(`$1 | go run mkpost.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s,%s`, goarch, goos)
- cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
- b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
-
- // Rename Stat_t time fields
- if goos == "freebsd" && goarch == "386" {
- // Hide Stat_t.[AMCB]tim_ext fields
- renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
- b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
- }
- renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
- b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
-
- // gofmt
- b, err = format.Source(b)
- if err != nil {
- log.Fatal(err)
- }
-
- os.Stdout.Write(b)
-}
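The removed mkpost.go is essentially a pipeline of regex rewrites over `cgo -godefs` output. As a rough illustration, here is its Fsid/Sigset_t rule (copied from the file above) applied to a made-up input snippet:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A made-up fragment of cgo -godefs output.
	in := []byte("type Fsid struct {\n\tX__val [2]int32\n}\n")

	// The same rule mkpost.go uses to intentionally export the __val field.
	valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
	out := valRegex.ReplaceAll(in, []byte("type $1 struct {${2}Val$4}"))

	fmt.Print(string(out)) // the X__val field comes out as Val
}
```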
diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go
deleted file mode 100644
index e4af9424e..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_darwin.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named errno.
-
-A line beginning with //sysnb is like //sys, except that the
-goroutine will not be suspended during the execution of the system
-call. This must only be used for system calls which can never
-block, as otherwise the system call could cause all goroutines to
-hang.
-*/
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- plan9 = flag.Bool("plan9", false, "plan9")
- openbsd = flag.Bool("openbsd", false, "openbsd")
- netbsd = flag.Bool("netbsd", false, "netbsd")
- dragonfly = flag.Bool("dragonfly", false, "dragonfly")
- arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair
- tags = flag.String("tags", "", "build tags")
- filename = flag.String("output", "", "output file name (standard output if omitted)")
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksyscall.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- // Get the OS and architecture (using GOARCH_TARGET if it exists)
- goos := os.Getenv("GOOS")
- if goos == "" {
- fmt.Fprintln(os.Stderr, "GOOS not defined in environment")
- os.Exit(1)
- }
- goarch := os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
-
- // Check that we are using the Docker-based build system if we should
- if goos == "linux" {
- if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
- fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n")
- fmt.Fprintf(os.Stderr, "See README.md\n")
- os.Exit(1)
- }
- }
-
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- libc := false
- if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") {
- libc = true
- }
- trampolines := map[string]bool{}
-
- text := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, errno error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, sysname := f[2], f[3], f[4], f[5]
-
- // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
- if goos == "darwin" && !libc && funct == "ClockGettime" {
- continue
- }
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // Go function header.
- outDecl := ""
- if len(out) > 0 {
- outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", "))
- }
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl)
-
- // Check if err return available
- errvar := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- break
- }
- }
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\tvar _p%d *byte\n", n)
- text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name)
- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\tvar _p%d *byte\n", n)
- text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass dummy pointer in that case.
- // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n)
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name)
- text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && (*openbsd || *netbsd) {
- args = append(args, "0")
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else if endianness == "little-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- } else if p.Type == "int64" && *dragonfly {
- if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil {
- args = append(args, "0")
- }
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else if endianness == "little-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" {
- if len(args)%2 == 1 && *arm {
- // arm abi specifies 64-bit argument uses
- // (even, odd) pair
- args = append(args, "0")
- }
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
-
- // Determine which form to use; pad args with zeros.
- asm := "Syscall"
- if nonblock != nil {
- if errvar == "" && goos == "linux" {
- asm = "RawSyscallNoError"
- } else {
- asm = "RawSyscall"
- }
- } else {
- if errvar == "" && goos == "linux" {
- asm = "SyscallNoError"
- }
- }
- if len(args) <= 3 {
- for len(args) < 3 {
- args = append(args, "0")
- }
- } else if len(args) <= 6 {
- asm += "6"
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else if len(args) <= 9 {
- asm += "9"
- for len(args) < 9 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct)
- }
-
- // System call number.
- if sysname == "" {
- sysname = "SYS_" + funct
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToUpper(sysname)
- }
-
- var libcFn string
- if libc {
- asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call
- sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_
- sysname = strings.ToLower(sysname) // lowercase
- if sysname == "getdirentries64" {
- // Special case - libSystem name and
- // raw syscall name don't match.
- sysname = "__getdirentries64"
- }
- libcFn = sysname
- sysname = "funcPC(libc_" + sysname + "_trampoline)"
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" && !*plan9 {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else if p.Name == "err" && *plan9 {
- ret[0] = "r0"
- ret[2] = "e1"
- break
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" || *plan9 {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- if errvar == "" && goos == "linux" {
- // raw syscall without error on Linux, see golang.org/issue/22924
- text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- }
- text += body
-
- if *plan9 && ret[2] == "e1" {
- text += "\tif int32(r0) == -1 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- } else if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = errnoErr(e1)\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n\n"
-
- if libc && !trampolines[libcFn] {
- // some system calls share a trampoline, like read and readlen.
- trampolines[libcFn] = true
- // Declare assembly trampoline.
- text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn)
- // Assembly trampoline calls the libc_* function, which this magic
- // redirects to use the function from libSystem.
- text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn)
- text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn)
- text += "\n"
- }
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-%s
-`
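The removed mksyscall.go reads `//sys` prototypes and emits syscall wrapper bodies. Roughly, and only as an approximation (the prototype, path, and mode below are made up, and the real output lives in package unix using its internal helpers such as `BytePtrFromString` and `errnoErr`), a prototype like `//sys Mkdir(path string, mode uint32) (err error)` expands into something of this shape on a Unix platform:

```go
package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// Mkdir approximates the wrapper generated for:
//
//	//sys	Mkdir(path string, mode uint32) (err error)
func Mkdir(path string, mode uint32) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
	if e1 != 0 {
		err = e1
	}
	return
}

func main() {
	fmt.Println(Mkdir("/tmp/mksyscall-demo", 0o755))
}
```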
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
deleted file mode 100644
index 3be3cdfc3..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_aix.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
- * If go func name needs to be different than its libc name,
- * or the function is not in libc, name could be specified
- * at the end, after "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-*/
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this programs's commandline arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- text := ""
- cExtern := "/*\n#include <stdint.h>\n#include <stddef.h>\n"
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
- // Check if value return, err return available
- errvar := ""
- retvar := ""
- rettype := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- } else {
- retvar = p.Name
- rettype = p.Type
- }
- }
-
- // System call name.
- if sysname == "" {
- sysname = funct
- }
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- cRettype := ""
- if rettype == "unsafe.Pointer" {
- cRettype = "uintptr_t"
- } else if rettype == "uintptr" {
- cRettype = "uintptr_t"
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
- cRettype = "uintptr_t"
- } else if rettype == "int" {
- cRettype = "int"
- } else if rettype == "int32" {
- cRettype = "int"
- } else if rettype == "int64" {
- cRettype = "long long"
- } else if rettype == "uint32" {
- cRettype = "unsigned int"
- } else if rettype == "uint64" {
- cRettype = "unsigned long long"
- } else {
- cRettype = "int"
- }
- if sysname == "exit" {
- cRettype = "void"
- }
-
- // Change p.Types to c
- var cIn []string
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "string" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t", "size_t")
- } else if p.Type == "unsafe.Pointer" {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "uintptr" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "int" {
- cIn = append(cIn, "int")
- } else if p.Type == "int32" {
- cIn = append(cIn, "int")
- } else if p.Type == "int64" {
- cIn = append(cIn, "long long")
- } else if p.Type == "uint32" {
- cIn = append(cIn, "unsigned int")
- } else if p.Type == "uint64" {
- cIn = append(cIn, "unsigned long long")
- } else {
- cIn = append(cIn, "int")
- }
- }
-
- if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- cExtern += "#define c_select select\n"
- }
- // Imports of system calls from libc
- cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
- cIn := strings.Join(cIn, ", ")
- cExtern += fmt.Sprintf("(%s);\n", cIn)
- }
-
- // So file name.
- if *aix {
- if modname == "" {
- modname = "libc.a/shr_64.o"
- } else {
- fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct)
- os.Exit(1)
- }
- }
-
- strconvfunc := "C.CString"
-
- // Go function header.
- if outps != "" {
- outps = fmt.Sprintf(" (%s)", outps)
- }
- if text != "" {
- text += "\n"
- }
-
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- argN := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n))
- n++
- text += fmt.Sprintf("\tvar _p%d int\n", n)
- text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name)
- args = append(args, fmt.Sprintf("C.size_t(_p%d)", n))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- n++
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("_p%d", n))
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
- } else if p.Type == "unsafe.Pointer" {
- args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name))
- } else if p.Type == "int" {
- if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) {
- args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name))
- } else if argN == 0 && funct == "fcntl" {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- }
- } else if p.Type == "int32" {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- } else if p.Type == "int64" {
- args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name))
- } else if p.Type == "uint32" {
- args = append(args, fmt.Sprintf("C.uint(%s)", p.Name))
- } else if p.Type == "uint64" {
- args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name))
- } else if p.Type == "uintptr" {
- args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("C.int(%s)", p.Name))
- }
- argN++
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := ""
- if sysname == "exit" {
- if errvar != "" {
- call += "er :="
- } else {
- call += ""
- }
- } else if errvar != "" {
- call += "r0,er :="
- } else if retvar != "" {
- call += "r0,_ :="
- } else {
- call += ""
- }
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
- } else {
- call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
- }
-
- // Assign return values.
- body := ""
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- } else {
- reg = "r0"
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
-
- // verify return
- if sysname != "exit" && errvar != "" {
- if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil {
- body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- } else {
- body += "\tif (r0 ==-1 && er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- }
- } else if errvar != "" {
- body += "\tif (er != nil) {\n"
- body += fmt.Sprintf("\t\t%s = er\n", errvar)
- body += "\t}\n"
- }
-
- text += fmt.Sprintf("\t%s\n", call)
- text += body
-
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-
-%s
-*/
-import "C"
-import (
- "unsafe"
-)
-
-
-%s
-
-%s
-`
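For reference, a minimal standalone sketch (not taken from the deleted file; the sliceArgs helper name is invented for illustration) of the slice-to-pointer/length conversion the generator above emits for []T parameters: the address of the first element is taken only when the slice is non-empty, so an empty slice is passed as a nil pointer with length 0.

package main

import (
	"fmt"
	"unsafe"
)

// sliceArgs mirrors the emitted pattern: a pointer to the first element
// (nil for an empty slice) plus the length, ready to hand to a call.
func sliceArgs(b []byte) (ptr uintptr, length int) {
	var p *byte
	if len(b) > 0 {
		p = &b[0]
	}
	return uintptr(unsafe.Pointer(p)), len(b)
}

func main() {
	p, n := sliceArgs([]byte("hello"))
	fmt.Println(p != 0, n) // true 5
	p, n = sliceArgs(nil)
	fmt.Println(p, n) // 0 0
}

In the generated wrappers the unsafe.Pointer-to-uintptr conversion happens inline in the call expression rather than being stored, unlike this sketch's return values.
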
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
deleted file mode 100644
index c96009951..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-This program reads a file containing function prototypes
-(like syscall_aix.go) and generates system call bodies.
-The prototypes are marked by lines beginning with "//sys"
-and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
-	* If the go func name needs to be different from its libc name,
-	* or the function is not in libc, the name can be specified
-	* at the end, after the "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-
-
-This program will generate three files and handle both the gc and gccgo implementations:
-  - zsyscall_aix_ppc64.go: the common part of each implementation (error handling, pointer creation)
-  - zsyscall_aix_ppc64_gc.go: the gc part, with //go:cgo_import_dynamic directives and a call to syscall6
-  - zsyscall_aix_ppc64_gccgo.go: the gccgo part, with C function declarations and conversions to C types.
-
-	The generated code looks like this:
-
-zsyscall_aix_ppc64.go
-func asyscall(...) (n int, err error) {
- // Pointer Creation
- r1, e1 := callasyscall(...)
- // Type Conversion
- // Error Handler
- return
-}
-
-zsyscall_aix_ppc64_gc.go
-//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o"
-//go:linkname libc_asyscall libc_asyscall
-var asyscall syscallFunc
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... )
- return
-}
-
-zsyscall_aix_ppc64_gccgo.go
-
-// int asyscall(...)
-
-import "C"
-
-func callasyscall(...) (r1 uintptr, e1 Errno) {
- r1 = uintptr(C.asyscall(...))
- e1 = syscall.GetErrno()
- return
-}
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- aix = flag.Bool("aix", false, "aix")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this program's command-line arguments
-func cmdLine() string {
- return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- // GCCGO
- textgccgo := ""
- cExtern := "/*\n#include <stdint.h>\n"
- // GC
- textgc := ""
- dynimports := ""
- linknames := ""
- var vars []string
- // COMMON
- textcommon := ""
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- if sysname == "" {
- sysname = funct
- }
-
- onlyCommon := false
- if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" {
-				// This function calls another syscall that is already implemented.
-				// Therefore, the gc and gccgo parts must not be generated.
- onlyCommon = true
- }
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
-
- textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- if !onlyCommon {
- textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
- }
-
- // Check if value return, err return available
- errvar := ""
- rettype := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- } else {
- rettype = p.Type
- }
- }
-
- sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`)
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- // GCCGO Prototype return type
- cRettype := ""
- if rettype == "unsafe.Pointer" {
- cRettype = "uintptr_t"
- } else if rettype == "uintptr" {
- cRettype = "uintptr_t"
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil {
- cRettype = "uintptr_t"
- } else if rettype == "int" {
- cRettype = "int"
- } else if rettype == "int32" {
- cRettype = "int"
- } else if rettype == "int64" {
- cRettype = "long long"
- } else if rettype == "uint32" {
- cRettype = "unsigned int"
- } else if rettype == "uint64" {
- cRettype = "unsigned long long"
- } else {
- cRettype = "int"
- }
- if sysname == "exit" {
- cRettype = "void"
- }
-
- // GCCGO Prototype arguments type
- var cIn []string
- for i, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "string" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t", "size_t")
- } else if p.Type == "unsafe.Pointer" {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "uintptr" {
- cIn = append(cIn, "uintptr_t")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil {
- cIn = append(cIn, "uintptr_t")
- } else if p.Type == "int" {
- if (i == 0 || i == 2) && funct == "fcntl" {
-						// These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
- cIn = append(cIn, "uintptr_t")
- } else {
- cIn = append(cIn, "int")
- }
-
- } else if p.Type == "int32" {
- cIn = append(cIn, "int")
- } else if p.Type == "int64" {
- cIn = append(cIn, "long long")
- } else if p.Type == "uint32" {
- cIn = append(cIn, "unsigned int")
- } else if p.Type == "uint64" {
- cIn = append(cIn, "unsigned long long")
- } else {
- cIn = append(cIn, "int")
- }
- }
-
- if !onlyCommon {
- // GCCGO Prototype Generation
- // Imports of system calls from libc
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- cExtern += "#define c_select select\n"
- }
- cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
- cIn := strings.Join(cIn, ", ")
- cExtern += fmt.Sprintf("(%s);\n", cIn)
- }
- // GC Library name
- if modname == "" {
- modname = "libc.a/shr_64.o"
- } else {
-				fmt.Fprintf(os.Stderr, "%s: only syscalls using libc are available\n", funct)
- os.Exit(1)
- }
- sysvarname := fmt.Sprintf("libc_%s", sysname)
-
- if !onlyCommon {
- // GC Runtime import of function to allow cross-platform builds.
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname)
- // GC Link symbol to proc address variable.
- linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname)
- // GC Library proc address variable.
- vars = append(vars, sysvarname)
- }
-
- strconvfunc := "BytePtrFromString"
- strconvtype := "*byte"
-
- // Go function header.
- if outps != "" {
- outps = fmt.Sprintf(" (%s)", outps)
- }
- if textcommon != "" {
- textcommon += "\n"
- }
-
- textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps)
-
-			// Prepare arguments to call.
- var argscommon []string // Arguments in the common part
- var argscall []string // Arguments for call prototype
- var argsgc []string // Arguments for gc call (with syscall6)
- var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall)
- n := 0
- argN := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if p.Type == "string" && errvar != "" {
- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
-
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
-
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n))
- n++
- } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1])
- textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n))
- argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n))
- n++
- } else if p.Type == "int64" && endianness != "" {
-					fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 in 32-bit mode; this case is not yet implemented\n")
-				} else if p.Type == "bool" {
-					fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool; this case is not yet implemented\n")
- } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" {
- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else if p.Type == "int" {
- if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) {
- // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock
- argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
-
- } else {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- }
- } else if p.Type == "int32" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int32", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- } else if p.Type == "int64" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s int64", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name))
- } else if p.Type == "uint32" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name))
- } else if p.Type == "uint64" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name))
- } else if p.Type == "uintptr" {
- argscommon = append(argscommon, p.Name)
- argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name))
- argsgc = append(argsgc, p.Name)
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name))
- } else {
- argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name))
- argscall = append(argscall, fmt.Sprintf("%s int", p.Name))
- argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name))
- argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name))
- }
- argN++
- }
- nargs := len(argsgc)
-
- // COMMON function generation
- argscommonlist := strings.Join(argscommon, ", ")
- callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist)
- ret := []string{"_", "_"}
- body := ""
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[1] = reg
- doErrno = true
- } else {
- reg = "r0"
- ret[0] = reg
- }
- if p.Type == "bool" {
- reg = fmt.Sprintf("%s != 0", reg)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" {
- textcommon += fmt.Sprintf("\t%s\n", callcommon)
- } else {
- textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon)
- }
- textcommon += body
-
- if doErrno {
- textcommon += "\tif e1 != 0 {\n"
- textcommon += "\t\terr = errnoErr(e1)\n"
- textcommon += "\t}\n"
- }
- textcommon += "\treturn\n"
- textcommon += "}\n"
-
- if onlyCommon {
- continue
- }
-
- // CALL Prototype
- callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", "))
-
- // GC function generation
- asm := "syscall6"
- if nonblock != nil {
- asm = "rawSyscall6"
- }
-
- if len(argsgc) <= 6 {
- for len(argsgc) < 6 {
- argsgc = append(argsgc, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct)
- os.Exit(1)
- }
- argsgclist := strings.Join(argsgc, ", ")
- callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist)
-
- textgc += callProto
- textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc)
- textgc += "\treturn\n}\n"
-
- // GCCGO function generation
- argsgccgolist := strings.Join(argsgccgo, ", ")
- var callgccgo string
- if sysname == "select" {
- // select is a keyword of Go. Its name is
- // changed to c_select.
- callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
- } else {
- callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
- }
- textgccgo += callProto
- textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
- textgccgo += "\te1 = syscall.GetErrno()\n"
- textgccgo += "\treturn\n}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
-
- // Print zsyscall_aix_ppc64.go
- err := ioutil.WriteFile("zsyscall_aix_ppc64.go",
- []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- // Print zsyscall_aix_ppc64_gc.go
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go",
- []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-
- // Print zsyscall_aix_ppc64_gccgo.go
- err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go",
- []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)),
- 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
-}
-
-const srcTemplate1 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "unsafe"
-)
-
-
-%s
-
-%s
-`
-const srcTemplate2 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-// +build !gccgo
-
-package %s
-
-import (
- "unsafe"
-)
-%s
-%s
-%s
-type syscallFunc uintptr
-
-var (
-%s
-)
-
-// Implemented in runtime/syscall_aix.go.
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
-
-%s
-`
-const srcTemplate3 = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-// +build gccgo
-
-package %s
-
-%s
-*/
-import "C"
-import (
- "syscall"
-)
-
-
-%s
-
-%s
-`
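As a reading aid for the deleted generator above, a small self-contained sketch that applies its //sys-matching expression to the getsockopt example from the file's own doc comment; the variable names echo the generator's funct, inps, outps, modname and sysname.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expression the generator uses to split a //sys declaration
	// (after it has collapsed runs of whitespace to single spaces).
	re := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`)
	line := "//sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt"
	f := re.FindStringSubmatch(line)
	if f == nil {
		fmt.Println("malformed //sys declaration")
		return
	}
	funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
	fmt.Println(funct)   // getsockopt
	fmt.Println(inps)    // s int, level int, name int, val uintptr, vallen *_Socklen
	fmt.Println(outps)   // err error
	fmt.Println(modname) // libsocket
	fmt.Println(sysname) // getsockopt
}
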
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
deleted file mode 100644
index 3d864738b..000000000
--- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
- This program reads a file containing function prototypes
- (like syscall_solaris.go) and generates system call bodies.
- The prototypes are marked by lines beginning with "//sys"
- and read like func declarations if //sys is replaced by func, but:
- * The parameter lists must give a name for each argument.
- This includes return parameters.
- * The parameter lists must give a type for each argument:
- the (x, y, z int) shorthand is not allowed.
- * If the return parameter is an error number, it must be named err.
-	* If the go func name needs to be different from its libc name,
-	* or the function is not in libc, the name can be specified
-	* at the end, after the "=" sign, like
- //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt
-*/
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- b32 = flag.Bool("b32", false, "32bit big-endian")
- l32 = flag.Bool("l32", false, "32bit little-endian")
- tags = flag.String("tags", "", "build tags")
-)
-
-// cmdLine returns this program's command-line arguments
-func cmdLine() string {
- return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return *tags
-}
-
-// Param is function parameter
-type Param struct {
- Name string
- Type string
-}
-
-// usage prints the program usage
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n")
- os.Exit(1)
-}
-
-// parseParamList parses parameter list and returns a slice of parameters
-func parseParamList(list string) []string {
- list = strings.TrimSpace(list)
- if list == "" {
- return []string{}
- }
- return regexp.MustCompile(`\s*,\s*`).Split(list, -1)
-}
-
-// parseParam splits a parameter into name and type
-func parseParam(p string) Param {
- ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p)
- if ps == nil {
- fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p)
- os.Exit(1)
- }
- return Param{ps[1], ps[2]}
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if len(flag.Args()) <= 0 {
- fmt.Fprintf(os.Stderr, "no files to parse provided\n")
- usage()
- }
-
- endianness := ""
- if *b32 {
- endianness = "big-endian"
- } else if *l32 {
- endianness = "little-endian"
- }
-
- pack := ""
- text := ""
- dynimports := ""
- linknames := ""
- var vars []string
- for _, path := range flag.Args() {
- file, err := os.Open(path)
- if err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- t := s.Text()
- t = strings.TrimSpace(t)
- t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `)
- if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" {
- pack = p[1]
- }
- nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t)
- if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil {
- continue
- }
-
- // Line must be of the form
- // func Open(path string, mode int, perm int) (fd int, err error)
- // Split into name, in params, out params.
- f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t)
- if f == nil {
- fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t)
- os.Exit(1)
- }
- funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6]
-
- // Split argument lists on comma.
- in := parseParamList(inps)
- out := parseParamList(outps)
-
- inps = strings.Join(in, ", ")
- outps = strings.Join(out, ", ")
-
- // Try in vain to keep people from editing this file.
- // The theory is that they jump into the middle of the file
- // without reading the header.
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"
-
-			// Shared object (.so) file name.
- if modname == "" {
- modname = "libc"
- }
-
- // System call name.
- if sysname == "" {
- sysname = funct
- }
-
- // System call pointer variable name.
- sysvarname := fmt.Sprintf("proc%s", sysname)
-
- strconvfunc := "BytePtrFromString"
- strconvtype := "*byte"
-
- sysname = strings.ToLower(sysname) // All libc functions are lowercase.
-
- // Runtime import of function to allow cross-platform builds.
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname)
- // Link symbol to proc address variable.
- linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname)
- // Library proc address variable.
- vars = append(vars, sysvarname)
-
- // Go function header.
- outlist := strings.Join(out, ", ")
- if outlist != "" {
- outlist = fmt.Sprintf(" (%s)", outlist)
- }
- if text != "" {
- text += "\n"
- }
- text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist)
-
- // Check if err return available
- errvar := ""
- for _, param := range out {
- p := parseParam(param)
- if p.Type == "error" {
- errvar = p.Name
- continue
- }
- }
-
- // Prepare arguments to Syscall.
- var args []string
- n := 0
- for _, param := range in {
- p := parseParam(param)
- if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil {
- args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))")
- } else if p.Type == "string" && errvar != "" {
- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name)
- text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if p.Type == "string" {
- fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n")
- text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype)
- text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n))
- n++
- } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil {
- // Convert slice into pointer, length.
- // Have to be careful not to take address of &a[0] if len == 0:
- // pass nil in that case.
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1])
- text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name)
- args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name))
- n++
- } else if p.Type == "int64" && endianness != "" {
- if endianness == "big-endian" {
- args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name))
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name))
- }
- } else if p.Type == "bool" {
- text += fmt.Sprintf("\tvar _p%d uint32\n", n)
- text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n)
- args = append(args, fmt.Sprintf("uintptr(_p%d)", n))
- n++
- } else {
- args = append(args, fmt.Sprintf("uintptr(%s)", p.Name))
- }
- }
- nargs := len(args)
-
- // Determine which form to use; pad args with zeros.
- asm := "sysvicall6"
- if nonblock != nil {
- asm = "rawSysvicall6"
- }
- if len(args) <= 6 {
- for len(args) < 6 {
- args = append(args, "0")
- }
- } else {
- fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path)
- os.Exit(1)
- }
-
- // Actual call.
- arglist := strings.Join(args, ", ")
- call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist)
-
- // Assign return values.
- body := ""
- ret := []string{"_", "_", "_"}
- doErrno := false
- for i := 0; i < len(out); i++ {
- p := parseParam(out[i])
- reg := ""
- if p.Name == "err" {
- reg = "e1"
- ret[2] = reg
- doErrno = true
- } else {
- reg = fmt.Sprintf("r%d", i)
- ret[i] = reg
- }
- if p.Type == "bool" {
-					reg = fmt.Sprintf("%s != 0", reg)
- }
- if p.Type == "int64" && endianness != "" {
- // 64-bit number in r1:r0 or r0:r1.
- if i+2 > len(out) {
- fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path)
- os.Exit(1)
- }
- if endianness == "big-endian" {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1)
- } else {
- reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i)
- }
- ret[i] = fmt.Sprintf("r%d", i)
- ret[i+1] = fmt.Sprintf("r%d", i+1)
- }
- if reg != "e1" {
- body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg)
- }
- }
- if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" {
- text += fmt.Sprintf("\t%s\n", call)
- } else {
- text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call)
- }
- text += body
-
- if doErrno {
- text += "\tif e1 != 0 {\n"
- text += "\t\terr = e1\n"
- text += "\t}\n"
- }
- text += "\treturn\n"
- text += "}\n"
- }
- if err := s.Err(); err != nil {
- fmt.Fprintf(os.Stderr, err.Error())
- os.Exit(1)
- }
- file.Close()
- }
- imp := ""
- if pack != "unix" {
- imp = "import \"golang.org/x/sys/unix\"\n"
-
- }
- vardecls := "\t" + strings.Join(vars, ",\n\t")
- vardecls += " syscallFunc"
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package %s
-
-import (
- "syscall"
- "unsafe"
-)
-%s
-%s
-%s
-var (
-%s
-)
-
-%s
-`
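A minimal sketch (not from the deleted file; splitInt64 is an invented name) of the int64 argument splitting done above when -b32 or -l32 is given: a 64-bit value is passed as two machine words, high word first on big-endian targets and low word first on little-endian ones. On a 64-bit host uintptr keeps all 64 bits, so the example only illustrates the ordering; on the 32-bit targets the generator serves, the second conversion truncates to the low 32 bits.

package main

import "fmt"

// splitInt64 mirrors the argument order the generator emits for 32-bit targets.
func splitInt64(v int64, bigEndian bool) (first, second uintptr) {
	if bigEndian {
		return uintptr(v >> 32), uintptr(v)
	}
	return uintptr(v), uintptr(v >> 32)
}

func main() {
	a, b := splitInt64(0x0102030405060708, true)
	fmt.Printf("%#x %#x\n", a, b) // 0x1020304 0x102030405060708 on a 64-bit host
}
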
diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
deleted file mode 100644
index b6b409909..000000000
--- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Parse the header files for OpenBSD and generate a Go-usable sysctl MIB.
-//
-// Build a MIB in which each entry is an array containing the level, the type, and
-// a hash that holds additional entries if the current entry is a node.
-// We then walk this MIB and create a flattened sysctl-name-to-OID hash.
-
-package main
-
-import (
- "bufio"
- "fmt"
- "os"
- "path/filepath"
- "regexp"
- "sort"
- "strings"
-)
-
-var (
- goos, goarch string
-)
-
-// cmdLine returns this program's command-line arguments.
-func cmdLine() string {
- return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags.
-func buildTags() string {
- return fmt.Sprintf("%s,%s", goarch, goos)
-}
-
-// reMatch performs a regular expression match and stores the submatch slice in the value pointed to by m.
-func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
- *m = re.FindStringSubmatch(str)
- if *m != nil {
- return true
- }
- return false
-}
-
-type nodeElement struct {
- n int
- t string
- pE *map[string]nodeElement
-}
-
-var (
- debugEnabled bool
- mib map[string]nodeElement
- node *map[string]nodeElement
- nodeMap map[string]string
- sysCtl []string
-)
-
-var (
- ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
- ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
- ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
- netInetRE = regexp.MustCompile(`^netinet/`)
- netInet6RE = regexp.MustCompile(`^netinet6/`)
- netRE = regexp.MustCompile(`^net/`)
- bracesRE = regexp.MustCompile(`{.*}`)
- ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
- fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
-)
-
-func debug(s string) {
- if debugEnabled {
- fmt.Fprintln(os.Stderr, s)
- }
-}
-
-// Walk the MIB and build a sysctl name to OID mapping.
-func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
- lNode := pNode // local copy of pointer to node
- var keys []string
- for k := range *lNode {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, key := range keys {
- nodename := name
- if name != "" {
- nodename += "."
- }
- nodename += key
-
- nodeoid := append(oid, (*pNode)[key].n)
-
- if (*pNode)[key].t == `CTLTYPE_NODE` {
- if _, ok := nodeMap[nodename]; ok {
- lNode = &mib
- ctlName := nodeMap[nodename]
- for _, part := range strings.Split(ctlName, ".") {
- lNode = ((*lNode)[part]).pE
- }
- } else {
- lNode = (*pNode)[key].pE
- }
- buildSysctl(lNode, nodename, nodeoid)
- } else if (*pNode)[key].t != "" {
- oidStr := []string{}
- for j := range nodeoid {
- oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
- }
- text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
- sysCtl = append(sysCtl, text)
- }
- }
-}
-
-func main() {
-	// Get the OS (using GOOS_TARGET if it exists)
- goos = os.Getenv("GOOS_TARGET")
- if goos == "" {
- goos = os.Getenv("GOOS")
- }
- // Get the architecture (using GOARCH_TARGET if it exists)
- goarch = os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check if GOOS and GOARCH environment variables are defined
- if goarch == "" || goos == "" {
- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
- os.Exit(1)
- }
-
- mib = make(map[string]nodeElement)
- headers := [...]string{
- `sys/sysctl.h`,
- `sys/socket.h`,
- `sys/tty.h`,
- `sys/malloc.h`,
- `sys/mount.h`,
- `sys/namei.h`,
- `sys/sem.h`,
- `sys/shm.h`,
- `sys/vmmeter.h`,
- `uvm/uvmexp.h`,
- `uvm/uvm_param.h`,
- `uvm/uvm_swap_encrypt.h`,
- `ddb/db_var.h`,
- `net/if.h`,
- `net/if_pfsync.h`,
- `net/pipex.h`,
- `netinet/in.h`,
- `netinet/icmp_var.h`,
- `netinet/igmp_var.h`,
- `netinet/ip_ah.h`,
- `netinet/ip_carp.h`,
- `netinet/ip_divert.h`,
- `netinet/ip_esp.h`,
- `netinet/ip_ether.h`,
- `netinet/ip_gre.h`,
- `netinet/ip_ipcomp.h`,
- `netinet/ip_ipip.h`,
- `netinet/pim_var.h`,
- `netinet/tcp_var.h`,
- `netinet/udp_var.h`,
- `netinet6/in6.h`,
- `netinet6/ip6_divert.h`,
- `netinet6/pim6_var.h`,
- `netinet/icmp6.h`,
- `netmpls/mpls.h`,
- }
-
- ctls := [...]string{
- `kern`,
- `vm`,
- `fs`,
- `net`,
- //debug /* Special handling required */
- `hw`,
- //machdep /* Arch specific */
- `user`,
- `ddb`,
- //vfs /* Special handling required */
- `fs.posix`,
- `kern.forkstat`,
- `kern.intrcnt`,
- `kern.malloc`,
- `kern.nchstats`,
- `kern.seminfo`,
- `kern.shminfo`,
- `kern.timecounter`,
- `kern.tty`,
- `kern.watchdog`,
- `net.bpf`,
- `net.ifq`,
- `net.inet`,
- `net.inet.ah`,
- `net.inet.carp`,
- `net.inet.divert`,
- `net.inet.esp`,
- `net.inet.etherip`,
- `net.inet.gre`,
- `net.inet.icmp`,
- `net.inet.igmp`,
- `net.inet.ip`,
- `net.inet.ip.ifq`,
- `net.inet.ipcomp`,
- `net.inet.ipip`,
- `net.inet.mobileip`,
- `net.inet.pfsync`,
- `net.inet.pim`,
- `net.inet.tcp`,
- `net.inet.udp`,
- `net.inet6`,
- `net.inet6.divert`,
- `net.inet6.ip6`,
- `net.inet6.icmp6`,
- `net.inet6.pim6`,
- `net.inet6.tcp6`,
- `net.inet6.udp6`,
- `net.mpls`,
- `net.mpls.ifq`,
- `net.key`,
- `net.pflow`,
- `net.pfsync`,
- `net.pipex`,
- `net.rt`,
- `vm.swapencrypt`,
- //vfsgenctl /* Special handling required */
- }
-
- // Node name "fixups"
- ctlMap := map[string]string{
- "ipproto": "net.inet",
- "net.inet.ipproto": "net.inet",
- "net.inet6.ipv6proto": "net.inet6",
- "net.inet6.ipv6": "net.inet6.ip6",
- "net.inet.icmpv6": "net.inet6.icmp6",
- "net.inet6.divert6": "net.inet6.divert",
- "net.inet6.tcp6": "net.inet.tcp",
- "net.inet6.udp6": "net.inet.udp",
- "mpls": "net.mpls",
- "swpenc": "vm.swapencrypt",
- }
-
- // Node mappings
- nodeMap = map[string]string{
- "net.inet.ip.ifq": "net.ifq",
- "net.inet.pfsync": "net.pfsync",
- "net.mpls.ifq": "net.ifq",
- }
-
- mCtls := make(map[string]bool)
- for _, ctl := range ctls {
- mCtls[ctl] = true
- }
-
- for _, header := range headers {
- debug("Processing " + header)
- file, err := os.Open(filepath.Join("/usr/include", header))
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- s := bufio.NewScanner(file)
- for s.Scan() {
- var sub []string
- if reMatch(ctlNames1RE, s.Text(), &sub) ||
- reMatch(ctlNames2RE, s.Text(), &sub) ||
- reMatch(ctlNames3RE, s.Text(), &sub) {
- if sub[1] == `CTL_NAMES` {
- // Top level.
- node = &mib
- } else {
- // Node.
- nodename := strings.ToLower(sub[2])
- ctlName := ""
- if reMatch(netInetRE, header, &sub) {
- ctlName = "net.inet." + nodename
- } else if reMatch(netInet6RE, header, &sub) {
- ctlName = "net.inet6." + nodename
- } else if reMatch(netRE, header, &sub) {
- ctlName = "net." + nodename
- } else {
- ctlName = nodename
- ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
- }
-
- if val, ok := ctlMap[ctlName]; ok {
- ctlName = val
- }
- if _, ok := mCtls[ctlName]; !ok {
- debug("Ignoring " + ctlName + "...")
- continue
- }
-
- // Walk down from the top of the MIB.
- node = &mib
- for _, part := range strings.Split(ctlName, ".") {
- if _, ok := (*node)[part]; !ok {
- debug("Missing node " + part)
- (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
- }
- node = (*node)[part].pE
- }
- }
-
- // Populate current node with entries.
- i := -1
- for !strings.HasPrefix(s.Text(), "}") {
- s.Scan()
- if reMatch(bracesRE, s.Text(), &sub) {
- i++
- }
- if !reMatch(ctlTypeRE, s.Text(), &sub) {
- continue
- }
- (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
- }
- }
- }
- err = s.Err()
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- file.Close()
- }
- buildSysctl(&mib, "", []int{})
-
- sort.Strings(sysCtl)
- text := strings.Join(sysCtl, "")
-
- fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
-}
-
-const srcTemplate = `// %s
-// Code generated by the command above; DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-type mibentry struct {
- ctlname string
- ctloid []_C_int
-}
-
-var sysctlMib = []mibentry {
-%s
-}
-`
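For orientation, a small sketch of how the flattened table that the deleted mksysctl_openbsd.go emits is shaped and consulted. The two OIDs are illustrative sample values, and the generated file uses _C_int where this sketch uses int32.

package main

import "fmt"

// mibentry mirrors the struct emitted into the generated file.
type mibentry struct {
	ctlname string
	ctloid  []int32
}

// sysctlMib holds the flattened name-to-OID mapping; the real entries are
// produced by walking the MIB built from the OpenBSD headers.
var sysctlMib = []mibentry{
	{"kern.ostype", []int32{1, 1}},
	{"kern.hostname", []int32{1, 10}},
}

// lookup returns the OID for a dotted sysctl name, if present.
func lookup(name string) ([]int32, bool) {
	for _, e := range sysctlMib {
		if e.ctlname == name {
			return e.ctloid, true
		}
	}
	return nil, false
}

func main() {
	oid, ok := lookup("kern.hostname")
	fmt.Println(oid, ok) // [1 10] true
}
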
diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go
deleted file mode 100644
index baa6ecd85..000000000
--- a/vendor/golang.org/x/sys/unix/mksysnum.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Generate system call table for DragonFly, NetBSD,
-// FreeBSD, OpenBSD or Darwin from master list
-// (for example, /usr/src/sys/kern/syscalls.master or
-// sys/syscall.h).
-package main
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- goos, goarch string
-)
-
-// cmdLine returns this program's command-line arguments
-func cmdLine() string {
- return "go run mksysnum.go " + strings.Join(os.Args[1:], " ")
-}
-
-// buildTags returns build tags
-func buildTags() string {
- return fmt.Sprintf("%s,%s", goarch, goos)
-}
-
-func checkErr(err error) {
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-}
-
-// source string and substring slice for regexp
-type re struct {
- str string // source string
- sub []string // matched sub-string
-}
-
-// Match performs a regular expression match
-func (r *re) Match(exp string) bool {
- r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str)
- if r.sub != nil {
- return true
- }
- return false
-}
-
-// fetchFile fetches a text file from a URL
-func fetchFile(URL string) io.Reader {
- resp, err := http.Get(URL)
- checkErr(err)
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- checkErr(err)
- return strings.NewReader(string(body))
-}
-
-// readFile reads a text file from path
-func readFile(path string) io.Reader {
-	file, err := os.Open(path)
- checkErr(err)
- return file
-}
-
-func format(name, num, proto string) string {
- name = strings.ToUpper(name)
- // There are multiple entries for enosys and nosys, so comment them out.
- nm := re{str: name}
- if nm.Match(`^SYS_E?NOSYS$`) {
- name = fmt.Sprintf("// %s", name)
- }
- if name == `SYS_SYS_EXIT` {
- name = `SYS_EXIT`
- }
- return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
-}
-
-func main() {
-	// Get the OS (using GOOS_TARGET if it exists)
- goos = os.Getenv("GOOS_TARGET")
- if goos == "" {
- goos = os.Getenv("GOOS")
- }
- // Get the architecture (using GOARCH_TARGET if it exists)
- goarch = os.Getenv("GOARCH_TARGET")
- if goarch == "" {
- goarch = os.Getenv("GOARCH")
- }
- // Check if GOOS and GOARCH environment variables are defined
- if goarch == "" || goos == "" {
- fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
- os.Exit(1)
- }
-
- file := strings.TrimSpace(os.Args[1])
- var syscalls io.Reader
- if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") {
- // Download syscalls.master file
- syscalls = fetchFile(file)
- } else {
- syscalls = readFile(file)
- }
-
- var text, line string
- s := bufio.NewScanner(syscalls)
- for s.Scan() {
- t := re{str: line}
- if t.Match(`^(.*)\\$`) {
- // Handle continuation
- line = t.sub[1]
- line += strings.TrimLeft(s.Text(), " \t")
- } else {
- // New line
- line = s.Text()
- }
- t = re{str: line}
- if t.Match(`\\$`) {
- continue
- }
- t = re{str: line}
-
- switch goos {
- case "dragonfly":
- if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) {
- num, proto := t.sub[1], t.sub[2]
- name := fmt.Sprintf("SYS_%s", t.sub[3])
- text += format(name, num, proto)
- }
- case "freebsd":
- if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
- num, proto := t.sub[1], t.sub[2]
- name := fmt.Sprintf("SYS_%s", t.sub[3])
- text += format(name, num, proto)
- }
- case "openbsd":
- if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) {
- num, proto, name := t.sub[1], t.sub[3], t.sub[4]
- text += format(name, num, proto)
- }
- case "netbsd":
- if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) {
- num, proto, compat := t.sub[1], t.sub[6], t.sub[8]
- name := t.sub[7] + "_" + t.sub[9]
- if t.sub[11] != "" {
- name = t.sub[7] + "_" + t.sub[11]
- }
- name = strings.ToUpper(name)
- if compat == "" || compat == "13" || compat == "30" || compat == "50" {
- text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto)
- }
- }
- case "darwin":
- if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) {
- name, num := t.sub[1], t.sub[2]
- name = strings.ToUpper(name)
- text += fmt.Sprintf(" SYS_%s = %s;\n", name, num)
- }
- default:
- fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos)
- os.Exit(1)
-
- }
- }
- err := s.Err()
- checkErr(err)
-
- fmt.Printf(template, cmdLine(), buildTags(), text)
-}
-
-const template = `// %s
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build %s
-
-package unix
-
-const(
-%s)`
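A self-contained sketch of the GOOS=openbsd branch above: a sample syscalls.master-style line (illustrative, not copied from the OpenBSD sources) is run through the same expression and printed in the constant format the generator's format helper uses.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same pattern the deleted generator applies for GOOS=openbsd.
	re := regexp.MustCompile(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`)
	line := "4\tSTD\t\t{ ssize_t sys_write(int fd, const void *buf, size_t nbyte); }"
	if m := re.FindStringSubmatch(line); m != nil {
		num, proto, name := m[1], m[3], m[4]
		fmt.Printf(" %s = %s; // %s\n", strings.ToUpper(name), num, proto)
		// Prints: SYS_WRITE = 4; // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); }
	}
}
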
diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go
deleted file mode 100644
index 40d2beede..000000000
--- a/vendor/golang.org/x/sys/unix/types_aix.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-// +build aix
-
-/*
-Input to cgo -godefs. See also mkerrors.sh and mkall.sh
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/limits.h>
-#include <sys/un.h>
-#include <utime.h>
-#include <sys/utsname.h>
-#include <sys/poll.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/termio.h>
-#include <sys/ioctl.h>
-
-#include <termios.h>
-
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-
-
-#include <dirent.h>
-#include <fcntl.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
- PathMax = C.PATH_MAX
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-type off64 C.off64_t
-type off C.off_t
-type Mode_t C.mode_t
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-type Timex C.struct_timex
-
-type Time_t C.time_t
-
-type Tms C.struct_tms
-
-type Utimbuf C.struct_utimbuf
-
-type Timezone C.struct_timezone
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit64
-
-type Pid_t C.pid_t
-
-type _Gid_t C.gid_t
-
-type dev_t C.dev_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type StatxTimestamp C.struct_statx_timestamp
-
-type Statx_t C.struct_statx
-
-type Dirent C.struct_dirent
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Cmsghdr C.struct_cmsghdr
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type Linger C.struct_linger
-
-type Msghdr C.struct_msghdr
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
-)
-
-type IfMsgHdr C.struct_if_msghdr
-
-// Misc
-
-type FdSet C.fd_set
-
-type Utsname C.struct_utsname
-
-type Ustat_t C.struct_ustat
-
-type Sigset_t C.sigset_t
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Termio C.struct_termio
-
-type Winsize C.struct_winsize
-
-//poll
-
-type PollFd struct {
- Fd int32
- Events uint16
- Revents uint16
-}
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-//flock_t
-
-type Flock_t C.struct_flock64
-
-// Statfs
-
-type Fsid_t C.struct_fsid_t
-type Fsid64_t C.struct_fsid64_t
-
-type Statfs_t C.struct_statfs
-
-const RNDGETENTCNT = 0x80045200
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
deleted file mode 100644
index 155c2e692..000000000
--- a/vendor/golang.org/x/sys/unix/types_darwin.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define __DARWIN_UNIX03 0
-#define KERNEL
-#define _DARWIN_USE_64_BIT_INODE
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <unistd.h>
-#include <mach/mach.h>
-#include <mach/message.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/if_var.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat64
-
-type Statfs_t C.struct_statfs64
-
-type Flock_t C.struct_flock
-
-type Fstore_t C.struct_fstore
-
-type Radvisory_t C.struct_radvisory
-
-type Fbootstraptransfer_t C.struct_fbootstraptransfer
-
-type Log2phys_t C.struct_log2phys
-
-type Fsid C.struct_fsid
-
-type Dirent C.struct_dirent
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet4Pktinfo C.struct_in_pktinfo
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfmaMsghdr2 C.struct_ifma_msghdr2
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// uname
-
-type Utsname C.struct_utsname
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go
deleted file mode 100644
index 3365dd79d..000000000
--- a/vendor/golang.org/x/sys/unix/types_dragonfly.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.struct_fsid
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Uname
-
-type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go
deleted file mode 100644
index a121dc336..000000000
--- a/vendor/golang.org/x/sys/unix/types_freebsd.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define _WANT_FREEBSD11_STAT 1
-#define _WANT_FREEBSD11_STATFS 1
-#define _WANT_FREEBSD11_DIRENT 1
-#define _WANT_FREEBSD11_KEVENT 1
-
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/capsicum.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-// This structure is a duplicate of if_data on FreeBSD 8-STABLE.
-// See /usr/include/net/if.h.
-struct if_data8 {
- u_char ifi_type;
- u_char ifi_physical;
- u_char ifi_addrlen;
- u_char ifi_hdrlen;
- u_char ifi_link_state;
- u_char ifi_spare_char1;
- u_char ifi_spare_char2;
- u_char ifi_datalen;
- u_long ifi_mtu;
- u_long ifi_metric;
- u_long ifi_baudrate;
- u_long ifi_ipackets;
- u_long ifi_ierrors;
- u_long ifi_opackets;
- u_long ifi_oerrors;
- u_long ifi_collisions;
- u_long ifi_ibytes;
- u_long ifi_obytes;
- u_long ifi_imcasts;
- u_long ifi_omcasts;
- u_long ifi_iqdrops;
- u_long ifi_noproto;
- u_long ifi_hwassist;
-// FIXME: these are now unions, so maybe need to change definitions?
-#undef ifi_epoch
- time_t ifi_epoch;
-#undef ifi_lastchange
- struct timeval ifi_lastchange;
-};
-
-// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE.
-// See /usr/include/net/if.h.
-struct if_msghdr8 {
- u_short ifm_msglen;
- u_char ifm_version;
- u_char ifm_type;
- int ifm_addrs;
- int ifm_flags;
- u_short ifm_index;
- struct if_data8 ifm_data;
-};
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-const (
- _statfsVersion = C.STATFS_VERSION
- _dirblksiz = C.DIRBLKSIZ
-)
-
-type Stat_t C.struct_stat
-
-type stat_freebsd11_t C.struct_freebsd11_stat
-
-type Statfs_t C.struct_statfs
-
-type statfs_freebsd11_t C.struct_freebsd11_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type dirent_freebsd11 C.struct_freebsd11_dirent
-
-type Fsid C.struct_fsid
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Advice to Fadvise
-
-const (
- FADV_NORMAL = C.POSIX_FADV_NORMAL
- FADV_RANDOM = C.POSIX_FADV_RANDOM
- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
- FADV_WILLNEED = C.POSIX_FADV_WILLNEED
- FADV_DONTNEED = C.POSIX_FADV_DONTNEED
- FADV_NOREUSE = C.POSIX_FADV_NOREUSE
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPMreqn C.struct_ip_mreqn
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPMreqn = C.sizeof_struct_ip_mreqn
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_ATTACH = C.PT_ATTACH
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_DETACH = C.PT_DETACH
- PTRACE_GETFPREGS = C.PT_GETFPREGS
- PTRACE_GETFSBASE = C.PT_GETFSBASE
- PTRACE_GETLWPLIST = C.PT_GETLWPLIST
- PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
- PTRACE_GETREGS = C.PT_GETREGS
- PTRACE_GETXSTATE = C.PT_GETXSTATE
- PTRACE_IO = C.PT_IO
- PTRACE_KILL = C.PT_KILL
- PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
- PTRACE_LWPINFO = C.PT_LWPINFO
- PTRACE_SETFPREGS = C.PT_SETFPREGS
- PTRACE_SETREGS = C.PT_SETREGS
- PTRACE_SINGLESTEP = C.PT_STEP
- PTRACE_TRACEME = C.PT_TRACE_ME
-)
-
-const (
- PIOD_READ_D = C.PIOD_READ_D
- PIOD_WRITE_D = C.PIOD_WRITE_D
- PIOD_READ_I = C.PIOD_READ_I
- PIOD_WRITE_I = C.PIOD_WRITE_I
-)
-
-const (
- PL_FLAG_BORN = C.PL_FLAG_BORN
- PL_FLAG_EXITED = C.PL_FLAG_EXITED
- PL_FLAG_SI = C.PL_FLAG_SI
-)
-
-const (
- TRAP_BRKPT = C.TRAP_BRKPT
- TRAP_TRACE = C.TRAP_TRACE
-)
-
-type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
-
-type __Siginfo C.struct___siginfo
-
-type Sigset_t C.sigset_t
-
-type Reg C.struct_reg
-
-type FpReg C.struct_fpreg
-
-type PtraceIoDesc C.struct_ptrace_io_desc
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent_freebsd11
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- sizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr8
- sizeofIfData = C.sizeof_struct_if_data
- SizeofIfData = C.sizeof_struct_if_data8
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type ifMsghdr C.struct_if_msghdr
-
-type IfMsghdr C.struct_if_msghdr8
-
-type ifData C.struct_if_data
-
-type IfData C.struct_if_data8
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfmaMsghdr C.struct_ifma_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
- SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfZbuf C.struct_bpf_zbuf
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfZbufHeader C.struct_bpf_zbuf_header
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLINIGNEOF = C.POLLINIGNEOF
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Capabilities
-
-type CapRights C.struct_cap_rights
-
-// Uname
-
-type Utsname C.struct_utsname
diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go
deleted file mode 100644
index 4a96d72c3..000000000
--- a/vendor/golang.org/x/sys/unix/types_netbsd.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/sysctl.h>
-#include <sys/time.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.fsid_t
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Advice to Fadvise
-
-const (
- FADV_NORMAL = C.POSIX_FADV_NORMAL
- FADV_RANDOM = C.POSIX_FADV_RANDOM
- FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
- FADV_WILLNEED = C.POSIX_FADV_WILLNEED
- FADV_DONTNEED = C.POSIX_FADV_DONTNEED
- FADV_NOREUSE = C.POSIX_FADV_NOREUSE
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-type Mclpool C.struct_mclpool
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfTimeval C.struct_bpf_timeval
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-type Ptmget C.struct_ptmget
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Sysctl
-
-type Sysctlnode C.struct_sysctlnode
-
-// Uname
-
-type Utsname C.struct_utsname
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go
deleted file mode 100644
index 775cb57dc..000000000
--- a/vendor/golang.org/x/sys/unix/types_openbsd.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-#include <dirent.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/event.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/uio.h>
-#include <sys/un.h>
-#include <sys/utsname.h>
-#include <sys/wait.h>
-#include <uvm/uvmexp.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Statfs_t C.struct_statfs
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-type Fsid C.fsid_t
-
-// File system limits
-
-const (
- PathMax = C.PATH_MAX
-)
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Ptrace requests
-
-const (
- PTRACE_TRACEME = C.PT_TRACE_ME
- PTRACE_CONT = C.PT_CONTINUE
- PTRACE_KILL = C.PT_KILL
-)
-
-// Events (kqueue, kevent)
-
-type Kevent_t C.struct_kevent
-
-// Select
-
-type FdSet C.fd_set
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type IfAnnounceMsghdr C.struct_if_announcemsghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-type Mclpool C.struct_mclpool
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfHdr C.struct_bpf_hdr
-
-type BpfTimeval C.struct_bpf_timeval
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Winsize C.struct_winsize
-
-// fchmodat-like syscalls.
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
-)
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
-
-// Signal Sets
-
-type Sigset_t C.sigset_t
-
-// Uname
-
-type Utsname C.struct_utsname
-
-// Uvmexp
-
-const SizeofUvmexp = C.sizeof_struct_uvmexp
-
-type Uvmexp C.struct_uvmexp
-
-// Clockinfo
-
-const SizeofClockinfo = C.sizeof_struct_clockinfo
-
-type Clockinfo C.struct_clockinfo
diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go
deleted file mode 100644
index 2b716f934..000000000
--- a/vendor/golang.org/x/sys/unix/types_solaris.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-Input to cgo -godefs. See README.md
-*/
-
-// +godefs map struct_in_addr [4]byte /* in_addr */
-// +godefs map struct_in6_addr [16]byte /* in6_addr */
-
-package unix
-
-/*
-#define KERNEL
-// These defines ensure that builds done on newer versions of Solaris are
-// backwards-compatible with older versions of Solaris and
-// OpenSolaris-based derivatives.
-#define __USE_SUNOS_SOCKETS__ // msghdr
-#define __USE_LEGACY_PROTOTYPES__ // iovec
-#include <dirent.h>
-#include <fcntl.h>
-#include <netdb.h>
-#include <limits.h>
-#include <poll.h>
-#include <signal.h>
-#include <termios.h>
-#include <termio.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-#include <sys/resource.h>
-#include <sys/select.h>
-#include <sys/signal.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/statvfs.h>
-#include <sys/time.h>
-#include <sys/times.h>
-#include <sys/types.h>
-#include <sys/utsname.h>
-#include <sys/un.h>
-#include <sys/wait.h>
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/route.h>
-#include <netinet/in.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp.h>
-#include <ustat.h>
-#include <utime.h>
-
-enum {
- sizeofPtr = sizeof(void*),
-};
-
-union sockaddr_all {
- struct sockaddr s1; // this one gets used for fields
- struct sockaddr_in s2; // these pad it out
- struct sockaddr_in6 s3;
- struct sockaddr_un s4;
- struct sockaddr_dl s5;
-};
-
-struct sockaddr_any {
- struct sockaddr addr;
- char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
-};
-
-*/
-import "C"
-
-// Machine characteristics
-
-const (
- SizeofPtr = C.sizeofPtr
- SizeofShort = C.sizeof_short
- SizeofInt = C.sizeof_int
- SizeofLong = C.sizeof_long
- SizeofLongLong = C.sizeof_longlong
- PathMax = C.PATH_MAX
- MaxHostNameLen = C.MAXHOSTNAMELEN
-)
-
-// Basic types
-
-type (
- _C_short C.short
- _C_int C.int
- _C_long C.long
- _C_long_long C.longlong
-)
-
-// Time
-
-type Timespec C.struct_timespec
-
-type Timeval C.struct_timeval
-
-type Timeval32 C.struct_timeval32
-
-type Tms C.struct_tms
-
-type Utimbuf C.struct_utimbuf
-
-// Processes
-
-type Rusage C.struct_rusage
-
-type Rlimit C.struct_rlimit
-
-type _Gid_t C.gid_t
-
-// Files
-
-type Stat_t C.struct_stat
-
-type Flock_t C.struct_flock
-
-type Dirent C.struct_dirent
-
-// Filesystems
-
-type _Fsblkcnt_t C.fsblkcnt_t
-
-type Statvfs_t C.struct_statvfs
-
-// Sockets
-
-type RawSockaddrInet4 C.struct_sockaddr_in
-
-type RawSockaddrInet6 C.struct_sockaddr_in6
-
-type RawSockaddrUnix C.struct_sockaddr_un
-
-type RawSockaddrDatalink C.struct_sockaddr_dl
-
-type RawSockaddr C.struct_sockaddr
-
-type RawSockaddrAny C.struct_sockaddr_any
-
-type _Socklen C.socklen_t
-
-type Linger C.struct_linger
-
-type Iovec C.struct_iovec
-
-type IPMreq C.struct_ip_mreq
-
-type IPv6Mreq C.struct_ipv6_mreq
-
-type Msghdr C.struct_msghdr
-
-type Cmsghdr C.struct_cmsghdr
-
-type Inet6Pktinfo C.struct_in6_pktinfo
-
-type IPv6MTUInfo C.struct_ip6_mtuinfo
-
-type ICMPv6Filter C.struct_icmp6_filter
-
-const (
- SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
- SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
- SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
- SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
- SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
- SizeofLinger = C.sizeof_struct_linger
- SizeofIPMreq = C.sizeof_struct_ip_mreq
- SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
- SizeofMsghdr = C.sizeof_struct_msghdr
- SizeofCmsghdr = C.sizeof_struct_cmsghdr
- SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
- SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
- SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
-)
-
-// Select
-
-type FdSet C.fd_set
-
-// Misc
-
-type Utsname C.struct_utsname
-
-type Ustat_t C.struct_ustat
-
-const (
- AT_FDCWD = C.AT_FDCWD
- AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
- AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
- AT_REMOVEDIR = C.AT_REMOVEDIR
- AT_EACCESS = C.AT_EACCESS
-)
-
-// Routing and interface messages
-
-const (
- SizeofIfMsghdr = C.sizeof_struct_if_msghdr
- SizeofIfData = C.sizeof_struct_if_data
- SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr
- SizeofRtMsghdr = C.sizeof_struct_rt_msghdr
- SizeofRtMetrics = C.sizeof_struct_rt_metrics
-)
-
-type IfMsghdr C.struct_if_msghdr
-
-type IfData C.struct_if_data
-
-type IfaMsghdr C.struct_ifa_msghdr
-
-type RtMsghdr C.struct_rt_msghdr
-
-type RtMetrics C.struct_rt_metrics
-
-// Berkeley packet filter
-
-const (
- SizeofBpfVersion = C.sizeof_struct_bpf_version
- SizeofBpfStat = C.sizeof_struct_bpf_stat
- SizeofBpfProgram = C.sizeof_struct_bpf_program
- SizeofBpfInsn = C.sizeof_struct_bpf_insn
- SizeofBpfHdr = C.sizeof_struct_bpf_hdr
-)
-
-type BpfVersion C.struct_bpf_version
-
-type BpfStat C.struct_bpf_stat
-
-type BpfProgram C.struct_bpf_program
-
-type BpfInsn C.struct_bpf_insn
-
-type BpfTimeval C.struct_bpf_timeval
-
-type BpfHdr C.struct_bpf_hdr
-
-// Terminal handling
-
-type Termios C.struct_termios
-
-type Termio C.struct_termio
-
-type Winsize C.struct_winsize
-
-// poll
-
-type PollFd C.struct_pollfd
-
-const (
- POLLERR = C.POLLERR
- POLLHUP = C.POLLHUP
- POLLIN = C.POLLIN
- POLLNVAL = C.POLLNVAL
- POLLOUT = C.POLLOUT
- POLLPRI = C.POLLPRI
- POLLRDBAND = C.POLLRDBAND
- POLLRDNORM = C.POLLRDNORM
- POLLWRBAND = C.POLLWRBAND
- POLLWRNORM = C.POLLWRNORM
-)
diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go
deleted file mode 100644
index f7941701e..000000000
--- a/vendor/golang.org/x/text/encoding/charmap/maketables.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
- "unicode/utf8"
-
- "golang.org/x/text/encoding"
- "golang.org/x/text/internal/gen"
-)
-
-const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" +
- "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
- ` !"#$%&'()*+,-./0123456789:;<=>?` +
- `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` +
- "`abcdefghijklmnopqrstuvwxyz{|}~\u007f"
-
-var encodings = []struct {
- name string
- mib string
- comment string
- varName string
- replacement byte
- mapping string
-}{
- {
- "IBM Code Page 037",
- "IBM037",
- "",
- "CodePage037",
- 0x3f,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm",
- },
- {
- "IBM Code Page 437",
- "PC8CodePage437",
- "",
- "CodePage437",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm",
- },
- {
- "IBM Code Page 850",
- "PC850Multilingual",
- "",
- "CodePage850",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm",
- },
- {
- "IBM Code Page 852",
- "PCp852",
- "",
- "CodePage852",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm",
- },
- {
- "IBM Code Page 855",
- "IBM855",
- "",
- "CodePage855",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm",
- },
- {
- "Windows Code Page 858", // PC latin1 with Euro
- "IBM00858",
- "",
- "CodePage858",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm",
- },
- {
- "IBM Code Page 860",
- "IBM860",
- "",
- "CodePage860",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm",
- },
- {
- "IBM Code Page 862",
- "PC862LatinHebrew",
- "",
- "CodePage862",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm",
- },
- {
- "IBM Code Page 863",
- "IBM863",
- "",
- "CodePage863",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm",
- },
- {
- "IBM Code Page 865",
- "IBM865",
- "",
- "CodePage865",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm",
- },
- {
- "IBM Code Page 866",
- "IBM866",
- "",
- "CodePage866",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-ibm866.txt",
- },
- {
- "IBM Code Page 1047",
- "IBM1047",
- "",
- "CodePage1047",
- 0x3f,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm",
- },
- {
- "IBM Code Page 1140",
- "IBM01140",
- "",
- "CodePage1140",
- 0x3f,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm",
- },
- {
- "ISO 8859-1",
- "ISOLatin1",
- "",
- "ISO8859_1",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm",
- },
- {
- "ISO 8859-2",
- "ISOLatin2",
- "",
- "ISO8859_2",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-2.txt",
- },
- {
- "ISO 8859-3",
- "ISOLatin3",
- "",
- "ISO8859_3",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-3.txt",
- },
- {
- "ISO 8859-4",
- "ISOLatin4",
- "",
- "ISO8859_4",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-4.txt",
- },
- {
- "ISO 8859-5",
- "ISOLatinCyrillic",
- "",
- "ISO8859_5",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-5.txt",
- },
- {
- "ISO 8859-6",
- "ISOLatinArabic",
- "",
- "ISO8859_6,ISO8859_6E,ISO8859_6I",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-6.txt",
- },
- {
- "ISO 8859-7",
- "ISOLatinGreek",
- "",
- "ISO8859_7",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-7.txt",
- },
- {
- "ISO 8859-8",
- "ISOLatinHebrew",
- "",
- "ISO8859_8,ISO8859_8E,ISO8859_8I",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-8.txt",
- },
- {
- "ISO 8859-9",
- "ISOLatin5",
- "",
- "ISO8859_9",
- encoding.ASCIISub,
- "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm",
- },
- {
- "ISO 8859-10",
- "ISOLatin6",
- "",
- "ISO8859_10",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-10.txt",
- },
- {
- "ISO 8859-13",
- "ISO885913",
- "",
- "ISO8859_13",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-13.txt",
- },
- {
- "ISO 8859-14",
- "ISO885914",
- "",
- "ISO8859_14",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-14.txt",
- },
- {
- "ISO 8859-15",
- "ISO885915",
- "",
- "ISO8859_15",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-15.txt",
- },
- {
- "ISO 8859-16",
- "ISO885916",
- "",
- "ISO8859_16",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-iso-8859-16.txt",
- },
- {
- "KOI8-R",
- "KOI8R",
- "",
- "KOI8R",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-koi8-r.txt",
- },
- {
- "KOI8-U",
- "KOI8U",
- "",
- "KOI8U",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-koi8-u.txt",
- },
- {
- "Macintosh",
- "Macintosh",
- "",
- "Macintosh",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-macintosh.txt",
- },
- {
- "Macintosh Cyrillic",
- "MacintoshCyrillic",
- "",
- "MacintoshCyrillic",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt",
- },
- {
- "Windows 874",
- "Windows874",
- "",
- "Windows874",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-874.txt",
- },
- {
- "Windows 1250",
- "Windows1250",
- "",
- "Windows1250",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1250.txt",
- },
- {
- "Windows 1251",
- "Windows1251",
- "",
- "Windows1251",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1251.txt",
- },
- {
- "Windows 1252",
- "Windows1252",
- "",
- "Windows1252",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1252.txt",
- },
- {
- "Windows 1253",
- "Windows1253",
- "",
- "Windows1253",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1253.txt",
- },
- {
- "Windows 1254",
- "Windows1254",
- "",
- "Windows1254",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1254.txt",
- },
- {
- "Windows 1255",
- "Windows1255",
- "",
- "Windows1255",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1255.txt",
- },
- {
- "Windows 1256",
- "Windows1256",
- "",
- "Windows1256",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1256.txt",
- },
- {
- "Windows 1257",
- "Windows1257",
- "",
- "Windows1257",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1257.txt",
- },
- {
- "Windows 1258",
- "Windows1258",
- "",
- "Windows1258",
- encoding.ASCIISub,
- "http://encoding.spec.whatwg.org/index-windows-1258.txt",
- },
- {
- "X-User-Defined",
- "XUserDefined",
- "It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
- "XUserDefined",
- encoding.ASCIISub,
- ascii +
- "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
- "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
- "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
- "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
- "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
- "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
- "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
- "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
- "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
- "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
- "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
- "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
- "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
- "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
- "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
- "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
- },
-}
-
-func getWHATWG(url string) string {
- res, err := http.Get(url)
- if err != nil {
- log.Fatalf("%q: Get: %v", url, err)
- }
- defer res.Body.Close()
-
- mapping := make([]rune, 128)
- for i := range mapping {
- mapping[i] = '\ufffd'
- }
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := 0, 0
- if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 128 <= x {
- log.Fatalf("code %d is out of range", x)
- }
- if 0x80 <= y && y < 0xa0 {
- // We diverge from the WHATWG spec by mapping control characters
- // in the range [0x80, 0xa0) to U+FFFD.
- continue
- }
- mapping[x] = rune(y)
- }
- return ascii + string(mapping)
-}
-
-func getUCM(url string) string {
- res, err := http.Get(url)
- if err != nil {
- log.Fatalf("%q: Get: %v", url, err)
- }
- defer res.Body.Close()
-
- mapping := make([]rune, 256)
- for i := range mapping {
- mapping[i] = '\ufffd'
- }
-
- charsFound := 0
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- var c byte
- var r rune
- if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
- continue
- }
- mapping[c] = r
- charsFound++
- }
-
- if charsFound < 200 {
- log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
- }
-
- return string(mapping)
-}
-
-func main() {
- mibs := map[string]bool{}
- all := []string{}
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "charmap")
-
- printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
-
- printf("import (\n")
- printf("\t\"golang.org/x/text/encoding\"\n")
- printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
- printf(")\n\n")
- for _, e := range encodings {
- varNames := strings.Split(e.varName, ",")
- all = append(all, varNames...)
- varName := varNames[0]
- switch {
- case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
- e.mapping = getWHATWG(e.mapping)
- case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
- e.mapping = getUCM(e.mapping)
- }
-
- asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
- if asciiSuperset {
- low = 0x80
- }
- lvn := 1
- if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
- lvn = 3
- }
- lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
- printf("// %s is the %s encoding.\n", varName, e.name)
- if e.comment != "" {
- printf("//\n// %s\n", e.comment)
- }
- printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
- varName, lowerVarName, lowerVarName, e.name)
- if mibs[e.mib] {
- log.Fatalf("MIB type %q declared multiple times.", e.mib)
- }
- printf("mib: identifier.%s,\n", e.mib)
- printf("asciiSuperset: %t,\n", asciiSuperset)
- printf("low: 0x%02x,\n", low)
- printf("replacement: 0x%02x,\n", e.replacement)
-
- printf("decode: [256]utf8Enc{\n")
- i, backMapping := 0, map[rune]byte{}
- for _, c := range e.mapping {
- if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
- backMapping[c] = byte(i)
- }
- var buf [8]byte
- n := utf8.EncodeRune(buf[:], c)
- if n > 3 {
- panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
- }
- printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
- if i%2 == 1 {
- printf("\n")
- }
- i++
- }
- printf("},\n")
-
- printf("encode: [256]uint32{\n")
- encode := make([]uint32, 0, 256)
- for c, i := range backMapping {
- encode = append(encode, uint32(i)<<24|uint32(c))
- }
- sort.Sort(byRune(encode))
- for len(encode) < cap(encode) {
- encode = append(encode, encode[len(encode)-1])
- }
- for i, enc := range encode {
- printf("0x%08x,", enc)
- if i%8 == 7 {
- printf("\n")
- }
- }
- printf("},\n}\n")
-
- // Add an estimate of the size of a single Charmap{} struct value, which
- // includes two 256 elem arrays of 4 bytes and some extra fields, which
- // align to 3 uint64s on 64-bit architectures.
- w.Size += 2*4*256 + 3*8
- }
- // TODO: add proper line breaking.
- printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
-}
-
-type byRune []uint32
-
-func (b byRune) Len() int { return len(b) }
-func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
-func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go
deleted file mode 100644
index ac6b4a77f..000000000
--- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "log"
- "strings"
-
- "golang.org/x/text/internal/gen"
-)
-
-type group struct {
- Encodings []struct {
- Labels []string
- Name string
- }
-}
-
-func main() {
- gen.Init()
-
- r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json")
- var groups []group
- if err := json.NewDecoder(r).Decode(&groups); err != nil {
- log.Fatalf("Error reading encodings.json: %v", err)
- }
-
- w := &bytes.Buffer{}
- fmt.Fprintln(w, "type htmlEncoding byte")
- fmt.Fprintln(w, "const (")
- for i, g := range groups {
- for _, e := range g.Encodings {
- key := strings.ToLower(e.Name)
- name := consts[key]
- if name == "" {
- log.Fatalf("No const defined for %s.", key)
- }
- if i == 0 {
- fmt.Fprintf(w, "%s htmlEncoding = iota\n", name)
- } else {
- fmt.Fprintf(w, "%s\n", name)
- }
- }
- }
- fmt.Fprintln(w, "numEncodings")
- fmt.Fprint(w, ")\n\n")
-
- fmt.Fprintln(w, "var canonical = [numEncodings]string{")
- for _, g := range groups {
- for _, e := range g.Encodings {
- fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name))
- }
- }
- fmt.Fprint(w, "}\n\n")
-
- fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{")
- for _, g := range groups {
- for _, e := range g.Encodings {
- for _, l := range e.Labels {
- key := strings.ToLower(e.Name)
- name := consts[key]
- fmt.Fprintf(w, "%q: %s,\n", l, name)
- }
- }
- }
- fmt.Fprint(w, "}\n\n")
-
- var tags []string
- fmt.Fprintln(w, "var localeMap = []htmlEncoding{")
- for _, loc := range locales {
- tags = append(tags, loc.tag)
- fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag)
- }
- fmt.Fprint(w, "}\n\n")
-
- fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " "))
-
- gen.WriteGoFile("tables.go", "htmlindex", w.Bytes())
-}
-
-// consts maps canonical encoding name to internal constant.
-var consts = map[string]string{
- "utf-8": "utf8",
- "ibm866": "ibm866",
- "iso-8859-2": "iso8859_2",
- "iso-8859-3": "iso8859_3",
- "iso-8859-4": "iso8859_4",
- "iso-8859-5": "iso8859_5",
- "iso-8859-6": "iso8859_6",
- "iso-8859-7": "iso8859_7",
- "iso-8859-8": "iso8859_8",
- "iso-8859-8-i": "iso8859_8I",
- "iso-8859-10": "iso8859_10",
- "iso-8859-13": "iso8859_13",
- "iso-8859-14": "iso8859_14",
- "iso-8859-15": "iso8859_15",
- "iso-8859-16": "iso8859_16",
- "koi8-r": "koi8r",
- "koi8-u": "koi8u",
- "macintosh": "macintosh",
- "windows-874": "windows874",
- "windows-1250": "windows1250",
- "windows-1251": "windows1251",
- "windows-1252": "windows1252",
- "windows-1253": "windows1253",
- "windows-1254": "windows1254",
- "windows-1255": "windows1255",
- "windows-1256": "windows1256",
- "windows-1257": "windows1257",
- "windows-1258": "windows1258",
- "x-mac-cyrillic": "macintoshCyrillic",
- "gbk": "gbk",
- "gb18030": "gb18030",
- // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG
- "big5": "big5",
- "euc-jp": "eucjp",
- "iso-2022-jp": "iso2022jp",
- "shift_jis": "shiftJIS",
- "euc-kr": "euckr",
- "replacement": "replacement",
- "utf-16be": "utf16be",
- "utf-16le": "utf16le",
- "x-user-defined": "xUserDefined",
-}
-
-// locales is taken from
-// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm.
-var locales = []struct{ tag, name string }{
- // The default value. Explicitly state latin to benefit from the exact
- // script option, while still making 1252 the default encoding for languages
- // written in Latin script.
- {"und_Latn", "windows-1252"},
- {"ar", "windows-1256"},
- {"ba", "windows-1251"},
- {"be", "windows-1251"},
- {"bg", "windows-1251"},
- {"cs", "windows-1250"},
- {"el", "iso-8859-7"},
- {"et", "windows-1257"},
- {"fa", "windows-1256"},
- {"he", "windows-1255"},
- {"hr", "windows-1250"},
- {"hu", "iso-8859-2"},
- {"ja", "shift_jis"},
- {"kk", "windows-1251"},
- {"ko", "euc-kr"},
- {"ku", "windows-1254"},
- {"ky", "windows-1251"},
- {"lt", "windows-1257"},
- {"lv", "windows-1257"},
- {"mk", "windows-1251"},
- {"pl", "iso-8859-2"},
- {"ru", "windows-1251"},
- {"sah", "windows-1251"},
- {"sk", "windows-1250"},
- {"sl", "iso-8859-2"},
- {"sr", "windows-1251"},
- {"tg", "windows-1251"},
- {"th", "windows-874"},
- {"tr", "windows-1254"},
- {"tt", "windows-1251"},
- {"uk", "windows-1251"},
- {"vi", "windows-1258"},
- {"zh-hans", "gb18030"},
- {"zh-hant", "big5"},
-}
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
deleted file mode 100644
index 26cfef9c6..000000000
--- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bytes"
- "encoding/xml"
- "fmt"
- "io"
- "log"
- "strings"
-
- "golang.org/x/text/internal/gen"
-)
-
-type registry struct {
- XMLName xml.Name `xml:"registry"`
- Updated string `xml:"updated"`
- Registry []struct {
- ID string `xml:"id,attr"`
- Record []struct {
- Name string `xml:"name"`
- Xref []struct {
- Type string `xml:"type,attr"`
- Data string `xml:"data,attr"`
- } `xml:"xref"`
- Desc struct {
- Data string `xml:",innerxml"`
- // Any []struct {
- // Data string `xml:",chardata"`
- // } `xml:",any"`
- // Data string `xml:",chardata"`
- } `xml:"description,"`
- MIB string `xml:"value"`
- Alias []string `xml:"alias"`
- MIME string `xml:"preferred_alias"`
- } `xml:"record"`
- } `xml:"registry"`
-}
-
-func main() {
- r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
- reg := &registry{}
- if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
- log.Fatalf("Error decoding charset registry: %v", err)
- }
- if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
- log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
- }
-
- w := &bytes.Buffer{}
- fmt.Fprintf(w, "const (\n")
- for _, rec := range reg.Registry[0].Record {
- constName := ""
- for _, a := range rec.Alias {
- if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
- // Some of the constant definitions have comments in them. Strip those.
- constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
- }
- }
- if constName == "" {
- switch rec.MIB {
- case "2085":
- constName = "HZGB2312" // Not listed as alias for some reason.
- default:
- log.Fatalf("No cs alias defined for %s.", rec.MIB)
- }
- }
- if rec.MIME != "" {
- rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
- }
- fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
- if len(rec.Desc.Data) > 0 {
- fmt.Fprint(w, "// ")
- d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
- inElem := true
- attr := ""
- for {
- t, err := d.Token()
- if err != nil {
- if err != io.EOF {
- log.Fatal(err)
- }
- break
- }
- switch x := t.(type) {
- case xml.CharData:
- attr = "" // Don't need attribute info.
- a := bytes.Split([]byte(x), []byte("\n"))
- for i, b := range a {
- if b = bytes.TrimSpace(b); len(b) != 0 {
- if !inElem && i > 0 {
- fmt.Fprint(w, "\n// ")
- }
- inElem = false
- fmt.Fprintf(w, "%s ", string(b))
- }
- }
- case xml.StartElement:
- if x.Name.Local == "xref" {
- inElem = true
- use := false
- for _, a := range x.Attr {
- if a.Name.Local == "type" {
- use = use || a.Value != "person"
- }
- if a.Name.Local == "data" && use {
- // Patch up URLs to use https. From some links, the
- // https version is different from the http one.
- s := a.Value
- s = strings.Replace(s, "http://", "https://", -1)
- s = strings.Replace(s, "/unicode/", "/", -1)
- attr = s + " "
- }
- }
- }
- case xml.EndElement:
- inElem = false
- fmt.Fprint(w, attr)
- }
- }
- fmt.Fprint(w, "\n")
- }
- for _, x := range rec.Xref {
- switch x.Type {
- case "rfc":
- fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
- case "uri":
- fmt.Fprintf(w, "// Reference: %s\n", x.Data)
- }
- }
- fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
- fmt.Fprintln(w)
- }
- fmt.Fprintln(w, ")")
-
- gen.WriteGoFile("mib.go", "identifier", w.Bytes())
-}
diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go
deleted file mode 100644
index 023957a67..000000000
--- a/vendor/golang.org/x/text/encoding/japanese/maketables.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-// TODO: Emoji extensions?
-// https://www.unicode.org/faq/emoji_dingbats.html
-// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-type entry struct {
- jisCode, table int
-}
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n")
- fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n")
-
- reverse := [65536]entry{}
- for i := range reverse {
- reverse[i].table = -1
- }
-
- tables := []struct {
- url string
- name string
- }{
- {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"},
- {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"},
- }
- for i, table := range tables {
- res, err := http.Get(table.url)
- if err != nil {
- log.Fatalf("%q: Get: %v", table.url, err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := 0, uint16(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("%q: could not parse %q", table.url, s)
- }
- if x < 0 || 120*94 <= x {
- log.Fatalf("%q: JIS code %d is out of range", table.url, x)
- }
- mapping[x] = y
- if reverse[y].table == -1 {
- reverse[y] = entry{jisCode: x, table: i}
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("%q: scanner error: %v", table.url, err)
- }
-
- fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n",
- table.name, table.name, table.url)
- fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name)
- for i, m := range mapping {
- if m != 0 {
- fmt.Printf("\t%d: 0x%04X,\n", i, m)
- }
- }
- fmt.Printf("}\n\n")
- }
-
- // Any run of at least separation continuous zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v.table == -1 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const (\n")
- fmt.Printf("\tjis0208 = 1\n")
- fmt.Printf("\tjis0212 = 2\n")
- fmt.Printf("\tcodeMask = 0x7f\n")
- fmt.Printf("\tcodeShift = 7\n")
- fmt.Printf("\ttableShift = 14\n")
- fmt.Printf(")\n\n")
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("//\n")
- fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n")
- fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n")
- fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n")
- fmt.Printf("// JIS code (94*j1 + j2) within that table.\n")
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x.table == -1 {
- continue
- }
- fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n",
- j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go
deleted file mode 100644
index c84034fb6..000000000
--- a/vendor/golang.org/x/text/encoding/korean/maketables.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n")
- fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n")
-
- res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint16{}
- reverse := [65536]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint16(0), uint16(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x {
- log.Fatalf("EUC-KR code %d is out of range", x)
- }
- mapping[x] = y
- if reverse[y] == 0 {
- c0, c1 := uint16(0), uint16(0)
- if x < 178*(0xc7-0x81) {
- c0 = uint16(x/178) + 0x81
- c1 = uint16(x % 178)
- switch {
- case c1 < 1*26:
- c1 += 0x41
- case c1 < 2*26:
- c1 += 0x47
- default:
- c1 += 0x4d
- }
- } else {
- x -= 178 * (0xc7 - 0x81)
- c0 = uint16(x/94) + 0xc7
- c1 = uint16(x%94) + 0xa1
- }
- reverse[y] = c0<<8 | c1
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("scanner error: %v", err)
- }
-
- fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n")
- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n")
- fmt.Printf("var decode = [...]uint16{\n")
- for i, v := range mapping {
- if v != 0 {
- fmt.Printf("\t%d: 0x%04X,\n", i, v)
- }
- }
- fmt.Printf("}\n\n")
-
- // Any run of at least separation consecutive zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v == 0 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x == 0 {
- continue
- }
- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
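
Each of these generators turns the sparse 65536-entry reverse map into a handful of dense encode tables with the same run-splitting loop: a new table starts whenever at least separation consecutive zero entries are seen. A standalone sketch of that idea (illustrative only, over a small slice instead of the full array):

package main

import "fmt"

// interval is a half-open interval [low, high), as in the generator above.
type interval struct{ low, high int }

// splitRuns groups the indices of non-zero entries into intervals, starting a
// new interval whenever a gap of at least separation zero entries is found.
func splitRuns(reverse []uint16, separation int) []interval {
	var intervals []interval
	low, high := -1, -1
	for i, v := range reverse {
		if v == 0 {
			continue
		}
		if low < 0 {
			low = i
		} else if i-high >= separation {
			intervals = append(intervals, interval{low, high})
			low = i
		}
		high = i + 1
	}
	if high >= 0 {
		intervals = append(intervals, interval{low, high})
	}
	return intervals
}

func main() {
	data := make([]uint16, 40)
	data[2], data[3], data[30] = 1, 1, 1 // two runs separated by a long gap
	fmt.Println(splitRuns(data, 10))     // [{2 4} {30 31}]
}
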
diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
deleted file mode 100644
index 55016c786..000000000
--- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
- fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")
-
- printGB18030()
- printGBK()
-}
-
-func printGB18030() {
- res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
- fmt.Printf("var gb18030 = [...][2]uint16{\n")
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint32(0), uint32(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0x10000 && y < 0x10000 {
- fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
- }
- }
- fmt.Printf("}\n\n")
-}
-
-func printGBK() {
- res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint16{}
- reverse := [65536]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint16(0), uint16(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 126*190 <= x {
- log.Fatalf("GBK code %d is out of range", x)
- }
- mapping[x] = y
- if reverse[y] == 0 {
- c0, c1 := x/190, x%190
- if c1 >= 0x3f {
- c1++
- }
- reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("scanner error: %v", err)
- }
-
- fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
- fmt.Printf("var decode = [...]uint16{\n")
- for i, v := range mapping {
- if v != 0 {
- fmt.Printf("\t%d: 0x%04X,\n", i, v)
- }
- }
- fmt.Printf("}\n\n")
-
- // Any run of at least separation consecutive zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v == 0 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x == 0 {
- continue
- }
- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
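
In the GBK branch above, each pointer x is packed into a lead byte 0x81 + x/190 and a trail byte 0x40 + x%190, where the trail offset is bumped by one once it reaches 0x3f so that 0x7f is never used. A minimal sketch of that packing (illustrative only):

package main

import "fmt"

// gbkBytes converts a linear GBK pointer into its lead and trail bytes,
// mirroring the reverse-map computation in the generator above.
func gbkBytes(x uint16) (lead, trail byte) {
	c0, c1 := x/190, x%190
	if c1 >= 0x3f {
		c1++ // skip the unused trail byte 0x7f
	}
	return byte(0x81 + c0), byte(0x40 + c1)
}

func main() {
	lead, trail := gbkBytes(6432) // purely illustrative pointer value
	fmt.Printf("0x%02X 0x%02X\n", lead, trail)
}
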
diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
deleted file mode 100644
index cf7fdb31a..000000000
--- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates tables.go:
-// go run maketables.go | gofmt > tables.go
-
-import (
- "bufio"
- "fmt"
- "log"
- "net/http"
- "sort"
- "strings"
-)
-
-func main() {
- fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
- fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n")
- fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n")
-
- res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt")
- if err != nil {
- log.Fatalf("Get: %v", err)
- }
- defer res.Body.Close()
-
- mapping := [65536]uint32{}
- reverse := [65536 * 4]uint16{}
-
- scanner := bufio.NewScanner(res.Body)
- for scanner.Scan() {
- s := strings.TrimSpace(scanner.Text())
- if s == "" || s[0] == '#' {
- continue
- }
- x, y := uint16(0), uint32(0)
- if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
- log.Fatalf("could not parse %q", s)
- }
- if x < 0 || 126*157 <= x {
- log.Fatalf("Big5 code %d is out of range", x)
- }
- mapping[x] = y
-
- // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that
- // "The index pointer for code point in index is the first pointer
- // corresponding to code point in index", which would normally mean
- // that the code below should be guarded by "if reverse[y] == 0", but
- // last instead of first seems to match the behavior of
- // "iconv -f UTF-8 -t BIG5". For example, U+8005 者 occurs twice in
- // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148
- // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc")
- // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc".
- c0, c1 := x/157, x%157
- if c1 < 0x3f {
- c1 += 0x40
- } else {
- c1 += 0x62
- }
- reverse[y] = (0x81+c0)<<8 | c1
- }
- if err := scanner.Err(); err != nil {
- log.Fatalf("scanner error: %v", err)
- }
-
- fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n")
- fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n")
- fmt.Printf("var decode = [...]uint32{\n")
- for i, v := range mapping {
- if v != 0 {
- fmt.Printf("\t%d: 0x%08X,\n", i, v)
- }
- }
- fmt.Printf("}\n\n")
-
- // Any run of at least separation consecutive zero entries in the reverse map will
- // be a separate encode table.
- const separation = 1024
-
- intervals := []interval(nil)
- low, high := -1, -1
- for i, v := range reverse {
- if v == 0 {
- continue
- }
- if low < 0 {
- low = i
- } else if i-high >= separation {
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- low = i
- }
- high = i + 1
- }
- if high >= 0 {
- intervals = append(intervals, interval{low, high})
- }
- sort.Sort(byDecreasingLength(intervals))
-
- fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
- fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n")
- fmt.Printf("// sorted by decreasing length.\n")
- for i, v := range intervals {
- fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high)
- }
- fmt.Printf("\n")
-
- for i, v := range intervals {
- fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
- fmt.Printf("var encode%d = [...]uint16{\n", i)
- for j := v.low; j < v.high; j++ {
- x := reverse[j]
- if x == 0 {
- continue
- }
- fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
- }
- fmt.Printf("}\n\n")
- }
-}
-
-// interval is a half-open interval [low, high).
-type interval struct {
- low, high int
-}
-
-func (i interval) len() int { return i.high - i.low }
-
-// byDecreasingLength sorts intervals by decreasing length.
-type byDecreasingLength []interval
-
-func (b byDecreasingLength) Len() int { return len(b) }
-func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
-func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
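
The Big5 trail byte occupies two disjoint ranges: pointers with x%157 below 0x3f map to 0x40..0x7E, the rest to 0xA1..0xFE. A small sketch of that mapping (illustrative only); with the pointer 2148 mentioned in the comment above it reproduces the documented encoding 0x8E 0xCD:

package main

import "fmt"

// big5Bytes converts a linear Big5 pointer into its lead and trail bytes,
// mirroring the reverse-map computation in the generator above.
func big5Bytes(x uint16) (lead, trail byte) {
	c0, c1 := x/157, x%157
	if c1 < 0x3f {
		c1 += 0x40 // trail bytes 0x40..0x7E
	} else {
		c1 += 0x62 // trail bytes 0xA1..0xFE
	}
	return byte(0x81 + c0), byte(c1)
}

func main() {
	lead, trail := big5Bytes(2148)
	fmt.Printf("0x%02X 0x%02X\n", lead, trail) // 0x8E 0xCD
}
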
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go
deleted file mode 100644
index 0c36a052f..000000000
--- a/vendor/golang.org/x/text/internal/language/compact/gen.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Language tag table generator.
-// Data read from the web.
-
-package main
-
-import (
- "flag"
- "fmt"
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/unicode/cldr"
-)
-
-var (
- test = flag.Bool("test",
- false,
- "test existing tables; can be used to compare web data with package data.")
- outputFile = flag.String("output",
- "tables.go",
- "output file for generated tables")
-)
-
-func main() {
- gen.Init()
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "compact")
-
- fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`)
-
- b := newBuilder(w)
- gen.WriteCLDRVersion(w)
-
- b.writeCompactIndex()
-}
-
-type builder struct {
- w *gen.CodeWriter
- data *cldr.CLDR
- supp *cldr.SupplementalData
-}
-
-func newBuilder(w *gen.CodeWriter) *builder {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- if err != nil {
- log.Fatal(err)
- }
- b := builder{
- w: w,
- data: data,
- supp: data.Supplemental(),
- }
- return &b
-}
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go
deleted file mode 100644
index 136cefaf0..000000000
--- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This file generates derivative tables based on the language package itself.
-
-import (
- "fmt"
- "log"
- "sort"
- "strings"
-
- "golang.org/x/text/internal/language"
-)
-
-// Compact indices:
-// Note -va-X variants only apply to localization variants.
-// BCP variants only ever apply to language.
-// The only ambiguity between tags is with regions.
-
-func (b *builder) writeCompactIndex() {
- // Collect all language tags for which we have any data in CLDR.
- m := map[language.Tag]bool{}
- for _, lang := range b.data.Locales() {
- // We include all locales unconditionally to be consistent with en_US.
- // We want en_US, even though it has no data associated with it.
-
- // TODO: put any of the languages for which no data exists at the end
- // of the index. This allows all components based on ICU to use that
- // as the cutoff point.
- // if x := data.RawLDML(lang); false ||
- // x.LocaleDisplayNames != nil ||
- // x.Characters != nil ||
- // x.Delimiters != nil ||
- // x.Measurement != nil ||
- // x.Dates != nil ||
- // x.Numbers != nil ||
- // x.Units != nil ||
- // x.ListPatterns != nil ||
- // x.Collations != nil ||
- // x.Segmentations != nil ||
- // x.Rbnf != nil ||
- // x.Annotations != nil ||
- // x.Metadata != nil {
-
- // TODO: support POSIX natively, albeit non-standard.
- tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
- m[tag] = true
- // }
- }
-
- // TODO: plural rules are also defined for the deprecated tags:
- // iw mo sh tl
- // Consider removing these as compact tags.
-
- // Include locales for plural rules, which uses a different structure.
- for _, plurals := range b.supp.Plurals {
- for _, rules := range plurals.PluralRules {
- for _, lang := range strings.Split(rules.Locales, " ") {
- m[language.Make(lang)] = true
- }
- }
- }
-
- var coreTags []language.CompactCoreInfo
- var special []string
-
- for t := range m {
- if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
- log.Fatalf("Unexpected extension %v in %v", x, t)
- }
- if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
- cci, ok := language.GetCompactCore(t)
- if !ok {
- log.Fatalf("Locale for non-basic language %q", t)
- }
- coreTags = append(coreTags, cci)
- } else {
- special = append(special, t.String())
- }
- }
-
- w := b.w
-
- sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] })
- sort.Strings(special)
-
- w.WriteComment(`
- NumCompactTags is the number of common tags. The maximum tag is
- NumCompactTags-1.`)
- w.WriteConst("NumCompactTags", len(m))
-
- fmt.Fprintln(w, "const (")
- for i, t := range coreTags {
- fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i)
- }
- for i, t := range special {
- fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags))
- }
- fmt.Fprintln(w, ")")
-
- w.WriteVar("coreTags", coreTags)
-
- w.WriteConst("specialTagsStr", strings.Join(special, " "))
-}
-
-func ident(s string) string {
- return strings.Replace(s, "-", "", -1) + "Index"
-}
diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go
deleted file mode 100644
index 9543d5832..000000000
--- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/language"
- "golang.org/x/text/internal/language/compact"
- "golang.org/x/text/unicode/cldr"
-)
-
-func main() {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
-
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- if err != nil {
- log.Fatalf("DecodeZip: %v", err)
- }
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("parents.go", "compact")
-
- // Create parents table.
- type ID uint16
- parents := make([]ID, compact.NumCompactTags)
- for _, loc := range data.Locales() {
- tag := language.MustParse(loc)
- index, ok := compact.FromTag(tag)
- if !ok {
- continue
- }
- parentIndex := compact.ID(0) // und
- for p := tag.Parent(); p != language.Und; p = p.Parent() {
- if x, ok := compact.FromTag(p); ok {
- parentIndex = x
- break
- }
- }
- parents[index] = ID(parentIndex)
- }
-
- w.WriteComment(`
- parents maps a compact index of a tag to the compact index of the parent of
- this tag.`)
- w.WriteVar("parents", parents)
-}
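
The generated parents slice maps every compact tag index to the compact index of its nearest ancestor, with index 0 (und) as the root. A rough illustration of how such a table is walked (the values below are made up, not taken from the generated data):

package main

import "fmt"

// parents maps a compact index to the compact index of its parent; 0 is und.
// Hypothetical values for illustration only.
var parents = []uint16{0, 0, 1, 2}

// chain returns the indices from id up to the root (0).
func chain(id uint16) []uint16 {
	out := []uint16{id}
	for id != 0 {
		id = parents[id]
		out = append(out, id)
	}
	return out
}

func main() {
	fmt.Println(chain(3)) // [3 2 1 0]
}
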
diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go
deleted file mode 100644
index cdcc7febc..000000000
--- a/vendor/golang.org/x/text/internal/language/gen.go
+++ /dev/null
@@ -1,1520 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Language tag table generator.
-// Data read from the web.
-
-package main
-
-import (
- "bufio"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/tag"
- "golang.org/x/text/unicode/cldr"
-)
-
-var (
- test = flag.Bool("test",
- false,
- "test existing tables; can be used to compare web data with package data.")
- outputFile = flag.String("output",
- "tables.go",
- "output file for generated tables")
-)
-
-var comment = []string{
- `
-lang holds an alphabetically sorted list of ISO-639 language identifiers.
-All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag.
-For 2-byte language identifiers, the two successive bytes have the following meaning:
- - if the first letter of the 2- and 3-letter ISO codes are the same:
- the second and third letter of the 3-letter ISO code.
- - otherwise: a 0 and an index into altLangISO3, right-shifted by 2 bits.
-For 3-byte language identifiers the 4th byte is 0.`,
- `
-langNoIndex is a bit vector of all 3-letter language codes that are not used as an index
-in lookup tables. The language ids for these language codes are derived directly
-from the letters and are not consecutive.`,
- `
-altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives
-to 2-letter language codes that cannot be derived using the method described above.
-Each 3-letter code is followed by its 1-byte langID.`,
- `
-altLangIndex is used to convert indexes in altLangISO3 to langIDs.`,
- `
-AliasMap maps langIDs to their suggested replacements.`,
- `
-script is an alphabetically sorted list of ISO 15924 codes. The index
-of the script in the string, divided by 4, is the internal scriptID.`,
- `
-isoRegionOffset needs to be added to the index of regionISO to obtain the regionID
-for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for
-the UN.M49 codes used for groups.)`,
- `
-regionISO holds a list of alphabetically sorted 2-letter ISO region codes.
-Each 2-letter code is followed by two bytes with the following meaning:
- - [A-Z]{2}: the first letter of the 2-letter code plus these two
- letters form the 3-letter ISO code.
- - 0, n: index into altRegionISO3.`,
- `
-regionTypes defines the status of a region for various standards.`,
- `
-m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are
-codes indicating collections of regions.`,
- `
-m49Index gives indexes into fromM49 based on the three most significant bits
-of a 10-bit UN.M49 code. To search a UN.M49 code in fromM49, search in
- fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]]
-for an entry where the first 7 bits match the 7 lsb of the UN.M49 code.
-The region code is stored in the 9 lsb of the indexed value.`,
- `
-fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`,
- `
-altRegionISO3 holds a list of 3-letter region codes that cannot be
-mapped to 2-letter codes using the default algorithm. This is a short list.`,
- `
-altRegionIDs holds a list of regionIDs the positions of which match those
-of the 3-letter ISO codes in altRegionISO3.`,
- `
-variantNumSpecialized is the number of specialized variants in variants.`,
- `
-suppressScript is an index from langID to the dominant script for that language,
-if it exists. If a script is given, it should be suppressed from the language tag.`,
- `
-likelyLang is a lookup table, indexed by langID, for the most likely
-scripts and regions given incomplete information. If more entries exist for a
-given language, region and script are the index and size respectively
-of the list in likelyLangList.`,
- `
-likelyLangList holds the lists of info associated with likelyLang.`,
- `
-likelyRegion is a lookup table, indexed by regionID, for the most likely
-languages and scripts given incomplete information. If more entries exist
-for a given regionID, lang and script are the index and size respectively
-of the list in likelyRegionList.
-TODO: exclude containers and user-definable regions from the list.`,
- `
-likelyRegionList holds the lists of info associated with likelyRegion.`,
- `
-likelyScript is a lookup table, indexed by scriptID, for the most likely
-languages and regions given a script.`,
- `
-nRegionGroups is the number of region groups.`,
- `
-regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
-where each set holds all groupings that are directly connected in a region
-containment graph.`,
- `
-regionInclusionBits is an array of bit vectors where every vector represents
-a set of region groupings. These sets are used to compute the distance
-between two regions for the purpose of language matching.`,
- `
-regionInclusionNext marks, for each entry in regionInclusionBits, the set of
-all groups that are reachable from the groups set in the respective entry.`,
-}
-
-// TODO: consider changing some of these structures to tries. This can reduce
-// memory, but may increase the need for memory allocations. This could be
-// mitigated if we can piggyback on language tags for common cases.
-
-func failOnError(e error) {
- if e != nil {
- log.Panic(e)
- }
-}
-
-type setType int
-
-const (
- Indexed setType = 1 + iota // all elements must be of same size
- Linear
-)
-
-type stringSet struct {
- s []string
- sorted, frozen bool
-
- // We often need to update values after the creation of an index is completed.
- // We include a convenience map for keeping track of this.
- update map[string]string
- typ setType // used for checking.
-}
-
-func (ss *stringSet) clone() stringSet {
- c := *ss
- c.s = append([]string(nil), c.s...)
- return c
-}
-
-func (ss *stringSet) setType(t setType) {
- if ss.typ != t && ss.typ != 0 {
- log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ)
- }
-}
-
-// parse parses a whitespace-separated string and initializes ss with its
-// components.
-func (ss *stringSet) parse(s string) {
- scan := bufio.NewScanner(strings.NewReader(s))
- scan.Split(bufio.ScanWords)
- for scan.Scan() {
- ss.add(scan.Text())
- }
-}
-
-func (ss *stringSet) assertChangeable() {
- if ss.frozen {
- log.Panic("attempt to modify a frozen stringSet")
- }
-}
-
-func (ss *stringSet) add(s string) {
- ss.assertChangeable()
- ss.s = append(ss.s, s)
- ss.sorted = ss.frozen
-}
-
-func (ss *stringSet) freeze() {
- ss.compact()
- ss.frozen = true
-}
-
-func (ss *stringSet) compact() {
- if ss.sorted {
- return
- }
- a := ss.s
- sort.Strings(a)
- k := 0
- for i := 1; i < len(a); i++ {
- if a[k] != a[i] {
- a[k+1] = a[i]
- k++
- }
- }
- ss.s = a[:k+1]
- ss.sorted = ss.frozen
-}
-
-type funcSorter struct {
- fn func(a, b string) bool
- sort.StringSlice
-}
-
-func (s funcSorter) Less(i, j int) bool {
- return s.fn(s.StringSlice[i], s.StringSlice[j])
-}
-
-func (ss *stringSet) sortFunc(f func(a, b string) bool) {
- ss.compact()
- sort.Sort(funcSorter{f, sort.StringSlice(ss.s)})
-}
-
-func (ss *stringSet) remove(s string) {
- ss.assertChangeable()
- if i, ok := ss.find(s); ok {
- copy(ss.s[i:], ss.s[i+1:])
- ss.s = ss.s[:len(ss.s)-1]
- }
-}
-
-func (ss *stringSet) replace(ol, nu string) {
- ss.s[ss.index(ol)] = nu
- ss.sorted = ss.frozen
-}
-
-func (ss *stringSet) index(s string) int {
- ss.setType(Indexed)
- i, ok := ss.find(s)
- if !ok {
- if i < len(ss.s) {
- log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i])
- }
- log.Panicf("find: item %q is not in list", s)
-
- }
- return i
-}
-
-func (ss *stringSet) find(s string) (int, bool) {
- ss.compact()
- i := sort.SearchStrings(ss.s, s)
- return i, i != len(ss.s) && ss.s[i] == s
-}
-
-func (ss *stringSet) slice() []string {
- ss.compact()
- return ss.s
-}
-
-func (ss *stringSet) updateLater(v, key string) {
- if ss.update == nil {
- ss.update = map[string]string{}
- }
- ss.update[v] = key
-}
-
-// join joins the string and ensures that all entries are of the same length.
-func (ss *stringSet) join() string {
- ss.setType(Indexed)
- n := len(ss.s[0])
- for _, s := range ss.s {
- if len(s) != n {
- log.Panicf("join: not all entries are of the same length: %q", s)
- }
- }
- ss.s = append(ss.s, strings.Repeat("\xff", n))
- return strings.Join(ss.s, "")
-}
-
-// ianaEntry holds information for an entry in the IANA Language Subtag Repository.
-// All types use the same entry.
-// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various
-// fields.
-type ianaEntry struct {
- typ string
- description []string
- scope string
- added string
- preferred string
- deprecated string
- suppressScript string
- macro string
- prefix []string
-}
-
-type builder struct {
- w *gen.CodeWriter
- hw io.Writer // MultiWriter for w and w.Hash
- data *cldr.CLDR
- supp *cldr.SupplementalData
-
- // indices
- locale stringSet // common locales
- lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data
- langNoIndex stringSet // 3-letter ISO codes with no associated data
- script stringSet // 4-letter ISO codes
- region stringSet // 2-letter ISO or 3-digit UN M49 codes
- variant stringSet // 4-8-alphanumeric variant code.
-
- // Region codes that are groups with their corresponding group IDs.
- groups map[int]index
-
- // langInfo
- registry map[string]*ianaEntry
-}
-
-type index uint
-
-func newBuilder(w *gen.CodeWriter) *builder {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- failOnError(err)
- b := builder{
- w: w,
- hw: io.MultiWriter(w, w.Hash),
- data: data,
- supp: data.Supplemental(),
- }
- b.parseRegistry()
- return &b
-}
-
-func (b *builder) parseRegistry() {
- r := gen.OpenIANAFile("assignments/language-subtag-registry")
- defer r.Close()
- b.registry = make(map[string]*ianaEntry)
-
- scan := bufio.NewScanner(r)
- scan.Split(bufio.ScanWords)
- var record *ianaEntry
- for more := scan.Scan(); more; {
- key := scan.Text()
- more = scan.Scan()
- value := scan.Text()
- switch key {
- case "Type:":
- record = &ianaEntry{typ: value}
- case "Subtag:", "Tag:":
- if s := strings.SplitN(value, "..", 2); len(s) > 1 {
- for a := s[0]; a <= s[1]; a = inc(a) {
- b.addToRegistry(a, record)
- }
- } else {
- b.addToRegistry(value, record)
- }
- case "Suppress-Script:":
- record.suppressScript = value
- case "Added:":
- record.added = value
- case "Deprecated:":
- record.deprecated = value
- case "Macrolanguage:":
- record.macro = value
- case "Preferred-Value:":
- record.preferred = value
- case "Prefix:":
- record.prefix = append(record.prefix, value)
- case "Scope:":
- record.scope = value
- case "Description:":
- buf := []byte(value)
- for more = scan.Scan(); more; more = scan.Scan() {
- b := scan.Bytes()
- if b[0] == '%' || b[len(b)-1] == ':' {
- break
- }
- buf = append(buf, ' ')
- buf = append(buf, b...)
- }
- record.description = append(record.description, string(buf))
- continue
- default:
- continue
- }
- more = scan.Scan()
- }
- if scan.Err() != nil {
- log.Panic(scan.Err())
- }
-}
-
-func (b *builder) addToRegistry(key string, entry *ianaEntry) {
- if info, ok := b.registry[key]; ok {
- if info.typ != "language" || entry.typ != "extlang" {
- log.Fatalf("parseRegistry: tag %q already exists", key)
- }
- } else {
- b.registry[key] = entry
- }
-}
-
-var commentIndex = make(map[string]string)
-
-func init() {
- for _, s := range comment {
- key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0])
- commentIndex[key] = s
- }
-}
-
-func (b *builder) comment(name string) {
- if s := commentIndex[name]; len(s) > 0 {
- b.w.WriteComment(s)
- } else {
- fmt.Fprintln(b.w)
- }
-}
-
-func (b *builder) pf(f string, x ...interface{}) {
- fmt.Fprintf(b.hw, f, x...)
- fmt.Fprint(b.hw, "\n")
-}
-
-func (b *builder) p(x ...interface{}) {
- fmt.Fprintln(b.hw, x...)
-}
-
-func (b *builder) addSize(s int) {
- b.w.Size += s
- b.pf("// Size: %d bytes", s)
-}
-
-func (b *builder) writeConst(name string, x interface{}) {
- b.comment(name)
- b.w.WriteConst(name, x)
-}
-
-// writeConsts computes f(v) for all v in values and writes the results
-// as constants named _v to a single constant block.
-func (b *builder) writeConsts(f func(string) int, values ...string) {
- b.pf("const (")
- for _, v := range values {
- b.pf("\t_%s = %v", v, f(v))
- }
- b.pf(")")
-}
-
-// writeType writes the type of the given value, which must be a struct.
-func (b *builder) writeType(value interface{}) {
- b.comment(reflect.TypeOf(value).Name())
- b.w.WriteType(value)
-}
-
-func (b *builder) writeSlice(name string, ss interface{}) {
- b.writeSliceAddSize(name, 0, ss)
-}
-
-func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) {
- b.comment(name)
- b.w.Size += extraSize
- v := reflect.ValueOf(ss)
- t := v.Type().Elem()
- b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len())
-
- fmt.Fprintf(b.w, "var %s = ", name)
- b.w.WriteArray(ss)
- b.p()
-}
-
-type FromTo struct {
- From, To uint16
-}
-
-func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) {
- ss.sortFunc(func(a, b string) bool {
- return index(a) < index(b)
- })
- m := []FromTo{}
- for _, s := range ss.s {
- m = append(m, FromTo{index(s), index(ss.update[s])})
- }
- b.writeSlice(name, m)
-}
-
-const base = 'z' - 'a' + 1
-
-func strToInt(s string) uint {
- v := uint(0)
- for i := 0; i < len(s); i++ {
- v *= base
- v += uint(s[i] - 'a')
- }
- return v
-}
-
-// intToStr converts the given integer to the original ASCII string passed to strToInt.
-// len(s) must match the number of characters obtained.
-func intToStr(v uint, s []byte) {
- for i := len(s) - 1; i >= 0; i-- {
- s[i] = byte(v%base) + 'a'
- v /= base
- }
-}
-
-func (b *builder) writeBitVector(name string, ss []string) {
- vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8)))
- for _, s := range ss {
- v := strToInt(s)
- vec[v/8] |= 1 << (v % 8)
- }
- b.writeSlice(name, vec)
-}
-
-// TODO: convert this type into a list or two-stage trie.
-func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) {
- b.comment(name)
- v := reflect.ValueOf(m)
- sz := v.Len() * (2 + int(v.Type().Key().Size()))
- for _, k := range m {
- sz += len(k)
- }
- b.addSize(sz)
- keys := []string{}
- b.pf(`var %s = map[string]uint16{`, name)
- for k := range m {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- b.pf("\t%q: %v,", k, f(m[k]))
- }
- b.p("}")
-}
-
-func (b *builder) writeMap(name string, m interface{}) {
- b.comment(name)
- v := reflect.ValueOf(m)
- sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size()))
- b.addSize(sz)
- f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool {
- return strings.IndexRune("{}, ", r) != -1
- })
- sort.Strings(f[1:])
- b.pf(`var %s = %s{`, name, f[0])
- for _, kv := range f[1:] {
- b.pf("\t%s,", kv)
- }
- b.p("}")
-}
-
-func (b *builder) langIndex(s string) uint16 {
- if s == "und" {
- return 0
- }
- if i, ok := b.lang.find(s); ok {
- return uint16(i)
- }
- return uint16(strToInt(s)) + uint16(len(b.lang.s))
-}
-
-// inc advances the string to its lexicographical successor.
-func inc(s string) string {
- const maxTagLength = 4
- var buf [maxTagLength]byte
- intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)])
- for i := 0; i < len(s); i++ {
- if s[i] <= 'Z' {
- buf[i] -= 'a' - 'A'
- }
- }
- return string(buf[:len(s)])
-}
-
-func (b *builder) parseIndices() {
- meta := b.supp.Metadata
-
- for k, v := range b.registry {
- var ss *stringSet
- switch v.typ {
- case "language":
- if len(k) == 2 || v.suppressScript != "" || v.scope == "special" {
- b.lang.add(k)
- continue
- } else {
- ss = &b.langNoIndex
- }
- case "region":
- ss = &b.region
- case "script":
- ss = &b.script
- case "variant":
- ss = &b.variant
- default:
- continue
- }
- ss.add(k)
- }
- // Include any language for which there is data.
- for _, lang := range b.data.Locales() {
- if x := b.data.RawLDML(lang); false ||
- x.LocaleDisplayNames != nil ||
- x.Characters != nil ||
- x.Delimiters != nil ||
- x.Measurement != nil ||
- x.Dates != nil ||
- x.Numbers != nil ||
- x.Units != nil ||
- x.ListPatterns != nil ||
- x.Collations != nil ||
- x.Segmentations != nil ||
- x.Rbnf != nil ||
- x.Annotations != nil ||
- x.Metadata != nil {
-
- from := strings.Split(lang, "_")
- if lang := from[0]; lang != "root" {
- b.lang.add(lang)
- }
- }
- }
- // Include locales for plural rules, which uses a different structure.
- for _, plurals := range b.data.Supplemental().Plurals {
- for _, rules := range plurals.PluralRules {
- for _, lang := range strings.Split(rules.Locales, " ") {
- if lang = strings.Split(lang, "_")[0]; lang != "root" {
- b.lang.add(lang)
- }
- }
- }
- }
- // Include languages in likely subtags.
- for _, m := range b.supp.LikelySubtags.LikelySubtag {
- from := strings.Split(m.From, "_")
- b.lang.add(from[0])
- }
- // Include ISO-639 alpha-3 bibliographic entries.
- for _, a := range meta.Alias.LanguageAlias {
- if a.Reason == "bibliographic" {
- b.langNoIndex.add(a.Type)
- }
- }
- // Include regions in territoryAlias (not all are in the IANA registry!)
- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(reg.Type) == 2 {
- b.region.add(reg.Type)
- }
- }
-
- for _, s := range b.lang.s {
- if len(s) == 3 {
- b.langNoIndex.remove(s)
- }
- }
- b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice()))
- b.writeConst("NumScripts", len(b.script.slice()))
- b.writeConst("NumRegions", len(b.region.slice()))
-
- // Add dummy codes at the start of each list to represent "unspecified".
- b.lang.add("---")
- b.script.add("----")
- b.region.add("---")
-
- // common locales
- b.locale.parse(meta.DefaultContent.Locales)
-}
-
-// TODO: region inclusion data will probably not be used in future matchers.
-
-func (b *builder) computeRegionGroups() {
- b.groups = make(map[int]index)
-
- // Create group indices.
- for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID.
- b.groups[i] = index(len(b.groups))
- }
- for _, g := range b.supp.TerritoryContainment.Group {
- // Skip UN and EURO zone as they are flattening the containment
- // relationship.
- if g.Type == "EZ" || g.Type == "UN" {
- continue
- }
- group := b.region.index(g.Type)
- if _, ok := b.groups[group]; !ok {
- b.groups[group] = index(len(b.groups))
- }
- }
- if len(b.groups) > 64 {
- log.Fatalf("only 64 groups supported, found %d", len(b.groups))
- }
- b.writeConst("nRegionGroups", len(b.groups))
-}
-
-var langConsts = []string{
- "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es",
- "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is",
- "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml",
- "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt",
- "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th",
- "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu",
-
- // constants for grandfathered tags (if not already defined)
- "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu",
- "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn",
-}
-
-// writeLanguage generates all tables needed for language canonicalization.
-func (b *builder) writeLanguage() {
- meta := b.supp.Metadata
-
- b.writeConst("nonCanonicalUnd", b.lang.index("und"))
- b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
- b.writeConst("langPrivateStart", b.langIndex("qaa"))
- b.writeConst("langPrivateEnd", b.langIndex("qtz"))
-
- // Get language codes that need to be mapped (overlong 3-letter codes,
- // deprecated 2-letter codes, legacy and grandfathered tags.)
- langAliasMap := stringSet{}
- aliasTypeMap := map[string]AliasType{}
-
- // altLangISO3 get the alternative ISO3 names that need to be mapped.
- altLangISO3 := stringSet{}
- // Add dummy start to avoid the use of index 0.
- altLangISO3.add("---")
- altLangISO3.updateLater("---", "aa")
-
- lang := b.lang.clone()
- for _, a := range meta.Alias.LanguageAlias {
- if a.Replacement == "" {
- a.Replacement = "und"
- }
- // TODO: support mapping to tags
- repl := strings.SplitN(a.Replacement, "_", 2)[0]
- if a.Reason == "overlong" {
- if len(a.Replacement) == 2 && len(a.Type) == 3 {
- lang.updateLater(a.Replacement, a.Type)
- }
- } else if len(a.Type) <= 3 {
- switch a.Reason {
- case "macrolanguage":
- aliasTypeMap[a.Type] = Macro
- case "deprecated":
- // handled elsewhere
- continue
- case "bibliographic", "legacy":
- if a.Type == "no" {
- continue
- }
- aliasTypeMap[a.Type] = Legacy
- default:
- log.Fatalf("new %s alias: %s", a.Reason, a.Type)
- }
- langAliasMap.add(a.Type)
- langAliasMap.updateLater(a.Type, repl)
- }
- }
- // Manually add the mapping of "nb" (Norwegian) to its macro language.
- // This can be removed if CLDR adopts this change.
- langAliasMap.add("nb")
- langAliasMap.updateLater("nb", "no")
- aliasTypeMap["nb"] = Macro
-
- for k, v := range b.registry {
- // Also add deprecated values for 3-letter ISO codes, which CLDR omits.
- if v.typ == "language" && v.deprecated != "" && v.preferred != "" {
- langAliasMap.add(k)
- langAliasMap.updateLater(k, v.preferred)
- aliasTypeMap[k] = Deprecated
- }
- }
- // Fix CLDR mappings.
- lang.updateLater("tl", "tgl")
- lang.updateLater("sh", "hbs")
- lang.updateLater("mo", "mol")
- lang.updateLater("no", "nor")
- lang.updateLater("tw", "twi")
- lang.updateLater("nb", "nob")
- lang.updateLater("ak", "aka")
- lang.updateLater("bh", "bih")
-
- // Ensure that each 2-letter code is matched with a 3-letter code.
- for _, v := range lang.s[1:] {
- s, ok := lang.update[v]
- if !ok {
- if s, ok = lang.update[langAliasMap.update[v]]; !ok {
- continue
- }
- lang.update[v] = s
- }
- if v[0] != s[0] {
- altLangISO3.add(s)
- altLangISO3.updateLater(s, v)
- }
- }
-
- // Complete canonicalized language tags.
- lang.freeze()
- for i, v := range lang.s {
- // We can avoid these manual entries by using the IANA registry directly.
- // Seems easier to update the list manually, as changes are rare.
- // The panic in this loop will trigger if we miss an entry.
- add := ""
- if s, ok := lang.update[v]; ok {
- if s[0] == v[0] {
- add = s[1:]
- } else {
- add = string([]byte{0, byte(altLangISO3.index(s))})
- }
- } else if len(v) == 3 {
- add = "\x00"
- } else {
- log.Panicf("no data for long form of %q", v)
- }
- lang.s[i] += add
- }
- b.writeConst("lang", tag.Index(lang.join()))
-
- b.writeConst("langNoIndexOffset", len(b.lang.s))
-
- // space of all valid 3-letter language identifiers.
- b.writeBitVector("langNoIndex", b.langNoIndex.slice())
-
- altLangIndex := []uint16{}
- for i, s := range altLangISO3.slice() {
- altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))})
- if i > 0 {
- idx := b.lang.index(altLangISO3.update[s])
- altLangIndex = append(altLangIndex, uint16(idx))
- }
- }
- b.writeConst("altLangISO3", tag.Index(altLangISO3.join()))
- b.writeSlice("altLangIndex", altLangIndex)
-
- b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex)
- types := make([]AliasType, len(langAliasMap.s))
- for i, s := range langAliasMap.s {
- types[i] = aliasTypeMap[s]
- }
- b.writeSlice("AliasTypes", types)
-}
-
-var scriptConsts = []string{
- "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
- "Zzzz",
-}
-
-func (b *builder) writeScript() {
- b.writeConsts(b.script.index, scriptConsts...)
- b.writeConst("script", tag.Index(b.script.join()))
-
- supp := make([]uint8, len(b.lang.slice()))
- for i, v := range b.lang.slice()[1:] {
- if sc := b.registry[v].suppressScript; sc != "" {
- supp[i+1] = uint8(b.script.index(sc))
- }
- }
- b.writeSlice("suppressScript", supp)
-
- // There is only one deprecated script in CLDR. This value is hard-coded.
- // We check here if the code must be updated.
- for _, a := range b.supp.Metadata.Alias.ScriptAlias {
- if a.Type != "Qaai" {
- log.Panicf("unexpected deprecated stript %q", a.Type)
- }
- }
-}
-
-func parseM49(s string) int16 {
- if len(s) == 0 {
- return 0
- }
- v, err := strconv.ParseUint(s, 10, 10)
- failOnError(err)
- return int16(v)
-}
-
-var regionConsts = []string{
- "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
- "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
-}
-
-func (b *builder) writeRegion() {
- b.writeConsts(b.region.index, regionConsts...)
-
- isoOffset := b.region.index("AA")
- m49map := make([]int16, len(b.region.slice()))
- fromM49map := make(map[int16]int)
- altRegionISO3 := ""
- altRegionIDs := []uint16{}
-
- b.writeConst("isoRegionOffset", isoOffset)
-
- // 2-letter region lookup and mapping to numeric codes.
- regionISO := b.region.clone()
- regionISO.s = regionISO.s[isoOffset:]
- regionISO.sorted = false
-
- regionTypes := make([]byte, len(b.region.s))
-
- // Is the region valid BCP 47?
- for s, e := range b.registry {
- if len(s) == 2 && s == strings.ToUpper(s) {
- i := b.region.index(s)
- for _, d := range e.description {
- if strings.Contains(d, "Private use") {
- regionTypes[i] = iso3166UserAssigned
- }
- }
- regionTypes[i] |= bcp47Region
- }
- }
-
- // Is the region a valid ccTLD?
- r := gen.OpenIANAFile("domains/root/db")
- defer r.Close()
-
- buf, err := ioutil.ReadAll(r)
- failOnError(err)
- re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`)
- for _, m := range re.FindAllSubmatch(buf, -1) {
- i := b.region.index(strings.ToUpper(string(m[1])))
- regionTypes[i] |= ccTLD
- }
-
- b.writeSlice("regionTypes", regionTypes)
-
- iso3Set := make(map[string]int)
- update := func(iso2, iso3 string) {
- i := regionISO.index(iso2)
- if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] {
- regionISO.s[i] += iso3[1:]
- iso3Set[iso3] = -1
- } else {
- if ok && j >= 0 {
- regionISO.s[i] += string([]byte{0, byte(j)})
- } else {
- iso3Set[iso3] = len(altRegionISO3)
- regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))})
- altRegionISO3 += iso3
- altRegionIDs = append(altRegionIDs, uint16(isoOffset+i))
- }
- }
- }
- for _, tc := range b.supp.CodeMappings.TerritoryCodes {
- i := regionISO.index(tc.Type) + isoOffset
- if d := m49map[i]; d != 0 {
- log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d)
- }
- m49 := parseM49(tc.Numeric)
- m49map[i] = m49
- if r := fromM49map[m49]; r == 0 {
- fromM49map[m49] = i
- } else if r != i {
- dep := b.registry[regionISO.s[r-isoOffset]].deprecated
- if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) {
- fromM49map[m49] = i
- }
- }
- }
- for _, ta := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 {
- from := parseM49(ta.Type)
- if r := fromM49map[from]; r == 0 {
- fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset
- }
- }
- }
- for _, tc := range b.supp.CodeMappings.TerritoryCodes {
- if len(tc.Alpha3) == 3 {
- update(tc.Type, tc.Alpha3)
- }
- }
- // These entries are not included in territoryCodes. Mostly 3-letter variants
- // of deleted codes and an entry for QU.
- for _, m := range []struct{ iso2, iso3 string }{
- {"CT", "CTE"},
- {"DY", "DHY"},
- {"HV", "HVO"},
- {"JT", "JTN"},
- {"MI", "MID"},
- {"NH", "NHB"},
- {"NQ", "ATN"},
- {"PC", "PCI"},
- {"PU", "PUS"},
- {"PZ", "PCZ"},
- {"RH", "RHO"},
- {"VD", "VDR"},
- {"WK", "WAK"},
- // These three-letter codes are used for others as well.
- {"FQ", "ATF"},
- } {
- update(m.iso2, m.iso3)
- }
- for i, s := range regionISO.s {
- if len(s) != 4 {
- regionISO.s[i] = s + " "
- }
- }
- b.writeConst("regionISO", tag.Index(regionISO.join()))
- b.writeConst("altRegionISO3", altRegionISO3)
- b.writeSlice("altRegionIDs", altRegionIDs)
-
- // Create list of deprecated regions.
- // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only
- // Transitionally-reserved mapping not included.
- regionOldMap := stringSet{}
- // Include regions in territoryAlias (not all are in the IANA registry!)
- for _, reg := range b.supp.Metadata.Alias.TerritoryAlias {
- if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 {
- regionOldMap.add(reg.Type)
- regionOldMap.updateLater(reg.Type, reg.Replacement)
- i, _ := regionISO.find(reg.Type)
- j, _ := regionISO.find(reg.Replacement)
- if k := m49map[i+isoOffset]; k == 0 {
- m49map[i+isoOffset] = m49map[j+isoOffset]
- }
- }
- }
- b.writeSortedMap("regionOldMap", &regionOldMap, func(s string) uint16 {
- return uint16(b.region.index(s))
- })
- // 3-digit region lookup, groupings.
- for i := 1; i < isoOffset; i++ {
- m := parseM49(b.region.s[i])
- m49map[i] = m
- fromM49map[m] = i
- }
- b.writeSlice("m49", m49map)
-
- const (
- searchBits = 7
- regionBits = 9
- )
- if len(m49map) >= 1<<regionBits {
- log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits)
- }
- m49Index := [9]int16{}
- fromM49 := []uint16{}
- m49 := []int{}
- for k, _ := range fromM49map {
- m49 = append(m49, int(k))
- }
- sort.Ints(m49)
- for _, k := range m49[1:] {
- val := (k & (1<<searchBits - 1)) << regionBits
- fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)]))
- m49Index[1:][k>>searchBits] = int16(len(fromM49))
- }
- b.writeSlice("m49Index", m49Index)
- b.writeSlice("fromM49", fromM49)
-}
-
-const (
- // TODO: put these lists in regionTypes as user data? Could be used for
- // various optimizations and refinements and could be exposed in the API.
- iso3166Except = "AC CP DG EA EU FX IC SU TA UK"
- iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions.
- // DY and RH are actually not deleted, but indeterminately reserved.
- iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD"
-)
-
-const (
- iso3166UserAssigned = 1 << iota
- ccTLD
- bcp47Region
-)
-
-func find(list []string, s string) int {
- for i, t := range list {
- if t == s {
- return i
- }
- }
- return -1
-}
-
-// writeVariant generates per-variant information and creates a map from variant
-// name to index value. We assign index values such that sorting multiple
-// variants by index value will result in the correct order.
-// There are two types of variants: specialized and general. Specialized variants
-// are only applicable to certain language or language-script pairs. Generalized
-// variants apply to any language. Generalized variants always sort after
-// specialized variants. We will therefore always assign a higher index value
-// to a generalized variant than any other variant. Generalized variants are
-// sorted alphabetically among themselves.
-// Specialized variants may also sort after other specialized variants. Such
-// variants will be ordered after any of the variants they may follow.
-// We assume that if a variant x is followed by a variant y, then for any prefix
-// p of x, p-x is a prefix of y. This allows us to order tags based on the
-// maximum of the length of any of its prefixes.
-// TODO: it is possible to define a set of Prefix values on variants such that
-// a total order cannot be defined to the point that this algorithm breaks.
-// In other words, we cannot guarantee the same order of variants for the
-// future using the same algorithm or for non-compliant combinations of
-// variants. For this reason, consider using simple alphabetic sorting
-// of variants and ignore Prefix restrictions altogether.
-func (b *builder) writeVariant() {
- generalized := stringSet{}
- specialized := stringSet{}
- specializedExtend := stringSet{}
- // Collate the variants by type and check assumptions.
- for _, v := range b.variant.slice() {
- e := b.registry[v]
- if len(e.prefix) == 0 {
- generalized.add(v)
- continue
- }
- c := strings.Split(e.prefix[0], "-")
- hasScriptOrRegion := false
- if len(c) > 1 {
- _, hasScriptOrRegion = b.script.find(c[1])
- if !hasScriptOrRegion {
- _, hasScriptOrRegion = b.region.find(c[1])
-
- }
- }
- if len(c) == 1 || len(c) == 2 && hasScriptOrRegion {
- // Variant is preceded by a language.
- specialized.add(v)
- continue
- }
- // Variant is preceded by another variant.
- specializedExtend.add(v)
- prefix := c[0] + "-"
- if hasScriptOrRegion {
- prefix += c[1]
- }
- for _, p := range e.prefix {
- // Verify that the prefix minus the last element is a prefix of the
- // predecessor element.
- i := strings.LastIndex(p, "-")
- pred := b.registry[p[i+1:]]
- if find(pred.prefix, p[:i]) < 0 {
- log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v)
- }
- // The sorting used below does not work in the general case. It works
- // if we assume that variants that may be followed by others only have
- // prefixes of the same length. Verify this.
- count := strings.Count(p[:i], "-")
- for _, q := range pred.prefix {
- if c := strings.Count(q, "-"); c != count {
- log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count)
- }
- }
- if !strings.HasPrefix(p, prefix) {
- log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix)
- }
- }
- }
-
- // Sort extended variants.
- a := specializedExtend.s
- less := func(v, w string) bool {
- // Sort by the maximum number of elements.
- maxCount := func(s string) (max int) {
- for _, p := range b.registry[s].prefix {
- if c := strings.Count(p, "-"); c > max {
- max = c
- }
- }
- return
- }
- if cv, cw := maxCount(v), maxCount(w); cv != cw {
- return cv < cw
- }
- // Sort by name as tie breaker.
- return v < w
- }
- sort.Sort(funcSorter{less, sort.StringSlice(a)})
- specializedExtend.frozen = true
-
- // Create index from variant name to index.
- variantIndex := make(map[string]uint8)
- add := func(s []string) {
- for _, v := range s {
- variantIndex[v] = uint8(len(variantIndex))
- }
- }
- add(specialized.slice())
- add(specializedExtend.s)
- numSpecialized := len(variantIndex)
- add(generalized.slice())
- if n := len(variantIndex); n > 255 {
- log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n)
- }
- b.writeMap("variantIndex", variantIndex)
- b.writeConst("variantNumSpecialized", numSpecialized)
-}
-
-func (b *builder) writeLanguageInfo() {
-}
-
-// writeLikelyData writes tables that are used both for finding parent relations and for
-// language matching. Each entry contains additional bits to indicate the status of the
-// data to know when it cannot be used for parent relations.
-func (b *builder) writeLikelyData() {
- const (
- isList = 1 << iota
- scriptInFrom
- regionInFrom
- )
- type ( // generated types
- likelyScriptRegion struct {
- region uint16
- script uint8
- flags uint8
- }
- likelyLangScript struct {
- lang uint16
- script uint8
- flags uint8
- }
- likelyLangRegion struct {
- lang uint16
- region uint16
- }
- // likelyTag is used for getting likely tags for group regions, where
- // the likely region might be a region contained in the group.
- likelyTag struct {
- lang uint16
- region uint16
- script uint8
- }
- )
- var ( // generated variables
- likelyRegionGroup = make([]likelyTag, len(b.groups))
- likelyLang = make([]likelyScriptRegion, len(b.lang.s))
- likelyRegion = make([]likelyLangScript, len(b.region.s))
- likelyScript = make([]likelyLangRegion, len(b.script.s))
- likelyLangList = []likelyScriptRegion{}
- likelyRegionList = []likelyLangScript{}
- )
- type fromTo struct {
- from, to []string
- }
- langToOther := map[int][]fromTo{}
- regionToOther := map[int][]fromTo{}
- for _, m := range b.supp.LikelySubtags.LikelySubtag {
- from := strings.Split(m.From, "_")
- to := strings.Split(m.To, "_")
- if len(to) != 3 {
- log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to))
- }
- if len(from) > 3 {
- log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from))
- }
- if from[0] != to[0] && from[0] != "und" {
- log.Fatalf("unexpected language change in expansion: %s -> %s", from, to)
- }
- if len(from) == 3 {
- if from[2] != to[2] {
- log.Fatalf("unexpected region change in expansion: %s -> %s", from, to)
- }
- if from[0] != "und" {
- log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to)
- }
- }
- if len(from) == 1 || from[0] != "und" {
- id := 0
- if from[0] != "und" {
- id = b.lang.index(from[0])
- }
- langToOther[id] = append(langToOther[id], fromTo{from, to})
- } else if len(from) == 2 && len(from[1]) == 4 {
- sid := b.script.index(from[1])
- likelyScript[sid].lang = uint16(b.langIndex(to[0]))
- likelyScript[sid].region = uint16(b.region.index(to[2]))
- } else {
- r := b.region.index(from[len(from)-1])
- if id, ok := b.groups[r]; ok {
- if from[0] != "und" {
- log.Fatalf("region changed unexpectedly: %s -> %s", from, to)
- }
- likelyRegionGroup[id].lang = uint16(b.langIndex(to[0]))
- likelyRegionGroup[id].script = uint8(b.script.index(to[1]))
- likelyRegionGroup[id].region = uint16(b.region.index(to[2]))
- } else {
- regionToOther[r] = append(regionToOther[r], fromTo{from, to})
- }
- }
- }
- b.writeType(likelyLangRegion{})
- b.writeSlice("likelyScript", likelyScript)
-
- for id := range b.lang.s {
- list := langToOther[id]
- if len(list) == 1 {
- likelyLang[id].region = uint16(b.region.index(list[0].to[2]))
- likelyLang[id].script = uint8(b.script.index(list[0].to[1]))
- } else if len(list) > 1 {
- likelyLang[id].flags = isList
- likelyLang[id].region = uint16(len(likelyLangList))
- likelyLang[id].script = uint8(len(list))
- for _, x := range list {
- flags := uint8(0)
- if len(x.from) > 1 {
- if x.from[1] == x.to[2] {
- flags = regionInFrom
- } else {
- flags = scriptInFrom
- }
- }
- likelyLangList = append(likelyLangList, likelyScriptRegion{
- region: uint16(b.region.index(x.to[2])),
- script: uint8(b.script.index(x.to[1])),
- flags: flags,
- })
- }
- }
- }
- // TODO: merge suppressScript data with this table.
- b.writeType(likelyScriptRegion{})
- b.writeSlice("likelyLang", likelyLang)
- b.writeSlice("likelyLangList", likelyLangList)
-
- for id := range b.region.s {
- list := regionToOther[id]
- if len(list) == 1 {
- likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0]))
- likelyRegion[id].script = uint8(b.script.index(list[0].to[1]))
- if len(list[0].from) > 2 {
- likelyRegion[id].flags = scriptInFrom
- }
- } else if len(list) > 1 {
- likelyRegion[id].flags = isList
- likelyRegion[id].lang = uint16(len(likelyRegionList))
- likelyRegion[id].script = uint8(len(list))
- for i, x := range list {
- if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 {
- log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i)
- }
- x := likelyLangScript{
- lang: uint16(b.langIndex(x.to[0])),
- script: uint8(b.script.index(x.to[1])),
- }
- if len(list[0].from) > 2 {
- x.flags = scriptInFrom
- }
- likelyRegionList = append(likelyRegionList, x)
- }
- }
- }
- b.writeType(likelyLangScript{})
- b.writeSlice("likelyRegion", likelyRegion)
- b.writeSlice("likelyRegionList", likelyRegionList)
-
- b.writeType(likelyTag{})
- b.writeSlice("likelyRegionGroup", likelyRegionGroup)
-}
-
-func (b *builder) writeRegionInclusionData() {
- var (
- // mm holds for each group the set of groups with a distance of 1.
- mm = make(map[int][]index)
-
- // containment holds for each group the transitive closure of
- // containment of other groups.
- containment = make(map[index][]index)
- )
- for _, g := range b.supp.TerritoryContainment.Group {
- // Skip UN and EURO zone as they are flattening the containment
- // relationship.
- if g.Type == "EZ" || g.Type == "UN" {
- continue
- }
- group := b.region.index(g.Type)
- groupIdx := b.groups[group]
- for _, mem := range strings.Split(g.Contains, " ") {
- r := b.region.index(mem)
- mm[r] = append(mm[r], groupIdx)
- if g, ok := b.groups[r]; ok {
- mm[group] = append(mm[group], g)
- containment[groupIdx] = append(containment[groupIdx], g)
- }
- }
- }
-
- regionContainment := make([]uint64, len(b.groups))
- for _, g := range b.groups {
- l := containment[g]
-
- // Compute the transitive closure of containment.
- for i := 0; i < len(l); i++ {
- l = append(l, containment[l[i]]...)
- }
-
- // Compute the bitmask.
- regionContainment[g] = 1 << g
- for _, v := range l {
- regionContainment[g] |= 1 << v
- }
- }
- b.writeSlice("regionContainment", regionContainment)
-
- regionInclusion := make([]uint8, len(b.region.s))
- bvs := make(map[uint64]index)
- // Make the first bitvector positions correspond with the groups.
- for r, i := range b.groups {
- bv := uint64(1 << i)
- for _, g := range mm[r] {
- bv |= 1 << g
- }
- bvs[bv] = i
- regionInclusion[r] = uint8(bvs[bv])
- }
- for r := 1; r < len(b.region.s); r++ {
- if _, ok := b.groups[r]; !ok {
- bv := uint64(0)
- for _, g := range mm[r] {
- bv |= 1 << g
- }
- if bv == 0 {
- // Pick the world for unspecified regions.
- bv = 1 << b.groups[b.region.index("001")]
- }
- if _, ok := bvs[bv]; !ok {
- bvs[bv] = index(len(bvs))
- }
- regionInclusion[r] = uint8(bvs[bv])
- }
- }
- b.writeSlice("regionInclusion", regionInclusion)
- regionInclusionBits := make([]uint64, len(bvs))
- for k, v := range bvs {
- regionInclusionBits[v] = uint64(k)
- }
- // Add bit vectors for increasingly large distances until a fixed point is reached.
- regionInclusionNext := []uint8{}
- for i := 0; i < len(regionInclusionBits); i++ {
- bits := regionInclusionBits[i]
- next := bits
- for i := uint(0); i < uint(len(b.groups)); i++ {
- if bits&(1<<i) != 0 {
- next |= regionInclusionBits[i]
- }
- }
- if _, ok := bvs[next]; !ok {
- bvs[next] = index(len(bvs))
- regionInclusionBits = append(regionInclusionBits, next)
- }
- regionInclusionNext = append(regionInclusionNext, uint8(bvs[next]))
- }
- b.writeSlice("regionInclusionBits", regionInclusionBits)
- b.writeSlice("regionInclusionNext", regionInclusionNext)
-}
-
-type parentRel struct {
- lang uint16
- script uint8
- maxScript uint8
- toRegion uint16
- fromRegion []uint16
-}
-
-func (b *builder) writeParents() {
- b.writeType(parentRel{})
-
- parents := []parentRel{}
-
- // Construct parent overrides.
- n := 0
- for _, p := range b.data.Supplemental().ParentLocales.ParentLocale {
- // Skipping non-standard scripts to root is implemented using addTags.
- if p.Parent == "root" {
- continue
- }
-
- sub := strings.Split(p.Parent, "_")
- parent := parentRel{lang: b.langIndex(sub[0])}
- if len(sub) == 2 {
- // TODO: check that all undefined scripts are indeed Latn in these
- // cases.
- parent.maxScript = uint8(b.script.index("Latn"))
- parent.toRegion = uint16(b.region.index(sub[1]))
- } else {
- parent.script = uint8(b.script.index(sub[1]))
- parent.maxScript = parent.script
- parent.toRegion = uint16(b.region.index(sub[2]))
- }
- for _, c := range strings.Split(p.Locales, " ") {
- region := b.region.index(c[strings.LastIndex(c, "_")+1:])
- parent.fromRegion = append(parent.fromRegion, uint16(region))
- }
- parents = append(parents, parent)
- n += len(parent.fromRegion)
- }
- b.writeSliceAddSize("parents", n*2, parents)
-}
-
-func main() {
- gen.Init()
-
- gen.Repackage("gen_common.go", "common.go", "language")
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "language")
-
- fmt.Fprintln(w, `import "golang.org/x/text/internal/tag"`)
-
- b := newBuilder(w)
- gen.WriteCLDRVersion(w)
-
- b.parseIndices()
- b.writeType(FromTo{})
- b.writeLanguage()
- b.writeScript()
- b.writeRegion()
- b.writeVariant()
- // TODO: b.writeLocale()
- b.computeRegionGroups()
- b.writeLikelyData()
- b.writeRegionInclusionData()
- b.writeParents()
-}
diff --git a/vendor/golang.org/x/text/internal/language/gen_common.go b/vendor/golang.org/x/text/internal/language/gen_common.go
deleted file mode 100644
index c419ceeb1..000000000
--- a/vendor/golang.org/x/text/internal/language/gen_common.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This file contains code common to the maketables.go and the package code.
-
-// AliasType is the type of an alias in AliasMap.
-type AliasType int8
-
-const (
- Deprecated AliasType = iota
- Macro
- Legacy
-
- AliasTypeUnknown AliasType = -1
-)
diff --git a/vendor/golang.org/x/text/language/gen.go b/vendor/golang.org/x/text/language/gen.go
deleted file mode 100644
index 3004eb42c..000000000
--- a/vendor/golang.org/x/text/language/gen.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Language tag table generator.
-// Data read from the web.
-
-package main
-
-import (
- "flag"
- "fmt"
- "io"
- "log"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/language"
- "golang.org/x/text/unicode/cldr"
-)
-
-var (
- test = flag.Bool("test",
- false,
- "test existing tables; can be used to compare web data with package data.")
- outputFile = flag.String("output",
- "tables.go",
- "output file for generated tables")
-)
-
-func main() {
- gen.Init()
-
- w := gen.NewCodeWriter()
- defer w.WriteGoFile("tables.go", "language")
-
- b := newBuilder(w)
- gen.WriteCLDRVersion(w)
-
- b.writeConstants()
- b.writeMatchData()
-}
-
-type builder struct {
- w *gen.CodeWriter
- hw io.Writer // MultiWriter for w and w.Hash
- data *cldr.CLDR
- supp *cldr.SupplementalData
-}
-
-func (b *builder) langIndex(s string) uint16 {
- return uint16(language.MustParseBase(s))
-}
-
-func (b *builder) regionIndex(s string) int {
- return int(language.MustParseRegion(s))
-}
-
-func (b *builder) scriptIndex(s string) int {
- return int(language.MustParseScript(s))
-}
-
-func newBuilder(w *gen.CodeWriter) *builder {
- r := gen.OpenCLDRCoreZip()
- defer r.Close()
- d := &cldr.Decoder{}
- data, err := d.DecodeZip(r)
- if err != nil {
- log.Fatal(err)
- }
- b := builder{
- w: w,
- hw: io.MultiWriter(w, w.Hash),
- data: data,
- supp: data.Supplemental(),
- }
- return &b
-}
-
-// writeConsts computes f(v) for all v in values and writes the results
-// as constants named _v to a single constant block.
-func (b *builder) writeConsts(f func(string) int, values ...string) {
- fmt.Fprintln(b.w, "const (")
- for _, v := range values {
- fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v))
- }
- fmt.Fprintln(b.w, ")")
-}
-
-// TODO: region inclusion data will probably not be used in future matchers.
-
-var langConsts = []string{
- "de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und",
-}
-
-var scriptConsts = []string{
- "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy",
- "Zzzz",
-}
-
-var regionConsts = []string{
- "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US",
- "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo.
-}
-
-func (b *builder) writeConstants() {
- b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...)
- b.writeConsts(b.regionIndex, regionConsts...)
- b.writeConsts(b.scriptIndex, scriptConsts...)
-}
-
-type mutualIntelligibility struct {
- want, have uint16
- distance uint8
- oneway bool
-}
-
-type scriptIntelligibility struct {
- wantLang, haveLang uint16
- wantScript, haveScript uint8
- distance uint8
- // Always oneway
-}
-
-type regionIntelligibility struct {
- lang uint16 // compact language id
- script uint8 // 0 means any
- group uint8 // 0 means any; if bit 7 is set it means inverse
- distance uint8
- // Always twoway.
-}
-
-// writeMatchData writes tables with languages and scripts for which there is
-// mutual intelligibility. The data is based on CLDR's languageMatching data.
-// Note that we use a different algorithm than the one defined by CLDR and that
-// we slightly modify the data. For example, we convert scores to confidence levels.
-// We also drop all region-related data as we use a different algorithm to
-// determine region equivalence.
-func (b *builder) writeMatchData() {
- lm := b.supp.LanguageMatching.LanguageMatches
- cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new")
-
- regionHierarchy := map[string][]string{}
- for _, g := range b.supp.TerritoryContainment.Group {
- regions := strings.Split(g.Contains, " ")
- regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...)
- }
- regionToGroups := make([]uint8, language.NumRegions)
-
- idToIndex := map[string]uint8{}
- for i, mv := range lm[0].MatchVariable {
- if i > 6 {
- log.Fatalf("Too many groups: %d", i)
- }
- idToIndex[mv.Id] = uint8(i + 1)
- // TODO: also handle '-'
- for _, r := range strings.Split(mv.Value, "+") {
- todo := []string{r}
- for k := 0; k < len(todo); k++ {
- r := todo[k]
- regionToGroups[b.regionIndex(r)] |= 1 << uint8(i)
- todo = append(todo, regionHierarchy[r]...)
- }
- }
- }
- b.w.WriteVar("regionToGroups", regionToGroups)
-
- // maps language id to in- and out-of-group region.
- paradigmLocales := [][3]uint16{}
- locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ")
- for i := 0; i < len(locales); i += 2 {
- x := [3]uint16{}
- for j := 0; j < 2; j++ {
- pc := strings.SplitN(locales[i+j], "-", 2)
- x[0] = b.langIndex(pc[0])
- if len(pc) == 2 {
- x[1+j] = uint16(b.regionIndex(pc[1]))
- }
- }
- paradigmLocales = append(paradigmLocales, x)
- }
- b.w.WriteVar("paradigmLocales", paradigmLocales)
-
- b.w.WriteType(mutualIntelligibility{})
- b.w.WriteType(scriptIntelligibility{})
- b.w.WriteType(regionIntelligibility{})
-
- matchLang := []mutualIntelligibility{}
- matchScript := []scriptIntelligibility{}
- matchRegion := []regionIntelligibility{}
- // Convert the languageMatch entries in lists keyed by desired language.
- for _, m := range lm[0].LanguageMatch {
- // Different versions of CLDR use different separators.
- desired := strings.Replace(m.Desired, "-", "_", -1)
- supported := strings.Replace(m.Supported, "-", "_", -1)
- d := strings.Split(desired, "_")
- s := strings.Split(supported, "_")
- if len(d) != len(s) {
- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
- continue
- }
- distance, _ := strconv.ParseInt(m.Distance, 10, 8)
- switch len(d) {
- case 2:
- if desired == supported && desired == "*_*" {
- continue
- }
- // language-script pair.
- matchScript = append(matchScript, scriptIntelligibility{
- wantLang: uint16(b.langIndex(d[0])),
- haveLang: uint16(b.langIndex(s[0])),
- wantScript: uint8(b.scriptIndex(d[1])),
- haveScript: uint8(b.scriptIndex(s[1])),
- distance: uint8(distance),
- })
- if m.Oneway != "true" {
- matchScript = append(matchScript, scriptIntelligibility{
- wantLang: uint16(b.langIndex(s[0])),
- haveLang: uint16(b.langIndex(d[0])),
- wantScript: uint8(b.scriptIndex(s[1])),
- haveScript: uint8(b.scriptIndex(d[1])),
- distance: uint8(distance),
- })
- }
- case 1:
- if desired == supported && desired == "*" {
- continue
- }
- if distance == 1 {
- // nb == no is already handled by macro mapping. Check there
- // really is only this case.
- if d[0] != "no" || s[0] != "nb" {
- log.Fatalf("unhandled equivalence %s == %s", s[0], d[0])
- }
- continue
- }
- // TODO: consider dropping oneway field and just doubling the entry.
- matchLang = append(matchLang, mutualIntelligibility{
- want: uint16(b.langIndex(d[0])),
- have: uint16(b.langIndex(s[0])),
- distance: uint8(distance),
- oneway: m.Oneway == "true",
- })
- case 3:
- if desired == supported && desired == "*_*_*" {
- continue
- }
- if desired != supported {
- // This is now supported by CLDR, but only one case, which
- // should already be covered by paradigm locales. For instance,
- // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
- // testdata/CLDRLocaleMatcherTest.txt tests this.
- if supported != "en_*_GB" {
- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
- }
- continue
- }
- ri := regionIntelligibility{
- lang: b.langIndex(d[0]),
- distance: uint8(distance),
- }
- if d[1] != "*" {
- ri.script = uint8(b.scriptIndex(d[1]))
- }
- switch {
- case d[2] == "*":
- ri.group = 0x80 // not contained in anything
- case strings.HasPrefix(d[2], "$!"):
- ri.group = 0x80
- d[2] = "$" + d[2][len("$!"):]
- fallthrough
- case strings.HasPrefix(d[2], "$"):
- ri.group |= idToIndex[d[2]]
- }
- matchRegion = append(matchRegion, ri)
- default:
- log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
- }
- }
- sort.SliceStable(matchLang, func(i, j int) bool {
- return matchLang[i].distance < matchLang[j].distance
- })
- b.w.WriteComment(`
- matchLang holds pairs of langIDs of base languages that are typically
- mutually intelligible. Each pair is associated with a confidence and
- whether the intelligibility goes one or both ways.`)
- b.w.WriteVar("matchLang", matchLang)
-
- b.w.WriteComment(`
- matchScript holds pairs of scriptIDs where readers of one script
- can typically also read the other. Each is associated with a confidence.`)
- sort.SliceStable(matchScript, func(i, j int) bool {
- return matchScript[i].distance < matchScript[j].distance
- })
- b.w.WriteVar("matchScript", matchScript)
-
- sort.SliceStable(matchRegion, func(i, j int) bool {
- return matchRegion[i].distance < matchRegion[j].distance
- })
- b.w.WriteVar("matchRegion", matchRegion)
-}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go
deleted file mode 100644
index 987fc169c..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "flag"
- "log"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/triegen"
- "golang.org/x/text/internal/ucd"
-)
-
-var outputFile = flag.String("out", "tables.go", "output file")
-
-func main() {
- gen.Init()
- gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
- gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
-
- genTables()
-}
-
-// bidiClass names and codes taken from class "bc" in
-// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
-var bidiClass = map[string]Class{
- "AL": AL, // ArabicLetter
- "AN": AN, // ArabicNumber
- "B": B, // ParagraphSeparator
- "BN": BN, // BoundaryNeutral
- "CS": CS, // CommonSeparator
- "EN": EN, // EuropeanNumber
- "ES": ES, // EuropeanSeparator
- "ET": ET, // EuropeanTerminator
- "L": L, // LeftToRight
- "NSM": NSM, // NonspacingMark
- "ON": ON, // OtherNeutral
- "R": R, // RightToLeft
- "S": S, // SegmentSeparator
- "WS": WS, // WhiteSpace
-
- "FSI": Control,
- "PDF": Control,
- "PDI": Control,
- "LRE": Control,
- "LRI": Control,
- "LRO": Control,
- "RLE": Control,
- "RLI": Control,
- "RLO": Control,
-}
-
-func genTables() {
- if numClass > 0x0F {
- log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
- }
- w := gen.NewCodeWriter()
- defer w.WriteVersionedGoFile(*outputFile, "bidi")
-
- gen.WriteUnicodeVersion(w)
-
- t := triegen.NewTrie("bidi")
-
- // Build data about bracket mapping. These bits need to be or-ed with
- // any other bits.
- orMask := map[rune]uint64{}
-
- xorMap := map[rune]int{}
- xorMasks := []rune{0} // First value is no-op.
-
- ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
- r1 := p.Rune(0)
- r2 := p.Rune(1)
- xor := r1 ^ r2
- if _, ok := xorMap[xor]; !ok {
- xorMap[xor] = len(xorMasks)
- xorMasks = append(xorMasks, xor)
- }
- entry := uint64(xorMap[xor]) << xorMaskShift
- switch p.String(2) {
- case "o":
- entry |= openMask
- case "c", "n":
- default:
- log.Fatalf("Unknown bracket class %q.", p.String(2))
- }
- orMask[r1] = entry
- })
-
- w.WriteComment(`
- xorMasks contains masks to be xor-ed with brackets to get the reverse
- version.`)
- w.WriteVar("xorMasks", xorMasks)
-
- done := map[rune]bool{}
-
- insert := func(r rune, c Class) {
- if !done[r] {
- t.Insert(r, orMask[r]|uint64(c))
- done[r] = true
- }
- }
-
- // Insert the derived BiDi properties.
- ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
- r := p.Rune(0)
- class, ok := bidiClass[p.String(1)]
- if !ok {
- log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
- }
- insert(r, class)
- })
- visitDefaults(insert)
-
- // TODO: use sparse blocks. This would reduce table size considerably
- // from the looks of it.
-
- sz, err := t.Gen(w)
- if err != nil {
- log.Fatal(err)
- }
- w.Size += sz
-}
-
-// dummy values to make methods in gen_common compile. The real versions
-// will be generated by this file to tables.go.
-var (
- xorMasks []rune
-)
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
deleted file mode 100644
index 02c3b505d..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "unicode"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/ucd"
- "golang.org/x/text/unicode/rangetable"
-)
-
-// These tables are hand-extracted from:
-// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
-func visitDefaults(fn func(r rune, c Class)) {
- // first write default values for ranges listed above.
- visitRunes(fn, AL, []rune{
- 0x0600, 0x07BF, // Arabic
- 0x08A0, 0x08FF, // Arabic Extended-A
- 0xFB50, 0xFDCF, // Arabic Presentation Forms
- 0xFDF0, 0xFDFF,
- 0xFE70, 0xFEFF,
- 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
- })
- visitRunes(fn, R, []rune{
- 0x0590, 0x05FF, // Hebrew
- 0x07C0, 0x089F, // Nko et al.
- 0xFB1D, 0xFB4F,
- 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
- 0x0001E800, 0x0001EDFF,
- 0x0001EF00, 0x0001EFFF,
- })
- visitRunes(fn, ET, []rune{ // European Terminator
- 0x20A0, 0x20Cf, // Currency symbols
- })
- rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
- fn(r, BN) // Boundary Neutral
- })
- ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
- if p.String(1) == "Default_Ignorable_Code_Point" {
- fn(p.Rune(0), BN) // Boundary Neutral
- }
- })
-}
-
-func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
- for i := 0; i < len(runes); i += 2 {
- lo, hi := runes[i], runes[i+1]
- for j := lo; j <= hi; j++ {
- fn(j, c)
- }
- }
-}
diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
deleted file mode 100644
index 9cb994289..000000000
--- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// Class is the Unicode BiDi class. Each rune has a single class.
-type Class uint
-
-const (
- L Class = iota // LeftToRight
- R // RightToLeft
- EN // EuropeanNumber
- ES // EuropeanSeparator
- ET // EuropeanTerminator
- AN // ArabicNumber
- CS // CommonSeparator
- B // ParagraphSeparator
- S // SegmentSeparator
- WS // WhiteSpace
- ON // OtherNeutral
- BN // BoundaryNeutral
- NSM // NonspacingMark
- AL // ArabicLetter
- Control // Control LRO - PDI
-
- numClass
-
- LRO // LeftToRightOverride
- RLO // RightToLeftOverride
- LRE // LeftToRightEmbedding
- RLE // RightToLeftEmbedding
- PDF // PopDirectionalFormat
- LRI // LeftToRightIsolate
- RLI // RightToLeftIsolate
- FSI // FirstStrongIsolate
- PDI // PopDirectionalIsolate
-
- unknownClass = ^Class(0)
-)
-
-var controlToClass = map[rune]Class{
- 0x202D: LRO, // LeftToRightOverride,
- 0x202E: RLO, // RightToLeftOverride,
- 0x202A: LRE, // LeftToRightEmbedding,
- 0x202B: RLE, // RightToLeftEmbedding,
- 0x202C: PDF, // PopDirectionalFormat,
- 0x2066: LRI, // LeftToRightIsolate,
- 0x2067: RLI, // RightToLeftIsolate,
- 0x2068: FSI, // FirstStrongIsolate,
- 0x2069: PDI, // PopDirectionalIsolate,
-}
-
-// A trie entry has the following bits:
-// 7..5 XOR mask for brackets
-// 4 1: Bracket open, 0: Bracket close
-// 3..0 Class type
-
-const (
- openMask = 0x10
- xorMaskShift = 5
-)
diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go
deleted file mode 100644
index 30a3aa933..000000000
--- a/vendor/golang.org/x/text/unicode/norm/maketables.go
+++ /dev/null
@@ -1,986 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Normalization table generator.
-// Data read from the web.
-// See forminfo.go for a description of the trie values associated with each rune.
-
-package main
-
-import (
- "bytes"
- "encoding/binary"
- "flag"
- "fmt"
- "io"
- "log"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/gen"
- "golang.org/x/text/internal/triegen"
- "golang.org/x/text/internal/ucd"
-)
-
-func main() {
- gen.Init()
- loadUnicodeData()
- compactCCC()
- loadCompositionExclusions()
- completeCharFields(FCanonical)
- completeCharFields(FCompatibility)
- computeNonStarterCounts()
- verifyComputed()
- printChars()
- testDerived()
- printTestdata()
- makeTables()
-}
-
-var (
- tablelist = flag.String("tables",
- "all",
- "comma-separated list of which tables to generate; "+
- "can be 'decomp', 'recomp', 'info' and 'all'")
- test = flag.Bool("test",
- false,
- "test existing tables against DerivedNormalizationProps and generate test data for regression testing")
- verbose = flag.Bool("verbose",
- false,
- "write data to stdout as it is parsed")
-)
-
-const MaxChar = 0x10FFFF // anything above this shouldn't exist
-
-// Quick Check properties of runes allow us to quickly
-// determine whether a rune may occur in a normal form.
-// For a given normal form, a rune may be guaranteed to occur
-// verbatim (QC=Yes), may or may not combine with another
-// rune (QC=Maybe), or may not occur (QC=No).
-type QCResult int
-
-const (
- QCUnknown QCResult = iota
- QCYes
- QCNo
- QCMaybe
-)
-
-func (r QCResult) String() string {
- switch r {
- case QCYes:
- return "Yes"
- case QCNo:
- return "No"
- case QCMaybe:
- return "Maybe"
- }
- return "***UNKNOWN***"
-}
-
-const (
- FCanonical = iota // NFC or NFD
- FCompatibility // NFKC or NFKD
- FNumberOfFormTypes
-)
-
-const (
- MComposed = iota // NFC or NFKC
- MDecomposed // NFD or NFKD
- MNumberOfModes
-)
-
-// This contains only the properties we're interested in.
-type Char struct {
- name string
- codePoint rune // if zero, this index is not a valid code point.
- ccc uint8 // canonical combining class
- origCCC uint8
- excludeInComp bool // from CompositionExclusions.txt
- compatDecomp bool // it has a compatibility expansion
-
- nTrailingNonStarters uint8
- nLeadingNonStarters uint8 // must be equal to trailing if non-zero
-
- forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility
-
- state State
-}
-
-var chars = make([]Char, MaxChar+1)
-var cccMap = make(map[uint8]uint8)
-
-func (c Char) String() string {
- buf := new(bytes.Buffer)
-
- fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name)
- fmt.Fprintf(buf, " ccc: %v\n", c.ccc)
- fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp)
- fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp)
- fmt.Fprintf(buf, " state: %v\n", c.state)
- fmt.Fprintf(buf, " NFC:\n")
- fmt.Fprint(buf, c.forms[FCanonical])
- fmt.Fprintf(buf, " NFKC:\n")
- fmt.Fprint(buf, c.forms[FCompatibility])
-
- return buf.String()
-}
-
-// In UnicodeData.txt, some ranges are marked like this:
-// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
-// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
-// parseCharacter keeps a state variable indicating the weirdness.
-type State int
-
-const (
- SNormal State = iota // known to be zero for the type
- SFirst
- SLast
- SMissing
-)
-
-var lastChar = rune('\u0000')
-
-func (c Char) isValid() bool {
- return c.codePoint != 0 && c.state != SMissing
-}
-
-type FormInfo struct {
- quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed
- verified [MNumberOfModes]bool // index: MComposed or MDecomposed
-
- combinesForward bool // May combine with rune on the right
- combinesBackward bool // May combine with rune on the left
- isOneWay bool // Never appears in result
- inDecomp bool // Some decompositions result in this char.
- decomp Decomposition
- expandedDecomp Decomposition
-}
-
-func (f FormInfo) String() string {
- buf := bytes.NewBuffer(make([]byte, 0))
-
- fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed])
- fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed])
- fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward)
- fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward)
- fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay)
- fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp)
- fmt.Fprintf(buf, " decomposition: %X\n", f.decomp)
- fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp)
-
- return buf.String()
-}
-
-type Decomposition []rune
-
-func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
- decomp := strings.Split(s, " ")
- if len(decomp) > 0 && skipfirst {
- decomp = decomp[1:]
- }
- for _, d := range decomp {
- point, err := strconv.ParseUint(d, 16, 64)
- if err != nil {
- return a, err
- }
- a = append(a, rune(point))
- }
- return a, nil
-}
-
-func loadUnicodeData() {
- f := gen.OpenUCDFile("UnicodeData.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- r := p.Rune(ucd.CodePoint)
- char := &chars[r]
-
- char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
- decmap := p.String(ucd.DecompMapping)
-
- exp, err := parseDecomposition(decmap, false)
- isCompat := false
- if err != nil {
- if len(decmap) > 0 {
- exp, err = parseDecomposition(decmap, true)
- if err != nil {
- log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
- }
- isCompat = true
- }
- }
-
- char.name = p.String(ucd.Name)
- char.codePoint = r
- char.forms[FCompatibility].decomp = exp
- if !isCompat {
- char.forms[FCanonical].decomp = exp
- } else {
- char.compatDecomp = true
- }
- if len(decmap) > 0 {
- char.forms[FCompatibility].decomp = exp
- }
- }
- if err := p.Err(); err != nil {
- log.Fatal(err)
- }
-}
-
-// compactCCC converts the sparse set of CCC values to a contiguous one,
-// reducing the number of bits needed from 8 to 6.
-func compactCCC() {
- m := make(map[uint8]uint8)
- for i := range chars {
- c := &chars[i]
- m[c.ccc] = 0
- }
- cccs := []int{}
- for v, _ := range m {
- cccs = append(cccs, int(v))
- }
- sort.Ints(cccs)
- for i, c := range cccs {
- cccMap[uint8(i)] = uint8(c)
- m[uint8(c)] = uint8(i)
- }
- for i := range chars {
- c := &chars[i]
- c.origCCC = c.ccc
- c.ccc = m[c.ccc]
- }
- if len(m) >= 1<<6 {
- log.Fatalf("too many difference CCC values: %d >= 64", len(m))
- }
-}
-
-// CompositionExclusions.txt has form:
-// 0958 # ...
-// See https://unicode.org/reports/tr44/ for full explanation
-func loadCompositionExclusions() {
- f := gen.OpenUCDFile("CompositionExclusions.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- c := &chars[p.Rune(0)]
- if c.excludeInComp {
- log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
- }
- c.excludeInComp = true
- }
- if e := p.Err(); e != nil {
- log.Fatal(e)
- }
-}
-
-// hasCompatDecomp returns true if any of the recursive
-// decompositions contains a compatibility expansion.
-// In this case, the character may not occur in NFK*.
-func hasCompatDecomp(r rune) bool {
- c := &chars[r]
- if c.compatDecomp {
- return true
- }
- for _, d := range c.forms[FCompatibility].decomp {
- if hasCompatDecomp(d) {
- return true
- }
- }
- return false
-}
-
-// Hangul related constants.
-const (
- HangulBase = 0xAC00
- HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28)
-
- JamoLBase = 0x1100
- JamoLEnd = 0x1113
- JamoVBase = 0x1161
- JamoVEnd = 0x1176
- JamoTBase = 0x11A8
- JamoTEnd = 0x11C3
-
- JamoLVTCount = 19 * 21 * 28
- JamoTCount = 28
-)
-
-func isHangul(r rune) bool {
- return HangulBase <= r && r < HangulEnd
-}
-
-func isHangulWithoutJamoT(r rune) bool {
- if !isHangul(r) {
- return false
- }
- r -= HangulBase
- return r < JamoLVTCount && r%JamoTCount == 0
-}
-
-func ccc(r rune) uint8 {
- return chars[r].ccc
-}
-
-// Insert a rune in a buffer, ordered by Canonical Combining Class.
-func insertOrdered(b Decomposition, r rune) Decomposition {
- n := len(b)
- b = append(b, 0)
- cc := ccc(r)
- if cc > 0 {
- // Use bubble sort.
- for ; n > 0; n-- {
- if ccc(b[n-1]) <= cc {
- break
- }
- b[n] = b[n-1]
- }
- }
- b[n] = r
- return b
-}
-
-// Recursively decompose.
-func decomposeRecursive(form int, r rune, d Decomposition) Decomposition {
- dcomp := chars[r].forms[form].decomp
- if len(dcomp) == 0 {
- return insertOrdered(d, r)
- }
- for _, c := range dcomp {
- d = decomposeRecursive(form, c, d)
- }
- return d
-}
-
-func completeCharFields(form int) {
- // Phase 0: pre-expand decomposition.
- for i := range chars {
- f := &chars[i].forms[form]
- if len(f.decomp) == 0 {
- continue
- }
- exp := make(Decomposition, 0)
- for _, c := range f.decomp {
- exp = decomposeRecursive(form, c, exp)
- }
- f.expandedDecomp = exp
- }
-
- // Phase 1: composition exclusion, mark decomposition.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- // Marks script-specific exclusions and version restricted.
- f.isOneWay = c.excludeInComp
-
- // Singletons
- f.isOneWay = f.isOneWay || len(f.decomp) == 1
-
- // Non-starter decompositions
- if len(f.decomp) > 1 {
- chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0
- f.isOneWay = f.isOneWay || chk
- }
-
- // Runes that decompose into more than two runes.
- f.isOneWay = f.isOneWay || len(f.decomp) > 2
-
- if form == FCompatibility {
- f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint)
- }
-
- for _, r := range f.decomp {
- chars[r].forms[form].inDecomp = true
- }
- }
-
- // Phase 2: forward and backward combining.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- if !f.isOneWay && len(f.decomp) == 2 {
- f0 := &chars[f.decomp[0]].forms[form]
- f1 := &chars[f.decomp[1]].forms[form]
- if !f0.isOneWay {
- f0.combinesForward = true
- }
- if !f1.isOneWay {
- f1.combinesBackward = true
- }
- }
- if isHangulWithoutJamoT(rune(i)) {
- f.combinesForward = true
- }
- }
-
- // Phase 3: quick check values.
- for i := range chars {
- c := &chars[i]
- f := &c.forms[form]
-
- switch {
- case len(f.decomp) > 0:
- f.quickCheck[MDecomposed] = QCNo
- case isHangul(rune(i)):
- f.quickCheck[MDecomposed] = QCNo
- default:
- f.quickCheck[MDecomposed] = QCYes
- }
- switch {
- case f.isOneWay:
- f.quickCheck[MComposed] = QCNo
- case (i & 0xffff00) == JamoLBase:
- f.quickCheck[MComposed] = QCYes
- if JamoLBase <= i && i < JamoLEnd {
- f.combinesForward = true
- }
- if JamoVBase <= i && i < JamoVEnd {
- f.quickCheck[MComposed] = QCMaybe
- f.combinesBackward = true
- f.combinesForward = true
- }
- if JamoTBase <= i && i < JamoTEnd {
- f.quickCheck[MComposed] = QCMaybe
- f.combinesBackward = true
- }
- case !f.combinesBackward:
- f.quickCheck[MComposed] = QCYes
- default:
- f.quickCheck[MComposed] = QCMaybe
- }
- }
-}
-
-func computeNonStarterCounts() {
- // Phase 4: leading and trailing non-starter count
- for i := range chars {
- c := &chars[i]
-
- runes := []rune{rune(i)}
- // We always use FCompatibility so that the CGJ insertion points do not
- // change for repeated normalizations with different forms.
- if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
- runes = exp
- }
- // We consider runes that combine backwards to be non-starters for the
- // purpose of Stream-Safe Text Processing.
- for _, r := range runes {
- if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
- break
- }
- c.nLeadingNonStarters++
- }
- for i := len(runes) - 1; i >= 0; i-- {
- if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
- break
- }
- c.nTrailingNonStarters++
- }
- if c.nTrailingNonStarters > 3 {
- log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
- }
-
- if isHangul(rune(i)) {
- c.nTrailingNonStarters = 2
- if isHangulWithoutJamoT(rune(i)) {
- c.nTrailingNonStarters = 1
- }
- }
-
- if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t {
- log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t)
- }
- if t := c.nTrailingNonStarters; t > 3 {
- log.Fatalf("%U: number of trailing non-starters is %d > 3", t)
- }
- }
-}
-
-func printBytes(w io.Writer, b []byte, name string) {
- fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
- fmt.Fprintf(w, "var %s = [...]byte {", name)
- for i, c := range b {
- switch {
- case i%64 == 0:
- fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
- case i%8 == 0:
- fmt.Fprintf(w, "\n")
- }
- fmt.Fprintf(w, "0x%.2X, ", c)
- }
- fmt.Fprint(w, "\n}\n\n")
-}
-
-// See forminfo.go for format.
-func makeEntry(f *FormInfo, c *Char) uint16 {
- e := uint16(0)
- if r := c.codePoint; HangulBase <= r && r < HangulEnd {
- e |= 0x40
- }
- if f.combinesForward {
- e |= 0x20
- }
- if f.quickCheck[MDecomposed] == QCNo {
- e |= 0x4
- }
- switch f.quickCheck[MComposed] {
- case QCYes:
- case QCNo:
- e |= 0x10
- case QCMaybe:
- e |= 0x18
- default:
- log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed])
- }
- e |= uint16(c.nTrailingNonStarters)
- return e
-}
-
-// decompSet keeps track of unique decompositions, grouped by whether
-// the decomposition is followed by a trailing and/or leading CCC.
-type decompSet [7]map[string]bool
-
-const (
- normalDecomp = iota
- firstMulti
- firstCCC
- endMulti
- firstLeadingCCC
- firstCCCZeroExcept
- firstStarterWithNLead
- lastDecomp
-)
-
-var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"}
-
-func makeDecompSet() decompSet {
- m := decompSet{}
- for i := range m {
- m[i] = make(map[string]bool)
- }
- return m
-}
-func (m *decompSet) insert(key int, s string) {
- m[key][s] = true
-}
-
-func printCharInfoTables(w io.Writer) int {
- mkstr := func(r rune, f *FormInfo) (int, string) {
- d := f.expandedDecomp
- s := string([]rune(d))
- if max := 1 << 6; len(s) >= max {
- const msg = "%U: too many bytes in decomposition: %d >= %d"
- log.Fatalf(msg, r, len(s), max)
- }
- head := uint8(len(s))
- if f.quickCheck[MComposed] != QCYes {
- head |= 0x40
- }
- if f.combinesForward {
- head |= 0x80
- }
- s = string([]byte{head}) + s
-
- lccc := ccc(d[0])
- tccc := ccc(d[len(d)-1])
- cc := ccc(r)
- if cc != 0 && lccc == 0 && tccc == 0 {
- log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
- }
- if tccc < lccc && lccc != 0 {
- const msg = "%U: lccc (%d) must be <= tcc (%d)"
- log.Fatalf(msg, r, lccc, tccc)
- }
- index := normalDecomp
- nTrail := chars[r].nTrailingNonStarters
- nLead := chars[r].nLeadingNonStarters
- if tccc > 0 || lccc > 0 || nTrail > 0 {
- tccc <<= 2
- tccc |= nTrail
- s += string([]byte{tccc})
- index = endMulti
- for _, r := range d[1:] {
- if ccc(r) == 0 {
- index = firstCCC
- }
- }
- if lccc > 0 || nLead > 0 {
- s += string([]byte{lccc})
- if index == firstCCC {
- log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
- }
- index = firstLeadingCCC
- }
- if cc != lccc {
- if cc != 0 {
- log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
- }
- index = firstCCCZeroExcept
- }
- } else if len(d) > 1 {
- index = firstMulti
- }
- return index, s
- }
-
- decompSet := makeDecompSet()
- const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail.
- decompSet.insert(firstStarterWithNLead, nLeadStr)
-
- // Store the uniqued decompositions in a byte buffer,
- // preceded by their byte length.
- for _, c := range chars {
- for _, f := range c.forms {
- if len(f.expandedDecomp) == 0 {
- continue
- }
- if f.combinesBackward {
- log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
- }
- index, s := mkstr(c.codePoint, &f)
- decompSet.insert(index, s)
- }
- }
-
- decompositions := bytes.NewBuffer(make([]byte, 0, 10000))
- size := 0
- positionMap := make(map[string]uint16)
- decompositions.WriteString("\000")
- fmt.Fprintln(w, "const (")
- for i, m := range decompSet {
- sa := []string{}
- for s := range m {
- sa = append(sa, s)
- }
- sort.Strings(sa)
- for _, s := range sa {
- p := decompositions.Len()
- decompositions.WriteString(s)
- positionMap[s] = uint16(p)
- }
- if cname[i] != "" {
- fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
- }
- }
- fmt.Fprintln(w, "maxDecomp = 0x8000")
- fmt.Fprintln(w, ")")
- b := decompositions.Bytes()
- printBytes(w, b, "decomps")
- size += len(b)
-
- varnames := []string{"nfc", "nfkc"}
- for i := 0; i < FNumberOfFormTypes; i++ {
- trie := triegen.NewTrie(varnames[i])
-
- for r, c := range chars {
- f := c.forms[i]
- d := f.expandedDecomp
- if len(d) != 0 {
- _, key := mkstr(c.codePoint, &f)
- trie.Insert(rune(r), uint64(positionMap[key]))
- if c.ccc != ccc(d[0]) {
- // We assume the lead ccc of a decomposition !=0 in this case.
- if ccc(d[0]) == 0 {
- log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
- }
- }
- } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
- // Handle cases where it can't be detected that the nLead should be equal
- // to nTrail.
- trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
- } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
- trie.Insert(c.codePoint, uint64(0x8000|v))
- }
- }
- sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
- if err != nil {
- log.Fatal(err)
- }
- size += sz
- }
- return size
-}
-
-func contains(sa []string, s string) bool {
- for _, a := range sa {
- if a == s {
- return true
- }
- }
- return false
-}
-
-func makeTables() {
- w := &bytes.Buffer{}
-
- size := 0
- if *tablelist == "" {
- return
- }
- list := strings.Split(*tablelist, ",")
- if *tablelist == "all" {
- list = []string{"recomp", "info"}
- }
-
- // Compute maximum decomposition size.
- max := 0
- for _, c := range chars {
- if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max {
- max = n
- }
- }
- fmt.Fprintln(w, `import "sync"`)
- fmt.Fprintln(w)
-
- fmt.Fprintln(w, "const (")
- fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
- fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
- fmt.Fprintln(w)
- fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
- fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
- fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
- fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
- fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
- fmt.Fprintln(w, ")\n")
-
- // Print the CCC remap table.
- size += len(cccMap)
- fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
- for i := 0; i < len(cccMap); i++ {
- if i%8 == 0 {
- fmt.Fprintln(w)
- }
- fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
- }
- fmt.Fprintln(w, "\n}\n")
-
- if contains(list, "info") {
- size += printCharInfoTables(w)
- }
-
- if contains(list, "recomp") {
- // Note that we use 32 bit keys, instead of 64 bit.
- // This clips the bits of three entries, but we know
- // this won't cause a collision. The compiler will catch
- // any changes made to UnicodeData.txt that introduces
- // a collision.
- // Note that the recomposition map for NFC and NFKC
- // are identical.
-
- // Recomposition map
- nrentries := 0
- for _, c := range chars {
- f := c.forms[FCanonical]
- if !f.isOneWay && len(f.decomp) > 0 {
- nrentries++
- }
- }
- sz := nrentries * 8
- size += sz
- fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
- fmt.Fprintln(w, "var recompMap map[uint32]rune")
- fmt.Fprintln(w, "var recompMapOnce sync.Once\n")
- fmt.Fprintln(w, `const recompMapPacked = "" +`)
- var buf [8]byte
- for i, c := range chars {
- f := c.forms[FCanonical]
- d := f.decomp
- if !f.isOneWay && len(d) > 0 {
- key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
- binary.BigEndian.PutUint32(buf[:4], key)
- binary.BigEndian.PutUint32(buf[4:], uint32(i))
- fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i))
- }
- }
- // hack so we don't have to special case the trailing plus sign
- fmt.Fprintf(w, ` ""`)
- fmt.Fprintln(w)
- }
-
- fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
- gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
-}
-
-func printChars() {
- if *verbose {
- for _, c := range chars {
- if !c.isValid() || c.state == SMissing {
- continue
- }
- fmt.Println(c)
- }
- }
-}
-
-// verifyComputed does various consistency tests.
-func verifyComputed() {
- for i, c := range chars {
- for _, f := range c.forms {
- isNo := (f.quickCheck[MDecomposed] == QCNo)
- if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) {
- log.Fatalf("%U: NF*D QC must be No if rune decomposes", i)
- }
-
- isMaybe := f.quickCheck[MComposed] == QCMaybe
- if f.combinesBackward != isMaybe {
- log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i)
- }
- if len(f.decomp) > 0 && f.combinesForward && isMaybe {
- log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i)
- }
-
- if len(f.expandedDecomp) != 0 {
- continue
- }
- if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
- // We accept these runes to be treated differently (it only affects
- // segment breaking in iteration, most likely on improper use), but
- // reconsider if more characters are added.
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
- // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
- // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
- // U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
- // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
- // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
- if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
- log.Fatalf("%U: nLead was %v; want %v", i, a, b)
- }
- }
- }
- nfc := c.forms[FCanonical]
- nfkc := c.forms[FCompatibility]
- if nfc.combinesBackward != nfkc.combinesBackward {
- log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
- }
- }
-}
-
-// Use values in DerivedNormalizationProps.txt to compare against the
-// values we computed.
-// DerivedNormalizationProps.txt has form:
-// 00C0..00C5 ; NFD_QC; N # ...
-// 0374 ; NFD_QC; N # ...
-// See https://unicode.org/reports/tr44/ for full explanation
-func testDerived() {
- f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
- defer f.Close()
- p := ucd.New(f)
- for p.Next() {
- r := p.Rune(0)
- c := &chars[r]
-
- var ftype, mode int
- qt := p.String(1)
- switch qt {
- case "NFC_QC":
- ftype, mode = FCanonical, MComposed
- case "NFD_QC":
- ftype, mode = FCanonical, MDecomposed
- case "NFKC_QC":
- ftype, mode = FCompatibility, MComposed
- case "NFKD_QC":
- ftype, mode = FCompatibility, MDecomposed
- default:
- continue
- }
- var qr QCResult
- switch p.String(2) {
- case "Y":
- qr = QCYes
- case "N":
- qr = QCNo
- case "M":
- qr = QCMaybe
- default:
- log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
- }
- if got := c.forms[ftype].quickCheck[mode]; got != qr {
- log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
- }
- c.forms[ftype].verified[mode] = true
- }
- if err := p.Err(); err != nil {
- log.Fatal(err)
- }
- // Any unspecified value must be QCYes. Verify this.
- for i, c := range chars {
- for j, fd := range c.forms {
- for k, qr := range fd.quickCheck {
- if !fd.verified[k] && qr != QCYes {
- m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
- log.Printf(m, i, j, k, qr, c.name)
- }
- }
- }
- }
-}
-
-var testHeader = `const (
- Yes = iota
- No
- Maybe
-)
-
-type formData struct {
- qc uint8
- combinesForward bool
- decomposition string
-}
-
-type runeData struct {
- r rune
- ccc uint8
- nLead uint8
- nTrail uint8
- f [2]formData // 0: canonical; 1: compatibility
-}
-
-func f(qc uint8, cf bool, dec string) [2]formData {
- return [2]formData{{qc, cf, dec}, {qc, cf, dec}}
-}
-
-func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData {
- return [2]formData{{qc, cf, d}, {qck, cfk, dk}}
-}
-
-var testData = []runeData{
-`
-
-func printTestdata() {
- type lastInfo struct {
- ccc uint8
- nLead uint8
- nTrail uint8
- f string
- }
-
- last := lastInfo{}
- w := &bytes.Buffer{}
- fmt.Fprintf(w, testHeader)
- for r, c := range chars {
- f := c.forms[FCanonical]
- qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
- f = c.forms[FCompatibility]
- qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
- s := ""
- if d == dk && qc == qck && cf == cfk {
- s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d)
- } else {
- s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk)
- }
- current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
- if last != current {
- fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
- last = current
- }
- }
- fmt.Fprintln(w, "}")
- gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
-}
diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go
deleted file mode 100644
index 45d711900..000000000
--- a/vendor/golang.org/x/text/unicode/norm/triegen.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Trie table generator.
-// Used by make*tables tools to generate a go file with trie data structures
-// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
-// sequence are used to lookup offsets in the index table to be used for the
-// next byte. The last byte is used to index into a table with 16-bit values.
-
-package main
-
-import (
- "fmt"
- "io"
-)
-
-const maxSparseEntries = 16
-
-type normCompacter struct {
- sparseBlocks [][]uint64
- sparseOffset []uint16
- sparseCount int
- name string
-}
-
-func mostFrequentStride(a []uint64) int {
- counts := make(map[int]int)
- var v int
- for _, x := range a {
- if stride := int(x) - v; v != 0 && stride >= 0 {
- counts[stride]++
- }
- v = int(x)
- }
- var maxs, maxc int
- for stride, cnt := range counts {
- if cnt > maxc || (cnt == maxc && stride < maxs) {
- maxs, maxc = stride, cnt
- }
- }
- return maxs
-}
-
-func countSparseEntries(a []uint64) int {
- stride := mostFrequentStride(a)
- var v, count int
- for _, tv := range a {
- if int(tv)-v != stride {
- if tv != 0 {
- count++
- }
- }
- v = int(tv)
- }
- return count
-}
-
-func (c *normCompacter) Size(v []uint64) (sz int, ok bool) {
- if n := countSparseEntries(v); n <= maxSparseEntries {
- return (n+1)*4 + 2, true
- }
- return 0, false
-}
-
-func (c *normCompacter) Store(v []uint64) uint32 {
- h := uint32(len(c.sparseOffset))
- c.sparseBlocks = append(c.sparseBlocks, v)
- c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount))
- c.sparseCount += countSparseEntries(v) + 1
- return h
-}
-
-func (c *normCompacter) Handler() string {
- return c.name + "Sparse.lookup"
-}
-
-func (c *normCompacter) Print(w io.Writer) (retErr error) {
- p := func(f string, x ...interface{}) {
- if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil {
- retErr = err
- }
- }
-
- ls := len(c.sparseBlocks)
- p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2)
- p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset)
-
- ns := c.sparseCount
- p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4)
- p("var %sSparseValues = [%d]valueRange {", c.name, ns)
- for i, b := range c.sparseBlocks {
- p("\n// Block %#x, offset %#x", i, c.sparseOffset[i])
- var v int
- stride := mostFrequentStride(b)
- n := countSparseEntries(b)
- p("\n{value:%#04x,lo:%#02x},", stride, uint8(n))
- for i, nv := range b {
- if int(nv)-v != stride {
- if v != 0 {
- p(",hi:%#02x},", 0x80+i-1)
- }
- if nv != 0 {
- p("\n{value:%#04x,lo:%#02x", nv, 0x80+i)
- }
- }
- v = int(nv)
- }
- if v != 0 {
- p(",hi:%#02x},", 0x80+len(b)-1)
- }
- }
- p("\n}\n\n")
- return
-}
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index 531087655..129bc2a97 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -319,10 +319,14 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
}
const (
- // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
alias_ratio_range_low = 400000
- // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
alias_ratio_range_high = 4000000
+
// alias_ratio_range is the range over which we scale allowed alias ratios
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)
@@ -784,8 +788,7 @@ func (d *decoder) merge(n *node, out reflect.Value) {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
- an, ok := d.doc.anchors[n.value]
- if ok && an.kind != mappingNode {
+ if n.alias != nil && n.alias.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
@@ -794,8 +797,7 @@ func (d *decoder) merge(n *node, out reflect.Value) {
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
- an, ok := d.doc.anchors[ni.value]
- if ok && an.kind != mappingNode {
+ if ni.alias != nil && ni.alias.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index cda4bcd9e..71c50d78d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,30 +1,30 @@
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
-github.com/Azure/go-ansiterm/winterm
github.com/Azure/go-ansiterm
+github.com/Azure/go-ansiterm/winterm
# github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml
# github.com/Microsoft/go-winio v0.4.14
github.com/Microsoft/go-winio
-github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/archive/tar
github.com/Microsoft/go-winio/backuptar
+github.com/Microsoft/go-winio/pkg/guid
# github.com/Microsoft/hcsshim v0.8.6
-github.com/Microsoft/hcsshim/osversion
github.com/Microsoft/hcsshim
+github.com/Microsoft/hcsshim/internal/guestrequest
github.com/Microsoft/hcsshim/internal/guid
github.com/Microsoft/hcsshim/internal/hcs
github.com/Microsoft/hcsshim/internal/hcserror
github.com/Microsoft/hcsshim/internal/hns
-github.com/Microsoft/hcsshim/internal/mergemaps
-github.com/Microsoft/hcsshim/internal/schema1
-github.com/Microsoft/hcsshim/internal/wclayer
-github.com/Microsoft/hcsshim/internal/guestrequest
github.com/Microsoft/hcsshim/internal/interop
github.com/Microsoft/hcsshim/internal/logfields
-github.com/Microsoft/hcsshim/internal/timeout
-github.com/Microsoft/hcsshim/internal/schema2
github.com/Microsoft/hcsshim/internal/longpath
+github.com/Microsoft/hcsshim/internal/mergemaps
github.com/Microsoft/hcsshim/internal/safefile
+github.com/Microsoft/hcsshim/internal/schema1
+github.com/Microsoft/hcsshim/internal/schema2
+github.com/Microsoft/hcsshim/internal/timeout
+github.com/Microsoft/hcsshim/internal/wclayer
+github.com/Microsoft/hcsshim/osversion
# github.com/VividCortex/ewma v1.1.1
github.com/VividCortex/ewma
# github.com/beorn7/perks v1.0.1
@@ -40,204 +40,205 @@ github.com/checkpoint-restore/go-criu/rpc
github.com/containerd/containerd/errdefs
# github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc
github.com/containerd/continuity/fs
-github.com/containerd/continuity/sysx
github.com/containerd/continuity/syscallx
-# github.com/containernetworking/cni v0.7.1
-github.com/containernetworking/cni/pkg/types
-github.com/containernetworking/cni/pkg/types/current
-github.com/containernetworking/cni/pkg/version
+github.com/containerd/continuity/sysx
+# github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
github.com/containernetworking/cni/libcni
github.com/containernetworking/cni/pkg/invoke
+github.com/containernetworking/cni/pkg/types
github.com/containernetworking/cni/pkg/types/020
+github.com/containernetworking/cni/pkg/types/current
+github.com/containernetworking/cni/pkg/utils
+github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v0.8.2
-github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/ip
-github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
+github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/utils/hwaddr
github.com/containernetworking/plugins/plugins/ipam/host-local/backend
-# github.com/containers/buildah v1.11.4-0.20191028173731-21b4778b359e
+github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
+# github.com/containers/buildah v1.11.5-0.20191031204705-20e92ffe0982
github.com/containers/buildah
-github.com/containers/buildah/imagebuildah
-github.com/containers/buildah/pkg/chrootuser
-github.com/containers/buildah/pkg/cli
-github.com/containers/buildah/pkg/formats
-github.com/containers/buildah/util
-github.com/containers/buildah/pkg/secrets
-github.com/containers/buildah/pkg/parse
github.com/containers/buildah/bind
github.com/containers/buildah/chroot
github.com/containers/buildah/docker
+github.com/containers/buildah/imagebuildah
github.com/containers/buildah/pkg/blobcache
github.com/containers/buildah/pkg/cgroups
+github.com/containers/buildah/pkg/chrootuser
+github.com/containers/buildah/pkg/cli
+github.com/containers/buildah/pkg/formats
github.com/containers/buildah/pkg/overlay
-github.com/containers/buildah/pkg/unshare
+github.com/containers/buildah/pkg/parse
+github.com/containers/buildah/pkg/secrets
github.com/containers/buildah/pkg/umask
+github.com/containers/buildah/pkg/unshare
+github.com/containers/buildah/util
# github.com/containers/image/v5 v5.0.0
+github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
+github.com/containers/image/v5/directory/explicitfilepath
github.com/containers/image/v5/docker
github.com/containers/image/v5/docker/archive
-github.com/containers/image/v5/manifest
-github.com/containers/image/v5/pkg/docker/config
-github.com/containers/image/v5/signature
-github.com/containers/image/v5/transports
-github.com/containers/image/v5/transports/alltransports
-github.com/containers/image/v5/types
-github.com/containers/image/v5/oci/archive
-github.com/containers/image/v5/storage
-github.com/containers/image/v5/copy
+github.com/containers/image/v5/docker/daemon
+github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/docker/tarfile
github.com/containers/image/v5/image
-github.com/containers/image/v5/oci/layout
-github.com/containers/image/v5/tarball
-github.com/containers/image/v5/pkg/sysregistriesv2
-github.com/containers/image/v5/directory/explicitfilepath
-github.com/containers/image/v5/docker/policyconfiguration
-github.com/containers/image/v5/pkg/blobinfocache/none
-github.com/containers/image/v5/pkg/tlsclientconfig
-github.com/containers/image/v5/pkg/compression
-github.com/containers/image/v5/pkg/strslice
github.com/containers/image/v5/internal/pkg/keyctl
-github.com/containers/image/v5/version
-github.com/containers/image/v5/docker/daemon
-github.com/containers/image/v5/openshift
-github.com/containers/image/v5/ostree
-github.com/containers/image/v5/pkg/compression/types
github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/manifest
+github.com/containers/image/v5/oci/archive
github.com/containers/image/v5/oci/internal
+github.com/containers/image/v5/oci/layout
+github.com/containers/image/v5/openshift
+github.com/containers/image/v5/ostree
github.com/containers/image/v5/pkg/blobinfocache
-github.com/containers/image/v5/pkg/compression/internal
github.com/containers/image/v5/pkg/blobinfocache/boltdb
-github.com/containers/image/v5/pkg/blobinfocache/memory
github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize
+github.com/containers/image/v5/pkg/blobinfocache/memory
+github.com/containers/image/v5/pkg/blobinfocache/none
+github.com/containers/image/v5/pkg/compression
+github.com/containers/image/v5/pkg/compression/internal
+github.com/containers/image/v5/pkg/compression/types
+github.com/containers/image/v5/pkg/docker/config
+github.com/containers/image/v5/pkg/strslice
+github.com/containers/image/v5/pkg/sysregistriesv2
+github.com/containers/image/v5/pkg/tlsclientconfig
+github.com/containers/image/v5/signature
+github.com/containers/image/v5/storage
+github.com/containers/image/v5/tarball
+github.com/containers/image/v5/transports
+github.com/containers/image/v5/transports/alltransports
+github.com/containers/image/v5/types
+github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b
github.com/containers/libtrust
# github.com/containers/psgo v1.3.2
github.com/containers/psgo
github.com/containers/psgo/internal/capabilities
+github.com/containers/psgo/internal/cgroups
github.com/containers/psgo/internal/dev
+github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-github.com/containers/psgo/internal/cgroups
-github.com/containers/psgo/internal/host
# github.com/containers/storage v1.13.5
github.com/containers/storage
-github.com/containers/storage/pkg/archive
-github.com/containers/storage/pkg/chrootarchive
-github.com/containers/storage/pkg/idtools
-github.com/containers/storage/pkg/reexec
-github.com/containers/storage/pkg/mount
-github.com/containers/storage/pkg/stringid
-github.com/containers/storage/pkg/system
-github.com/containers/storage/pkg/truncindex
-github.com/containers/storage/pkg/parsers/kernel
-github.com/containers/storage/pkg/fileutils
-github.com/containers/storage/pkg/ioutils
-github.com/containers/storage/pkg/pools
-github.com/containers/storage/pkg/homedir
github.com/containers/storage/drivers
-github.com/containers/storage/drivers/register
-github.com/containers/storage/pkg/config
-github.com/containers/storage/pkg/directory
-github.com/containers/storage/pkg/lockfile
-github.com/containers/storage/pkg/parsers
-github.com/containers/storage/pkg/stringutils
-github.com/containers/storage/pkg/tarlog
-github.com/containers/storage/pkg/longpath
-github.com/containers/storage/pkg/promise
github.com/containers/storage/drivers/aufs
github.com/containers/storage/drivers/btrfs
+github.com/containers/storage/drivers/copy
github.com/containers/storage/drivers/devmapper
github.com/containers/storage/drivers/overlay
+github.com/containers/storage/drivers/overlayutils
+github.com/containers/storage/drivers/quota
+github.com/containers/storage/drivers/register
github.com/containers/storage/drivers/vfs
github.com/containers/storage/drivers/windows
github.com/containers/storage/drivers/zfs
-github.com/containers/storage/pkg/locker
+github.com/containers/storage/pkg/archive
+github.com/containers/storage/pkg/chrootarchive
+github.com/containers/storage/pkg/config
github.com/containers/storage/pkg/devicemapper
+github.com/containers/storage/pkg/directory
github.com/containers/storage/pkg/dmesg
-github.com/containers/storage/pkg/loopback
-github.com/containers/storage/drivers/overlayutils
-github.com/containers/storage/drivers/quota
+github.com/containers/storage/pkg/fileutils
github.com/containers/storage/pkg/fsutils
-github.com/containers/storage/drivers/copy
+github.com/containers/storage/pkg/homedir
+github.com/containers/storage/pkg/idtools
+github.com/containers/storage/pkg/ioutils
+github.com/containers/storage/pkg/locker
+github.com/containers/storage/pkg/lockfile
+github.com/containers/storage/pkg/longpath
+github.com/containers/storage/pkg/loopback
+github.com/containers/storage/pkg/mount
+github.com/containers/storage/pkg/parsers
+github.com/containers/storage/pkg/parsers/kernel
+github.com/containers/storage/pkg/pools
+github.com/containers/storage/pkg/promise
+github.com/containers/storage/pkg/reexec
+github.com/containers/storage/pkg/stringid
+github.com/containers/storage/pkg/stringutils
+github.com/containers/storage/pkg/system
+github.com/containers/storage/pkg/tarlog
+github.com/containers/storage/pkg/truncindex
# github.com/coreos/go-iptables v0.4.2
github.com/coreos/go-iptables/iptables
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/coreos/go-systemd/activation
github.com/coreos/go-systemd/dbus
-github.com/coreos/go-systemd/sdjournal
github.com/coreos/go-systemd/journal
+github.com/coreos/go-systemd/sdjournal
# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
github.com/coreos/pkg/dlopen
-# github.com/cri-o/ocicni v0.1.1-0.20190702175919-7762645d18ca
+# github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
github.com/cri-o/ocicni/pkg/ocicni
# github.com/cyphar/filepath-securejoin v0.2.2
github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
# github.com/docker/distribution v2.7.1+incompatible
+github.com/docker/distribution
+github.com/docker/distribution/digestset
+github.com/docker/distribution/metrics
github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client
-github.com/docker/distribution/digestset
-github.com/docker/distribution
github.com/docker/distribution/registry/client/auth/challenge
github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
-github.com/docker/distribution/metrics
# github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce
-github.com/docker/docker/pkg/signal
-github.com/docker/docker/pkg/homedir
-github.com/docker/docker/oci/caps
-github.com/docker/docker/pkg/namesgenerator
-github.com/docker/docker/pkg/term
-github.com/docker/docker/pkg/ioutils
-github.com/docker/docker/pkg/parsers
-github.com/docker/docker/api/types/versions
-github.com/docker/docker/errdefs
-github.com/docker/docker/pkg/term/windows
-github.com/docker/docker/pkg/longpath
-github.com/docker/docker/api/types/registry
-github.com/docker/docker/api/types/swarm
-github.com/docker/docker/pkg/archive
-github.com/docker/docker/pkg/fileutils
-github.com/docker/docker/pkg/jsonmessage
-github.com/docker/docker/pkg/stdcopy
-github.com/docker/docker/pkg/system
-github.com/docker/docker/client
-github.com/docker/docker/api/types/container
-github.com/docker/docker/api/types/mount
-github.com/docker/docker/api/types/network
-github.com/docker/docker/api/types/swarm/runtime
-github.com/docker/docker/pkg/idtools
-github.com/docker/docker/pkg/pools
-github.com/docker/docker/pkg/mount
github.com/docker/docker/api
github.com/docker/docker/api/types
+github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/container
github.com/docker/docker/api/types/events
github.com/docker/docker/api/types/filters
github.com/docker/docker/api/types/image
+github.com/docker/docker/api/types/mount
+github.com/docker/docker/api/types/network
+github.com/docker/docker/api/types/registry
+github.com/docker/docker/api/types/strslice
+github.com/docker/docker/api/types/swarm
+github.com/docker/docker/api/types/swarm/runtime
github.com/docker/docker/api/types/time
+github.com/docker/docker/api/types/versions
github.com/docker/docker/api/types/volume
-github.com/docker/docker/api/types/blkiodev
-github.com/docker/docker/api/types/strslice
+github.com/docker/docker/client
+github.com/docker/docker/errdefs
+github.com/docker/docker/oci/caps
+github.com/docker/docker/pkg/archive
+github.com/docker/docker/pkg/fileutils
+github.com/docker/docker/pkg/homedir
+github.com/docker/docker/pkg/idtools
+github.com/docker/docker/pkg/ioutils
+github.com/docker/docker/pkg/jsonmessage
+github.com/docker/docker/pkg/longpath
+github.com/docker/docker/pkg/mount
+github.com/docker/docker/pkg/namesgenerator
+github.com/docker/docker/pkg/parsers
+github.com/docker/docker/pkg/pools
+github.com/docker/docker/pkg/signal
+github.com/docker/docker/pkg/stdcopy
+github.com/docker/docker/pkg/system
+github.com/docker/docker/pkg/term
+github.com/docker/docker/pkg/term/windows
# github.com/docker/docker-credential-helpers v0.6.3
-github.com/docker/docker-credential-helpers/credentials
github.com/docker/docker-credential-helpers/client
+github.com/docker/docker-credential-helpers/credentials
# github.com/docker/go-connections v0.4.0
github.com/docker/go-connections/nat
-github.com/docker/go-connections/tlsconfig
github.com/docker/go-connections/sockets
+github.com/docker/go-connections/tlsconfig
# github.com/docker/go-metrics v0.0.1
github.com/docker/go-metrics
# github.com/docker/go-units v0.4.0
github.com/docker/go-units
# github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/docker/libnetwork/resolvconf
-github.com/docker/libnetwork/types
github.com/docker/libnetwork/resolvconf/dns
+github.com/docker/libnetwork/types
# github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c
github.com/docker/spdystream
github.com/docker/spdystream/spdy
@@ -284,15 +285,15 @@ github.com/imdario/mergo
github.com/inconshreveable/mousetrap
# github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111
github.com/ishidawataru/sctp
-# github.com/json-iterator/go v1.1.7
+# github.com/json-iterator/go v1.1.8
github.com/json-iterator/go
# github.com/klauspost/compress v1.8.1
-github.com/klauspost/compress/zstd
github.com/klauspost/compress/flate
+github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
+github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-github.com/klauspost/compress/fse
# github.com/klauspost/cpuid v1.2.1
github.com/klauspost/cpuid
# github.com/klauspost/pgzip v1.2.1
@@ -317,84 +318,84 @@ github.com/morikuni/aec
github.com/mrunalp/fileutils
# github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
github.com/mtrmac/gpgme
-# github.com/onsi/ginkgo v1.10.1
-github.com/onsi/ginkgo/ginkgo
+# github.com/onsi/ginkgo v1.10.3
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
+github.com/onsi/ginkgo/extensions/table
+github.com/onsi/ginkgo/ginkgo
github.com/onsi/ginkgo/ginkgo/convert
github.com/onsi/ginkgo/ginkgo/interrupthandler
github.com/onsi/ginkgo/ginkgo/nodot
github.com/onsi/ginkgo/ginkgo/testrunner
github.com/onsi/ginkgo/ginkgo/testsuite
github.com/onsi/ginkgo/ginkgo/watch
-github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
-github.com/onsi/ginkgo/types
github.com/onsi/ginkgo/internal/codelocation
+github.com/onsi/ginkgo/internal/containernode
github.com/onsi/ginkgo/internal/failer
+github.com/onsi/ginkgo/internal/leafnodes
github.com/onsi/ginkgo/internal/remote
+github.com/onsi/ginkgo/internal/spec
+github.com/onsi/ginkgo/internal/spec_iterator
+github.com/onsi/ginkgo/internal/specrunner
github.com/onsi/ginkgo/internal/suite
github.com/onsi/ginkgo/internal/testingtproxy
github.com/onsi/ginkgo/internal/writer
github.com/onsi/ginkgo/reporters
github.com/onsi/ginkgo/reporters/stenographer
-github.com/onsi/ginkgo/extensions/table
+github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
-github.com/onsi/ginkgo/internal/spec_iterator
-github.com/onsi/ginkgo/internal/containernode
-github.com/onsi/ginkgo/internal/leafnodes
-github.com/onsi/ginkgo/internal/spec
-github.com/onsi/ginkgo/internal/specrunner
-# github.com/onsi/gomega v1.7.0
+github.com/onsi/ginkgo/types
+# github.com/onsi/gomega v1.7.1
github.com/onsi/gomega
-github.com/onsi/gomega/gexec
github.com/onsi/gomega/format
+github.com/onsi/gomega/gbytes
+github.com/onsi/gomega/gexec
github.com/onsi/gomega/internal/assertion
github.com/onsi/gomega/internal/asyncassertion
+github.com/onsi/gomega/internal/oraclematcher
github.com/onsi/gomega/internal/testingtsupport
github.com/onsi/gomega/matchers
-github.com/onsi/gomega/types
-github.com/onsi/gomega/gbytes
-github.com/onsi/gomega/internal/oraclematcher
github.com/onsi/gomega/matchers/support/goraph/bipartitegraph
github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
+github.com/onsi/gomega/types
# github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
-github.com/opencontainers/image-spec/specs-go/v1
github.com/opencontainers/image-spec/specs-go
+github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158
-github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/apparmor
+github.com/opencontainers/runc/libcontainer/cgroups
github.com/opencontainers/runc/libcontainer/configs
github.com/opencontainers/runc/libcontainer/devices
-github.com/opencontainers/runc/libcontainer/cgroups
github.com/opencontainers/runc/libcontainer/system
+github.com/opencontainers/runc/libcontainer/user
# github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.0
+github.com/opencontainers/runtime-tools/error
+github.com/opencontainers/runtime-tools/filepath
github.com/opencontainers/runtime-tools/generate
-github.com/opencontainers/runtime-tools/validate
github.com/opencontainers/runtime-tools/generate/seccomp
-github.com/opencontainers/runtime-tools/filepath
github.com/opencontainers/runtime-tools/specerror
-github.com/opencontainers/runtime-tools/error
+github.com/opencontainers/runtime-tools/validate
# github.com/opencontainers/selinux v1.3.0
-github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/go-selinux
+github.com/opencontainers/selinux/go-selinux/label
# github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible
github.com/openshift/api/config/v1
# github.com/openshift/imagebuilder v1.1.1
github.com/openshift/imagebuilder
-github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/dockerfile/command
+github.com/openshift/imagebuilder/dockerfile/parser
github.com/openshift/imagebuilder/signal
github.com/openshift/imagebuilder/strslice
# github.com/opentracing/opentracing-go v1.1.0
github.com/opentracing/opentracing-go
-github.com/opentracing/opentracing-go/log
github.com/opentracing/opentracing-go/ext
+github.com/opentracing/opentracing-go/log
# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
github.com/ostreedev/ostree-go/pkg/glibobject
github.com/ostreedev/ostree-go/pkg/otbuiltin
@@ -406,19 +407,19 @@ github.com/pkg/profile
github.com/pmezard/go-difflib/difflib
# github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9
github.com/pquerna/ffjson/fflib/v1
+github.com/pquerna/ffjson/fflib/v1/internal
github.com/pquerna/ffjson/inception
github.com/pquerna/ffjson/shared
-github.com/pquerna/ffjson/fflib/v1/internal
# github.com/prometheus/client_golang v1.1.0
github.com/prometheus/client_golang/prometheus
-github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/internal
+github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.6.0
github.com/prometheus/common/expfmt
-github.com/prometheus/common/model
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
+github.com/prometheus/common/model
# github.com/prometheus/procfs v0.0.3
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
@@ -442,34 +443,34 @@ github.com/stretchr/testify/require
github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tchap/go-patricia/patricia
-# github.com/uber/jaeger-client-go v2.19.0+incompatible
+# github.com/uber/jaeger-client-go v2.20.0+incompatible
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
github.com/uber/jaeger-client-go/internal/baggage
+github.com/uber/jaeger-client-go/internal/baggage/remote
github.com/uber/jaeger-client-go/internal/spanlog
github.com/uber/jaeger-client-go/internal/throttler
+github.com/uber/jaeger-client-go/internal/throttler/remote
github.com/uber/jaeger-client-go/log
+github.com/uber/jaeger-client-go/rpcmetrics
github.com/uber/jaeger-client-go/thrift
+github.com/uber/jaeger-client-go/thrift-gen/agent
+github.com/uber/jaeger-client-go/thrift-gen/baggage
github.com/uber/jaeger-client-go/thrift-gen/jaeger
github.com/uber/jaeger-client-go/thrift-gen/sampling
github.com/uber/jaeger-client-go/thrift-gen/zipkincore
-github.com/uber/jaeger-client-go/utils
-github.com/uber/jaeger-client-go/internal/baggage/remote
-github.com/uber/jaeger-client-go/internal/throttler/remote
-github.com/uber/jaeger-client-go/rpcmetrics
github.com/uber/jaeger-client-go/transport
-github.com/uber/jaeger-client-go/thrift-gen/agent
-github.com/uber/jaeger-client-go/thrift-gen/baggage
+github.com/uber/jaeger-client-go/utils
# github.com/uber/jaeger-lib v0.0.0-20190122222657-d036253de8f5
github.com/uber/jaeger-lib/metrics
# github.com/ulikunitz/xz v0.5.6
github.com/ulikunitz/xz
+github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
-github.com/ulikunitz/xz/internal/hash
# github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
-github.com/varlink/go/varlink
github.com/varlink/go/cmd/varlink-go-interface-generator
+github.com/varlink/go/varlink
github.com/varlink/go/varlink/idl
# github.com/vbatts/tar-split v0.11.1
github.com/vbatts/tar-split/archive/tar
@@ -477,8 +478,8 @@ github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
# github.com/vbauerster/mpb v3.4.0+incompatible
github.com/vbauerster/mpb
-github.com/vbauerster/mpb/decor
github.com/vbauerster/mpb/cwriter
+github.com/vbauerster/mpb/decor
github.com/vbauerster/mpb/internal
# github.com/vishvananda/netlink v1.0.0
github.com/vishvananda/netlink
@@ -491,33 +492,35 @@ github.com/xeipuuv/gojsonpointer
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.1.0
github.com/xeipuuv/gojsonschema
+# go.uber.org/atomic v1.4.0
+go.uber.org/atomic
# golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
-golang.org/x/crypto/ssh/terminal
+golang.org/x/crypto/cast5
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
+golang.org/x/crypto/openpgp/elgamal
golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
-golang.org/x/crypto/cast5
-golang.org/x/crypto/openpgp/elgamal
+golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20190628185345-da137c7871d7
golang.org/x/net/context
-golang.org/x/net/http2
+golang.org/x/net/context/ctxhttp
+golang.org/x/net/html
+golang.org/x/net/html/atom
golang.org/x/net/html/charset
-golang.org/x/net/proxy
golang.org/x/net/http/httpguts
+golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
-golang.org/x/net/html
golang.org/x/net/internal/socks
-golang.org/x/net/html/atom
-golang.org/x/net/context/ctxhttp
+golang.org/x/net/proxy
# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20190423024810-112230192c58
-golang.org/x/sync/semaphore
golang.org/x/sync/errgroup
+golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20190902133755-9109b7679e13
golang.org/x/sys/unix
golang.org/x/sys/windows
@@ -525,107 +528,107 @@ golang.org/x/sys/windows
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex
-golang.org/x/text/transform
-golang.org/x/text/secure/bidirule
-golang.org/x/text/unicode/bidi
-golang.org/x/text/unicode/norm
-golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/internal
+golang.org/x/text/encoding/internal/identifier
golang.org/x/text/encoding/japanese
golang.org/x/text/encoding/korean
golang.org/x/text/encoding/simplifiedchinese
golang.org/x/text/encoding/traditionalchinese
golang.org/x/text/encoding/unicode
-golang.org/x/text/language
-golang.org/x/text/internal/utf8internal
-golang.org/x/text/runes
golang.org/x/text/internal/language
golang.org/x/text/internal/language/compact
golang.org/x/text/internal/tag
+golang.org/x/text/internal/utf8internal
+golang.org/x/text/language
+golang.org/x/text/runes
+golang.org/x/text/secure/bidirule
+golang.org/x/text/transform
+golang.org/x/text/unicode/bidi
+golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0
golang.org/x/time/rate
# google.golang.org/appengine v1.6.1
-google.golang.org/appengine/urlfetch
google.golang.org/appengine/internal
-google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/internal/base
google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
+google.golang.org/appengine/internal/urlfetch
+google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.24.0
google.golang.org/grpc/codes
-google.golang.org/grpc/status
-google.golang.org/grpc/internal
google.golang.org/grpc/connectivity
google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
+google.golang.org/grpc/status
# gopkg.in/fsnotify.v1 v1.4.7
gopkg.in/fsnotify.v1
# gopkg.in/inf.v0 v0.9.1
gopkg.in/inf.v0
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/tomb.v1
-# gopkg.in/yaml.v2 v2.2.4
+# gopkg.in/yaml.v2 v2.2.5
gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190813020757-36bff7324fb7
k8s.io/api/core/v1
# k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010
-k8s.io/apimachinery/pkg/apis/meta/v1
-k8s.io/apimachinery/pkg/util/runtime
+k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
-k8s.io/apimachinery/pkg/runtime
-k8s.io/apimachinery/pkg/runtime/schema
-k8s.io/apimachinery/pkg/types
-k8s.io/apimachinery/pkg/util/intstr
+k8s.io/apimachinery/pkg/apis/meta/v1
+k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/conversion
+k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
+k8s.io/apimachinery/pkg/runtime
+k8s.io/apimachinery/pkg/runtime/schema
+k8s.io/apimachinery/pkg/runtime/serializer
+k8s.io/apimachinery/pkg/runtime/serializer/json
+k8s.io/apimachinery/pkg/runtime/serializer/protobuf
+k8s.io/apimachinery/pkg/runtime/serializer/recognizer
+k8s.io/apimachinery/pkg/runtime/serializer/streaming
+k8s.io/apimachinery/pkg/runtime/serializer/versioning
k8s.io/apimachinery/pkg/selection
-k8s.io/apimachinery/pkg/watch
-k8s.io/apimachinery/pkg/util/httpstream
-k8s.io/apimachinery/pkg/util/remotecommand
-k8s.io/apimachinery/pkg/conversion/queryparams
+k8s.io/apimachinery/pkg/types
+k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/errors
+k8s.io/apimachinery/pkg/util/framer
+k8s.io/apimachinery/pkg/util/httpstream
+k8s.io/apimachinery/pkg/util/httpstream/spdy
+k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/naming
+k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/util/remotecommand
+k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
-k8s.io/apimachinery/third_party/forked/golang/reflect
k8s.io/apimachinery/pkg/util/validation
-k8s.io/apimachinery/pkg/util/net
-k8s.io/apimachinery/pkg/api/errors
-k8s.io/apimachinery/pkg/runtime/serializer/streaming
-k8s.io/apimachinery/pkg/util/httpstream/spdy
k8s.io/apimachinery/pkg/util/validation/field
+k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
-k8s.io/apimachinery/pkg/runtime/serializer
-k8s.io/apimachinery/pkg/util/clock
+k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/netutil
-k8s.io/apimachinery/pkg/runtime/serializer/json
-k8s.io/apimachinery/pkg/runtime/serializer/protobuf
-k8s.io/apimachinery/pkg/runtime/serializer/recognizer
-k8s.io/apimachinery/pkg/runtime/serializer/versioning
-k8s.io/apimachinery/pkg/util/framer
-k8s.io/apimachinery/pkg/util/yaml
-k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
-k8s.io/client-go/tools/remotecommand
-k8s.io/client-go/rest
-k8s.io/client-go/transport/spdy
-k8s.io/client-go/util/exec
-k8s.io/client-go/util/homedir
+k8s.io/client-go/pkg/apis/clientauthentication
+k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
+k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
k8s.io/client-go/pkg/version
k8s.io/client-go/plugin/pkg/client/auth/exec
+k8s.io/client-go/rest
k8s.io/client-go/rest/watch
k8s.io/client-go/tools/clientcmd/api
k8s.io/client-go/tools/metrics
+k8s.io/client-go/tools/remotecommand
k8s.io/client-go/transport
+k8s.io/client-go/transport/spdy
k8s.io/client-go/util/cert
-k8s.io/client-go/util/flowcontrol
-k8s.io/client-go/pkg/apis/clientauthentication
-k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
-k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
k8s.io/client-go/util/connrotation
+k8s.io/client-go/util/exec
+k8s.io/client-go/util/flowcontrol
+k8s.io/client-go/util/homedir
k8s.io/client-go/util/keyutil
# k8s.io/klog v0.3.3
k8s.io/klog
diff --git a/version/version.go b/version/version.go
index c0dbeadfe..129a2cd4b 100644
--- a/version/version.go
+++ b/version/version.go
@@ -4,7 +4,7 @@ package version
// NOTE: remember to bump the version at the top
// of the top-level README.md file when this is
// bumped.
-const Version = "1.6.3-dev"
+const Version = "1.6.4-dev"
// RemoteAPIVersion is the version for the remote
// client API. It is used to determine compatibility