Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/common.go  344
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/reader.go  1002
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go  20
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go  20
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go  32
-rw-r--r--  vendor/github.com/Microsoft/go-winio/archive/tar/writer.go  444
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backuptar/strconv.go  68
-rw-r--r--  vendor/github.com/Microsoft/go-winio/backuptar/tar.go  60
-rw-r--r--  vendor/github.com/chzyer/readline/.gitignore  1
-rw-r--r--  vendor/github.com/chzyer/readline/.travis.yml  8
-rw-r--r--  vendor/github.com/chzyer/readline/CHANGELOG.md  58
-rw-r--r--  vendor/github.com/chzyer/readline/LICENSE  22
-rw-r--r--  vendor/github.com/chzyer/readline/README.md  114
-rw-r--r--  vendor/github.com/chzyer/readline/ansi_windows.go  249
-rw-r--r--  vendor/github.com/chzyer/readline/complete.go  285
-rw-r--r--  vendor/github.com/chzyer/readline/complete_helper.go  165
-rw-r--r--  vendor/github.com/chzyer/readline/complete_segment.go  82
-rw-r--r--  vendor/github.com/chzyer/readline/history.go  330
-rw-r--r--  vendor/github.com/chzyer/readline/operation.go  531
-rw-r--r--  vendor/github.com/chzyer/readline/password.go  33
-rw-r--r--  vendor/github.com/chzyer/readline/rawreader_windows.go  125
-rw-r--r--  vendor/github.com/chzyer/readline/readline.go  326
-rw-r--r--  vendor/github.com/chzyer/readline/remote.go  475
-rw-r--r--  vendor/github.com/chzyer/readline/runebuf.go  629
-rw-r--r--  vendor/github.com/chzyer/readline/runes.go  223
-rw-r--r--  vendor/github.com/chzyer/readline/search.go  164
-rw-r--r--  vendor/github.com/chzyer/readline/std.go  197
-rw-r--r--  vendor/github.com/chzyer/readline/std_windows.go  9
-rw-r--r--  vendor/github.com/chzyer/readline/term.go  123
-rw-r--r--  vendor/github.com/chzyer/readline/term_bsd.go  29
-rw-r--r--  vendor/github.com/chzyer/readline/term_linux.go  33
-rw-r--r--  vendor/github.com/chzyer/readline/term_solaris.go  32
-rw-r--r--  vendor/github.com/chzyer/readline/term_unix.go  24
-rw-r--r--  vendor/github.com/chzyer/readline/term_windows.go  171
-rw-r--r--  vendor/github.com/chzyer/readline/terminal.go  238
-rw-r--r--  vendor/github.com/chzyer/readline/utils.go  277
-rw-r--r--  vendor/github.com/chzyer/readline/utils_unix.go  83
-rw-r--r--  vendor/github.com/chzyer/readline/utils_windows.go  41
-rw-r--r--  vendor/github.com/chzyer/readline/vim.go  176
-rw-r--r--  vendor/github.com/chzyer/readline/windows_api.go  152
-rw-r--r--  vendor/github.com/containerd/continuity/syscallx/syscall_unix.go  26
-rw-r--r--  vendor/github.com/containerd/continuity/syscallx/syscall_windows.go  112
-rw-r--r--  vendor/github.com/containerd/continuity/sysx/file_posix.go  128
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml  45
-rw-r--r--  vendor/github.com/containers/buildah/.golangci.yml  10
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md  125
-rw-r--r--  vendor/github.com/containers/buildah/CONTRIBUTING.md  60
-rw-r--r--  vendor/github.com/containers/buildah/Makefile  19
-rw-r--r--  vendor/github.com/containers/buildah/README.md  2
-rw-r--r--  vendor/github.com/containers/buildah/add.go  22
-rw-r--r--  vendor/github.com/containers/buildah/bind/mount.go  2
-rw-r--r--  vendor/github.com/containers/buildah/bors.toml  44
-rw-r--r--  vendor/github.com/containers/buildah/btrfs_installed_tag.sh  2
-rw-r--r--  vendor/github.com/containers/buildah/btrfs_tag.sh  2
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go  52
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt  125
-rw-r--r--  vendor/github.com/containers/buildah/commit.go  54
-rw-r--r--  vendor/github.com/containers/buildah/config.go  11
-rw-r--r--  vendor/github.com/containers/buildah/copier/copier.go  29
-rw-r--r--  vendor/github.com/containers/buildah/copier/xattrs.go  5
-rw-r--r--  vendor/github.com/containers/buildah/define/types.go  50
-rw-r--r--  vendor/github.com/containers/buildah/go.mod  27
-rw-r--r--  vendor/github.com/containers/buildah/go.sum  340
-rw-r--r--  vendor/github.com/containers/buildah/image.go  2
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go  27
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go  12
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go  49
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go  46
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/util.go  6
-rw-r--r--  vendor/github.com/containers/buildah/import.go  2
-rw-r--r--  vendor/github.com/containers/buildah/install.md  11
-rw-r--r--  vendor/github.com/containers/buildah/libdm_tag.sh  2
-rw-r--r--  vendor/github.com/containers/buildah/mount.go  32
-rw-r--r--  vendor/github.com/containers/buildah/new.go  244
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go  167
-rw-r--r--  vendor/github.com/containers/buildah/pkg/completion/completion.go  23
-rw-r--r--  vendor/github.com/containers/buildah/pkg/overlay/overlay.go  81
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go  12
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go  6
-rw-r--r--  vendor/github.com/containers/buildah/pkg/secrets/secrets.go  2
-rw-r--r--  vendor/github.com/containers/buildah/pull.go  104
-rw-r--r--  vendor/github.com/containers/buildah/run.go  3
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go  81
-rw-r--r--  vendor/github.com/containers/buildah/run_unix.go  24
-rw-r--r--  vendor/github.com/containers/buildah/run_unsupported.go  5
-rw-r--r--  vendor/github.com/containers/buildah/util.go  2
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go  69
-rw-r--r--  vendor/github.com/containers/buildah/util/util_linux.go  9
-rw-r--r--  vendor/github.com/containers/buildah/util/util_not_uint64.go  2
-rw-r--r--  vendor/github.com/containers/buildah/util/util_uint64.go  2
-rw-r--r--  vendor/github.com/containers/buildah/util/util_unix.go  8
-rw-r--r--  vendor/github.com/containers/buildah/util/util_unsupported.go  12
-rw-r--r--  vendor/github.com/containers/buildah/util/util_windows.go  8
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go  6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf  19
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go  4
-rw-r--r--  vendor/github.com/containers/common/pkg/retry/retry.go  2
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/default_linux.go  2
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/supported.go  2
-rw-r--r--  vendor/github.com/containers/common/version/version.go  2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go  17
-rw-r--r--  vendor/github.com/containers/image/v5/docker/docker_image.go  47
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go  458
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go  328
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go  252
-rw-r--r--  vendor/github.com/containers/image/v5/types/types.go  34
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go  4
-rw-r--r--  vendor/github.com/containers/storage/VERSION  2
-rw-r--r--  vendor/github.com/containers/storage/drivers/chown_unix.go  12
-rw-r--r--  vendor/github.com/containers/storage/drivers/copy/copy_linux.go  5
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go  1
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go  86
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go  2
-rw-r--r--  vendor/github.com/containers/storage/go.mod  4
-rw-r--r--  vendor/github.com/containers/storage/go.sum  6
-rw-r--r--  vendor/github.com/containers/storage/layers.go  1
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go  64
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go  61
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_linux.go  12
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_other.go  4
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_unix.go  10
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive_windows.go  2
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes_linux.go  13
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go  4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go  9
-rw-r--r--  vendor/github.com/containers/storage/pkg/config/config.go  13
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go  96
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_others.go  2
-rw-r--r--  vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go  100
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/chmod.go  17
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/lchown.go  20
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/xattrs_linux.go  15
-rw-r--r--  vendor/github.com/containers/storage/pkg/unshare/unshare.go  1
-rw-r--r--  vendor/github.com/containers/storage/storage.conf  33
-rw-r--r--  vendor/github.com/containers/storage/store.go  3
-rw-r--r--  vendor/github.com/containers/storage/userns.go  125
-rw-r--r--  vendor/github.com/containers/storage/utils.go  6
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go  93
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go  18
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/util_linux.go  79
-rw-r--r--  vendor/github.com/cri-o/ocicni/pkg/ocicni/util_unsupported.go  19
-rw-r--r--  vendor/github.com/docker/docker/api/swagger.yaml  2
-rw-r--r--  vendor/github.com/docker/docker/api/types/seccomp.go  94
-rw-r--r--  vendor/github.com/docker/docker/client/client.go  4
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/rm_unix.go (renamed from vendor/github.com/docker/docker/pkg/system/rm.go)  2
-rw-r--r--  vendor/github.com/docker/docker/pkg/system/rm_windows.go  6
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/.golangci.yaml  5
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/AUTHORS  1
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE  2
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/README.md  2
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/client.go  22
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container.go  1067
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_archive.go  58
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_attach.go  74
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_changes.go  26
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_commit.go  44
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_copy.go  49
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_create.go  78
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_export.go  37
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_inspect.go  54
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_kill.go  45
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_list.go  37
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_logs.go  58
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_pause.go  22
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_prune.go  40
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_remove.go  39
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_rename.go  33
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_resize.go  22
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_restart.go  62
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_start.go  56
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_stats.go  214
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_stop.go  41
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_top.go  38
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_unpause.go  22
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_update.go  43
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/container_wait.go  41
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/event.go  8
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/go.mod  23
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/go.sum  93
-rw-r--r--  vendor/github.com/fsouza/go-dockerclient/tls.go  1
-rw-r--r--  vendor/github.com/juju/ansiterm/LICENSE  191
-rw-r--r--  vendor/github.com/juju/ansiterm/Makefile  14
-rw-r--r--  vendor/github.com/juju/ansiterm/README.md  323
-rw-r--r--  vendor/github.com/juju/ansiterm/attribute.go  50
-rw-r--r--  vendor/github.com/juju/ansiterm/color.go  119
-rw-r--r--  vendor/github.com/juju/ansiterm/context.go  95
-rw-r--r--  vendor/github.com/juju/ansiterm/doc.go  6
-rw-r--r--  vendor/github.com/juju/ansiterm/style.go  72
-rw-r--r--  vendor/github.com/juju/ansiterm/tabwriter.go  64
-rw-r--r--  vendor/github.com/juju/ansiterm/tabwriter/LICENSE (renamed from vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE)  0
-rw-r--r--  vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go  587
-rw-r--r--  vendor/github.com/juju/ansiterm/terminal.go  32
-rw-r--r--  vendor/github.com/juju/ansiterm/writer.go  74
-rw-r--r--  vendor/github.com/klauspost/compress/flate/gen_inflate.go  114
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go  7
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate_gen.go  456
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md  8
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go  10
-rw-r--r--  vendor/github.com/lunixbochs/vtclean/.travis.yml  9
-rw-r--r--  vendor/github.com/lunixbochs/vtclean/LICENSE  19
-rw-r--r--  vendor/github.com/lunixbochs/vtclean/README.md  46
-rw-r--r--  vendor/github.com/lunixbochs/vtclean/io.go  93
-rw-r--r--  vendor/github.com/lunixbochs/vtclean/line.go  113
-rw-r--r--  vendor/github.com/lunixbochs/vtclean/vtclean.go  95
-rw-r--r--  vendor/github.com/manifoldco/promptui/.gitignore  3
-rw-r--r--  vendor/github.com/manifoldco/promptui/.golangci.yml  26
-rw-r--r--  vendor/github.com/manifoldco/promptui/.travis.yml  14
-rw-r--r--  vendor/github.com/manifoldco/promptui/CHANGELOG.md  123
-rw-r--r--  vendor/github.com/manifoldco/promptui/CODE_OF_CONDUCT.md  73
-rw-r--r--  vendor/github.com/manifoldco/promptui/LICENSE.md  29
-rw-r--r--  vendor/github.com/manifoldco/promptui/Makefile  49
-rw-r--r--  vendor/github.com/manifoldco/promptui/README.md  107
-rw-r--r--  vendor/github.com/manifoldco/promptui/codes.go  120
-rw-r--r--  vendor/github.com/manifoldco/promptui/cursor.go  232
-rw-r--r--  vendor/github.com/manifoldco/promptui/go.mod  16
-rw-r--r--  vendor/github.com/manifoldco/promptui/go.sum  23
-rw-r--r--  vendor/github.com/manifoldco/promptui/keycodes.go  29
-rw-r--r--  vendor/github.com/manifoldco/promptui/keycodes_other.go  10
-rw-r--r--  vendor/github.com/manifoldco/promptui/keycodes_windows.go  10
-rw-r--r--  vendor/github.com/manifoldco/promptui/list/list.go  237
-rw-r--r--  vendor/github.com/manifoldco/promptui/prompt.go  341
-rw-r--r--  vendor/github.com/manifoldco/promptui/promptui.go  27
-rw-r--r--  vendor/github.com/manifoldco/promptui/screenbuf/screenbuf.go  151
-rw-r--r--  vendor/github.com/manifoldco/promptui/select.go  637
-rw-r--r--  vendor/github.com/manifoldco/promptui/styles.go  23
-rw-r--r--  vendor/github.com/manifoldco/promptui/styles_windows.go  21
-rw-r--r--  vendor/github.com/mattn/go-colorable/.travis.yml  9
-rw-r--r--  vendor/github.com/mattn/go-colorable/LICENSE  21
-rw-r--r--  vendor/github.com/mattn/go-colorable/README.md  48
-rw-r--r--  vendor/github.com/mattn/go-colorable/colorable_appengine.go  29
-rw-r--r--  vendor/github.com/mattn/go-colorable/colorable_others.go  30
-rw-r--r--  vendor/github.com/mattn/go-colorable/colorable_windows.go  884
-rw-r--r--  vendor/github.com/mattn/go-colorable/noncolorable.go  55
-rw-r--r--  vendor/github.com/mattn/go-isatty/.travis.yml  13
-rw-r--r--  vendor/github.com/mattn/go-isatty/LICENSE  9
-rw-r--r--  vendor/github.com/mattn/go-isatty/README.md  50
-rw-r--r--  vendor/github.com/mattn/go-isatty/doc.go  2
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_appengine.go  15
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_bsd.go  18
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_linux.go  18
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go  19
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_others.go  10
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_solaris.go  16
-rw-r--r--  vendor/github.com/mattn/go-isatty/isatty_windows.go  94
-rw-r--r--  vendor/github.com/prometheus/procfs/arp.go  85
-rw-r--r--  vendor/github.com/prometheus/procfs/buddyinfo.go  2
-rw-r--r--  vendor/github.com/prometheus/procfs/cpuinfo.go  166
-rw-r--r--  vendor/github.com/prometheus/procfs/crypto.go  131
-rw-r--r--  vendor/github.com/prometheus/procfs/fixtures.ttar  2379
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/parse.go  88
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go  45
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go  26
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/valueparser.go  77
-rw-r--r--  vendor/github.com/prometheus/procfs/net_softnet.go  91
-rw-r--r--  vendor/github.com/prometheus/procfs/proc.go  30
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_fdinfo.go  132
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_status.go  7
-rw-r--r--  vendor/github.com/prometheus/procfs/schedstat.go  118
-rw-r--r--  vendor/github.com/prometheus/procfs/vm.go  210
-rw-r--r--  vendor/github.com/prometheus/procfs/zoneinfo.go  196
-rw-r--r--  vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go  2
-rw-r--r--  vendor/github.com/spf13/cobra/bash_completions.go  752
-rw-r--r--  vendor/github.com/spf13/cobra/custom_completions.go  101
-rw-r--r--  vendor/github.com/spf13/cobra/fish_completions.go  115
-rw-r--r--  vendor/github.com/spf13/cobra/zsh_completions.go  55
265 files changed, 16422 insertions, 9044 deletions
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go
deleted file mode 100644
index 0378401c0..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tar implements access to tar archives.
-// It aims to cover most of the variations, including those produced
-// by GNU and BSD tars.
-//
-// References:
-// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
-// http://www.gnu.org/software/tar/manual/html_node/Standard.html
-// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
-package tar
-
-import (
- "bytes"
- "errors"
- "fmt"
- "os"
- "path"
- "time"
-)
-
-const (
- blockSize = 512
-
- // Types
- TypeReg = '0' // regular file
- TypeRegA = '\x00' // regular file
- TypeLink = '1' // hard link
- TypeSymlink = '2' // symbolic link
- TypeChar = '3' // character device node
- TypeBlock = '4' // block device node
- TypeDir = '5' // directory
- TypeFifo = '6' // fifo node
- TypeCont = '7' // reserved
- TypeXHeader = 'x' // extended header
- TypeXGlobalHeader = 'g' // global extended header
- TypeGNULongName = 'L' // Next file has a long name
- TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
- TypeGNUSparse = 'S' // sparse file
-)
-
-// A Header represents a single header in a tar archive.
-// Some fields may not be populated.
-type Header struct {
- Name string // name of header file entry
- Mode int64 // permission and mode bits
- Uid int // user id of owner
- Gid int // group id of owner
- Size int64 // length in bytes
- ModTime time.Time // modified time
- Typeflag byte // type of header entry
- Linkname string // target name of link
- Uname string // user name of owner
- Gname string // group name of owner
- Devmajor int64 // major number of character or block device
- Devminor int64 // minor number of character or block device
- AccessTime time.Time // access time
- ChangeTime time.Time // status change time
- CreationTime time.Time // creation time
- Xattrs map[string]string
- Winheaders map[string]string
-}
-
-// File name constants from the tar spec.
-const (
- fileNameSize = 100 // Maximum number of bytes in a standard tar name.
- fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
-)
-
-// FileInfo returns an os.FileInfo for the Header.
-func (h *Header) FileInfo() os.FileInfo {
- return headerFileInfo{h}
-}
-
-// headerFileInfo implements os.FileInfo.
-type headerFileInfo struct {
- h *Header
-}
-
-func (fi headerFileInfo) Size() int64 { return fi.h.Size }
-func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
-func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() interface{} { return fi.h }
-
-// Name returns the base name of the file.
-func (fi headerFileInfo) Name() string {
- if fi.IsDir() {
- return path.Base(path.Clean(fi.h.Name))
- }
- return path.Base(fi.h.Name)
-}
-
-// Mode returns the permission and mode bits for the headerFileInfo.
-func (fi headerFileInfo) Mode() (mode os.FileMode) {
- // Set file permission bits.
- mode = os.FileMode(fi.h.Mode).Perm()
-
- // Set setuid, setgid and sticky bits.
- if fi.h.Mode&c_ISUID != 0 {
- // setuid
- mode |= os.ModeSetuid
- }
- if fi.h.Mode&c_ISGID != 0 {
- // setgid
- mode |= os.ModeSetgid
- }
- if fi.h.Mode&c_ISVTX != 0 {
- // sticky
- mode |= os.ModeSticky
- }
-
- // Set file mode bits.
- // clear perm, setuid, setgid and sticky bits.
- m := os.FileMode(fi.h.Mode) &^ 07777
- if m == c_ISDIR {
- // directory
- mode |= os.ModeDir
- }
- if m == c_ISFIFO {
- // named pipe (FIFO)
- mode |= os.ModeNamedPipe
- }
- if m == c_ISLNK {
- // symbolic link
- mode |= os.ModeSymlink
- }
- if m == c_ISBLK {
- // device file
- mode |= os.ModeDevice
- }
- if m == c_ISCHR {
- // Unix character device
- mode |= os.ModeDevice
- mode |= os.ModeCharDevice
- }
- if m == c_ISSOCK {
- // Unix domain socket
- mode |= os.ModeSocket
- }
-
- switch fi.h.Typeflag {
- case TypeSymlink:
- // symbolic link
- mode |= os.ModeSymlink
- case TypeChar:
- // character device node
- mode |= os.ModeDevice
- mode |= os.ModeCharDevice
- case TypeBlock:
- // block device node
- mode |= os.ModeDevice
- case TypeDir:
- // directory
- mode |= os.ModeDir
- case TypeFifo:
- // fifo node
- mode |= os.ModeNamedPipe
- }
-
- return mode
-}
-
-// sysStat, if non-nil, populates h from system-dependent fields of fi.
-var sysStat func(fi os.FileInfo, h *Header) error
-
-// Mode constants from the tar spec.
-const (
- c_ISUID = 04000 // Set uid
- c_ISGID = 02000 // Set gid
- c_ISVTX = 01000 // Save text (sticky bit)
- c_ISDIR = 040000 // Directory
- c_ISFIFO = 010000 // FIFO
- c_ISREG = 0100000 // Regular file
- c_ISLNK = 0120000 // Symbolic link
- c_ISBLK = 060000 // Block special file
- c_ISCHR = 020000 // Character special file
- c_ISSOCK = 0140000 // Socket
-)
-
-// Keywords for the PAX Extended Header
-const (
- paxAtime = "atime"
- paxCharset = "charset"
- paxComment = "comment"
- paxCtime = "ctime" // please note that ctime is not a valid pax header.
- paxCreationTime = "LIBARCHIVE.creationtime"
- paxGid = "gid"
- paxGname = "gname"
- paxLinkpath = "linkpath"
- paxMtime = "mtime"
- paxPath = "path"
- paxSize = "size"
- paxUid = "uid"
- paxUname = "uname"
- paxXattr = "SCHILY.xattr."
- paxWindows = "MSWINDOWS."
- paxNone = ""
-)
-
-// FileInfoHeader creates a partially-populated Header from fi.
-// If fi describes a symlink, FileInfoHeader records link as the link target.
-// If fi describes a directory, a slash is appended to the name.
-// Because os.FileInfo's Name method returns only the base name of
-// the file it describes, it may be necessary to modify the Name field
-// of the returned header to provide the full path name of the file.
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
- if fi == nil {
- return nil, errors.New("tar: FileInfo is nil")
- }
- fm := fi.Mode()
- h := &Header{
- Name: fi.Name(),
- ModTime: fi.ModTime(),
- Mode: int64(fm.Perm()), // or'd with c_IS* constants later
- }
- switch {
- case fm.IsRegular():
- h.Mode |= c_ISREG
- h.Typeflag = TypeReg
- h.Size = fi.Size()
- case fi.IsDir():
- h.Typeflag = TypeDir
- h.Mode |= c_ISDIR
- h.Name += "/"
- case fm&os.ModeSymlink != 0:
- h.Typeflag = TypeSymlink
- h.Mode |= c_ISLNK
- h.Linkname = link
- case fm&os.ModeDevice != 0:
- if fm&os.ModeCharDevice != 0 {
- h.Mode |= c_ISCHR
- h.Typeflag = TypeChar
- } else {
- h.Mode |= c_ISBLK
- h.Typeflag = TypeBlock
- }
- case fm&os.ModeNamedPipe != 0:
- h.Typeflag = TypeFifo
- h.Mode |= c_ISFIFO
- case fm&os.ModeSocket != 0:
- h.Mode |= c_ISSOCK
- default:
- return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
- }
- if fm&os.ModeSetuid != 0 {
- h.Mode |= c_ISUID
- }
- if fm&os.ModeSetgid != 0 {
- h.Mode |= c_ISGID
- }
- if fm&os.ModeSticky != 0 {
- h.Mode |= c_ISVTX
- }
- // If possible, populate additional fields from OS-specific
- // FileInfo fields.
- if sys, ok := fi.Sys().(*Header); ok {
- // This FileInfo came from a Header (not the OS). Use the
- // original Header to populate all remaining fields.
- h.Uid = sys.Uid
- h.Gid = sys.Gid
- h.Uname = sys.Uname
- h.Gname = sys.Gname
- h.AccessTime = sys.AccessTime
- h.ChangeTime = sys.ChangeTime
- if sys.Xattrs != nil {
- h.Xattrs = make(map[string]string)
- for k, v := range sys.Xattrs {
- h.Xattrs[k] = v
- }
- }
- if sys.Typeflag == TypeLink {
- // hard link
- h.Typeflag = TypeLink
- h.Size = 0
- h.Linkname = sys.Linkname
- }
- }
- if sysStat != nil {
- return h, sysStat(fi, h)
- }
- return h, nil
-}
-
-var zeroBlock = make([]byte, blockSize)
-
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
-// We compute and return both.
-func checksum(header []byte) (unsigned int64, signed int64) {
- for i := 0; i < len(header); i++ {
- if i == 148 {
- // The chksum field (header[148:156]) is special: it should be treated as space bytes.
- unsigned += ' ' * 8
- signed += ' ' * 8
- i += 7
- continue
- }
- unsigned += int64(header[i])
- signed += int64(int8(header[i]))
- }
- return
-}
-
-type slicer []byte
-
-func (sp *slicer) next(n int) (b []byte) {
- s := *sp
- b, *sp = s[0:n], s[n:]
- return
-}
-
-func isASCII(s string) bool {
- for _, c := range s {
- if c >= 0x80 {
- return false
- }
- }
- return true
-}
-
-func toASCII(s string) string {
- if isASCII(s) {
- return s
- }
- var buf bytes.Buffer
- for _, c := range s {
- if c < 0x80 {
- buf.WriteByte(byte(c))
- }
- }
- return buf.String()
-}
-
-// isHeaderOnlyType checks if the given type flag is of the type that has no
-// data section even if a size is specified.
-func isHeaderOnlyType(flag byte) bool {
- switch flag {
- case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
- return true
- default:
- return false
- }
-}
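
The file deleted above is a vendored fork of Go's standard library archive/tar header handling (Header, FileInfoHeader, and the checksum helpers). Purely as orientation, here is a minimal sketch of the same header construction using the standard archive/tar package; the "go.mod" path is only an illustrative stand-in for any file you might stat, not something taken from this diff.

package main

import (
	"archive/tar"
	"fmt"
	"log"
	"os"
)

func main() {
	// Stat any existing file; the path here is only an example.
	fi, err := os.Stat("go.mod")
	if err != nil {
		log.Fatal(err)
	}
	// FileInfoHeader fills Name, Mode, Size and ModTime from the FileInfo,
	// mirroring what the deleted vendored FileInfoHeader did.
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s mode=%o size=%d\n", hdr.Name, hdr.Mode, hdr.Size)
}
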
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
deleted file mode 100644
index e210c618a..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
+++ /dev/null
@@ -1,1002 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - pax extensions
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "math"
- "os"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-const maxNanoSecondIntSize = 9
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
-type Reader struct {
- r io.Reader
- err error
- pad int64 // amount of padding (ignored) after current file entry
- curr numBytesReader // reader for current file entry
- hdrBuff [blockSize]byte // buffer to use in readHeader
-}
-
-type parser struct {
- err error // Last error seen
-}
-
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
- io.Reader
- numBytes() int64
-}
-
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
- r io.Reader // underlying reader
- nb int64 // number of unread bytes for current file entry
-}
-
-// A sparseFileReader is a numBytesReader for reading sparse file data from a
-// tar archive.
-type sparseFileReader struct {
- rfr numBytesReader // Reads the sparse-encoded file data
- sp []sparseEntry // The sparse map for the file
- pos int64 // Keeps track of file position
- total int64 // Total size of the file
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-//
-// Sparse files are represented using a series of sparseEntrys.
-// Despite the name, a sparseEntry represents an actual data fragment that
-// references data found in the underlying archive stream. All regions not
-// covered by a sparseEntry are logically filled with zeros.
-//
-// For example, if the underlying raw file contains the 10-byte data:
-// var compactData = "abcdefgh"
-//
-// And the sparse map has the following entries:
-// var sp = []sparseEntry{
-// {offset: 2, numBytes: 5} // Data fragment for [2..7]
-// {offset: 18, numBytes: 3} // Data fragment for [18..21]
-// }
-//
-// Then the content of the resulting sparse file with a "real" size of 25 is:
-// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type sparseEntry struct {
- offset int64 // Starting position of the fragment
- numBytes int64 // Length of the fragment
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
- paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
- paxGNUSparseOffset = "GNU.sparse.offset"
- paxGNUSparseNumBytes = "GNU.sparse.numbytes"
- paxGNUSparseMap = "GNU.sparse.map"
- paxGNUSparseName = "GNU.sparse.name"
- paxGNUSparseMajor = "GNU.sparse.major"
- paxGNUSparseMinor = "GNU.sparse.minor"
- paxGNUSparseSize = "GNU.sparse.size"
- paxGNUSparseRealSize = "GNU.sparse.realsize"
-)
-
-// Keywords for old GNU sparse headers
-const (
- oldGNUSparseMainHeaderOffset = 386
- oldGNUSparseMainHeaderIsExtendedOffset = 482
- oldGNUSparseMainHeaderNumEntries = 4
- oldGNUSparseExtendedHeaderIsExtendedOffset = 504
- oldGNUSparseExtendedHeaderNumEntries = 21
- oldGNUSparseOffsetSize = 12
- oldGNUSparseNumBytesSize = 12
-)
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
-
-// Next advances to the next entry in the tar archive.
-//
-// io.EOF is returned at the end of the input.
-func (tr *Reader) Next() (*Header, error) {
- if tr.err != nil {
- return nil, tr.err
- }
-
- var hdr *Header
- var extHdrs map[string]string
-
- // Externally, Next iterates through the tar archive as if it is a series of
- // files. Internally, the tar format often uses fake "files" to add meta
- // data that describes the next file. These meta data "files" should not
- // normally be visible to the outside. As such, this loop iterates through
- // one or more "header files" until it finds a "normal file".
-loop:
- for {
- tr.err = tr.skipUnread()
- if tr.err != nil {
- return nil, tr.err
- }
-
- hdr = tr.readHeader()
- if tr.err != nil {
- return nil, tr.err
- }
-
- // Check for PAX/GNU special headers and files.
- switch hdr.Typeflag {
- case TypeXHeader:
- extHdrs, tr.err = parsePAX(tr)
- if tr.err != nil {
- return nil, tr.err
- }
- continue loop // This is a meta header affecting the next header
- case TypeGNULongName, TypeGNULongLink:
- var realname []byte
- realname, tr.err = ioutil.ReadAll(tr)
- if tr.err != nil {
- return nil, tr.err
- }
-
- // Convert GNU extensions to use PAX headers.
- if extHdrs == nil {
- extHdrs = make(map[string]string)
- }
- var p parser
- switch hdr.Typeflag {
- case TypeGNULongName:
- extHdrs[paxPath] = p.parseString(realname)
- case TypeGNULongLink:
- extHdrs[paxLinkpath] = p.parseString(realname)
- }
- if p.err != nil {
- tr.err = p.err
- return nil, tr.err
- }
- continue loop // This is a meta header affecting the next header
- default:
- mergePAX(hdr, extHdrs)
-
- // Check for a PAX format sparse file
- sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
- if err != nil {
- tr.err = err
- return nil, err
- }
- if sp != nil {
- // Current file is a PAX format GNU sparse file.
- // Set the current file reader to a sparse file reader.
- tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
- if tr.err != nil {
- return nil, tr.err
- }
- }
- break loop // This is a file, so stop
- }
- }
- return hdr, nil
-}
-
-// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
-// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
-// be treated as a regular file.
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
- var sparseFormat string
-
- // Check for sparse format indicators
- major, majorOk := headers[paxGNUSparseMajor]
- minor, minorOk := headers[paxGNUSparseMinor]
- sparseName, sparseNameOk := headers[paxGNUSparseName]
- _, sparseMapOk := headers[paxGNUSparseMap]
- sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
- sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
-
- // Identify which, if any, sparse format applies from which PAX headers are set
- if majorOk && minorOk {
- sparseFormat = major + "." + minor
- } else if sparseNameOk && sparseMapOk {
- sparseFormat = "0.1"
- } else if sparseSizeOk {
- sparseFormat = "0.0"
- } else {
- // Not a PAX format GNU sparse file.
- return nil, nil
- }
-
- // Check for unknown sparse format
- if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
- return nil, nil
- }
-
- // Update hdr from GNU sparse PAX headers
- if sparseNameOk {
- hdr.Name = sparseName
- }
- if sparseSizeOk {
- realSize, err := strconv.ParseInt(sparseSize, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
- } else if sparseRealSizeOk {
- realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
- }
-
- // Set up the sparse map, according to the particular sparse format in use
- var sp []sparseEntry
- var err error
- switch sparseFormat {
- case "0.0", "0.1":
- sp, err = readGNUSparseMap0x1(headers)
- case "1.0":
- sp, err = readGNUSparseMap1x0(tr.curr)
- }
- return sp, err
-}
-
-// mergePAX merges well known headers according to PAX standard.
-// In general headers with the same name as those found
-// in the header struct overwrite those found in the header
-// struct with higher precision or longer values. Esp. useful
-// for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) error {
- for k, v := range headers {
- switch k {
- case paxPath:
- hdr.Name = v
- case paxLinkpath:
- hdr.Linkname = v
- case paxGname:
- hdr.Gname = v
- case paxUname:
- hdr.Uname = v
- case paxUid:
- uid, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Uid = int(uid)
- case paxGid:
- gid, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Gid = int(gid)
- case paxAtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.AccessTime = t
- case paxMtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.ModTime = t
- case paxCtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.ChangeTime = t
- case paxCreationTime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.CreationTime = t
- case paxSize:
- size, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Size = int64(size)
- default:
- if strings.HasPrefix(k, paxXattr) {
- if hdr.Xattrs == nil {
- hdr.Xattrs = make(map[string]string)
- }
- hdr.Xattrs[k[len(paxXattr):]] = v
- } else if strings.HasPrefix(k, paxWindows) {
- if hdr.Winheaders == nil {
- hdr.Winheaders = make(map[string]string)
- }
- hdr.Winheaders[k[len(paxWindows):]] = v
- }
- }
- }
- return nil
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in
-// the PAX specification.
-func parsePAXTime(t string) (time.Time, error) {
- buf := []byte(t)
- pos := bytes.IndexByte(buf, '.')
- var seconds, nanoseconds int64
- var err error
- if pos == -1 {
- seconds, err = strconv.ParseInt(t, 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- } else {
- seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- nano_buf := string(buf[pos+1:])
- // Pad as needed before converting to a decimal.
- // For example .030 -> .030000000 -> 30000000 nanoseconds
- if len(nano_buf) < maxNanoSecondIntSize {
- // Right pad
- nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
- } else if len(nano_buf) > maxNanoSecondIntSize {
- // Right truncate
- nano_buf = nano_buf[:maxNanoSecondIntSize]
- }
- nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- }
- ts := time.Unix(seconds, nanoseconds)
- return ts, nil
-}
-
-// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned
-func parsePAX(r io.Reader) (map[string]string, error) {
- buf, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
- sbuf := string(buf)
-
- // For GNU PAX sparse format 0.0 support.
- // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
- var sparseMap bytes.Buffer
-
- headers := make(map[string]string)
- // Each record is constructed as
- // "%d %s=%s\n", length, keyword, value
- for len(sbuf) > 0 {
- key, value, residual, err := parsePAXRecord(sbuf)
- if err != nil {
- return nil, ErrHeader
- }
- sbuf = residual
-
- keyStr := string(key)
- if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
- // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
- sparseMap.WriteString(value)
- sparseMap.Write([]byte{','})
- } else {
- // Normal key. Set the value in the headers map.
- headers[keyStr] = string(value)
- }
- }
- if sparseMap.Len() != 0 {
- // Add sparse info to headers, chopping off the extra comma
- sparseMap.Truncate(sparseMap.Len() - 1)
- headers[paxGNUSparseMap] = sparseMap.String()
- }
- return headers, nil
-}
-
-// parsePAXRecord parses the input PAX record string into a key-value pair.
-// If parsing is successful, it will slice off the currently read record and
-// return the remainder as r.
-//
-// A PAX record is of the following form:
-// "%d %s=%s\n" % (size, key, value)
-func parsePAXRecord(s string) (k, v, r string, err error) {
- // The size field ends at the first space.
- sp := strings.IndexByte(s, ' ')
- if sp == -1 {
- return "", "", s, ErrHeader
- }
-
- // Parse the first token as a decimal integer.
- n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
- if perr != nil || n < 5 || int64(len(s)) < n {
- return "", "", s, ErrHeader
- }
-
- // Extract everything between the space and the final newline.
- rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
- if nl != "\n" {
- return "", "", s, ErrHeader
- }
-
- // The first equals separates the key from the value.
- eq := strings.IndexByte(rec, '=')
- if eq == -1 {
- return "", "", s, ErrHeader
- }
- return rec[:eq], rec[eq+1:], rem, nil
-}
-
-// parseString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func (*parser) parseString(b []byte) string {
- n := 0
- for n < len(b) && b[n] != 0 {
- n++
- }
- return string(b[0:n])
-}
-
-// parseNumeric parses the input as being encoded in either base-256 or octal.
-// This function may return negative numbers.
-// If parsing fails or an integer overflow occurs, err will be set.
-func (p *parser) parseNumeric(b []byte) int64 {
- // Check for base-256 (binary) format first.
- // If the first bit is set, then all following bits constitute a two's
- // complement encoded number in big-endian byte order.
- if len(b) > 0 && b[0]&0x80 != 0 {
- // Handling negative numbers relies on the following identity:
- // -a-1 == ^a
- //
- // If the number is negative, we use an inversion mask to invert the
- // data bytes and treat the value as an unsigned number.
- var inv byte // 0x00 if positive or zero, 0xff if negative
- if b[0]&0x40 != 0 {
- inv = 0xff
- }
-
- var x uint64
- for i, c := range b {
- c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
- if i == 0 {
- c &= 0x7f // Ignore signal bit in first byte
- }
- if (x >> 56) > 0 {
- p.err = ErrHeader // Integer overflow
- return 0
- }
- x = x<<8 | uint64(c)
- }
- if (x >> 63) > 0 {
- p.err = ErrHeader // Integer overflow
- return 0
- }
- if inv == 0xff {
- return ^int64(x)
- }
- return int64(x)
- }
-
- // Normal case is base-8 (octal) format.
- return p.parseOctal(b)
-}
-
-func (p *parser) parseOctal(b []byte) int64 {
- // Because unused fields are filled with NULs, we need
- // to skip leading NULs. Fields may also be padded with
- // spaces or NULs.
- // So we remove leading and trailing NULs and spaces to
- // be sure.
- b = bytes.Trim(b, " \x00")
-
- if len(b) == 0 {
- return 0
- }
- x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
- if perr != nil {
- p.err = ErrHeader
- }
- return int64(x)
-}
-
-// skipUnread skips any unread bytes in the existing file entry, as well as any
-// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
-// encountered in the data portion; it is okay to hit io.EOF in the padding.
-//
-// Note that this function still works properly even when sparse files are being
-// used since numBytes returns the bytes remaining in the underlying io.Reader.
-func (tr *Reader) skipUnread() error {
- dataSkip := tr.numBytes() // Number of data bytes to skip
- totalSkip := dataSkip + tr.pad // Total number of bytes to skip
- tr.curr, tr.pad = nil, 0
-
- // If possible, Seek to the last byte before the end of the data section.
- // Do this because Seek is often lazy about reporting errors; this will mask
- // the fact that the tar stream may be truncated. We can rely on the
- // io.CopyN done shortly afterwards to trigger any IO errors.
- var seekSkipped int64 // Number of bytes skipped via Seek
- if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
- // Not all io.Seeker can actually Seek. For example, os.Stdin implements
- // io.Seeker, but calling Seek always returns an error and performs
- // no action. Thus, we try an innocent seek to the current position
- // to see if Seek is really supported.
- pos1, err := sr.Seek(0, os.SEEK_CUR)
- if err == nil {
- // Seek seems supported, so perform the real Seek.
- pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
- if err != nil {
- tr.err = err
- return tr.err
- }
- seekSkipped = pos2 - pos1
- }
- }
-
- var copySkipped int64 // Number of bytes skipped via CopyN
- copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
- if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
- tr.err = io.ErrUnexpectedEOF
- }
- return tr.err
-}
-
-func (tr *Reader) verifyChecksum(header []byte) bool {
- if tr.err != nil {
- return false
- }
-
- var p parser
- given := p.parseOctal(header[148:156])
- unsigned, signed := checksum(header)
- return p.err == nil && (given == unsigned || given == signed)
-}
-
-// readHeader reads the next block header and assumes that the underlying reader
-// is already aligned to a block boundary.
-//
-// The err will be set to io.EOF only when one of the following occurs:
-// * Exactly 0 bytes are read and EOF is hit.
-// * Exactly 1 block of zeros is read and EOF is hit.
-// * At least 2 blocks of zeros are read.
-func (tr *Reader) readHeader() *Header {
- header := tr.hdrBuff[:]
- copy(header, zeroBlock)
-
- if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
- return nil // io.EOF is okay here
- }
-
- // Two blocks of zero bytes marks the end of the archive.
- if bytes.Equal(header, zeroBlock[0:blockSize]) {
- if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
- return nil // io.EOF is okay here
- }
- if bytes.Equal(header, zeroBlock[0:blockSize]) {
- tr.err = io.EOF
- } else {
- tr.err = ErrHeader // zero block and then non-zero block
- }
- return nil
- }
-
- if !tr.verifyChecksum(header) {
- tr.err = ErrHeader
- return nil
- }
-
- // Unpack
- var p parser
- hdr := new(Header)
- s := slicer(header)
-
- hdr.Name = p.parseString(s.next(100))
- hdr.Mode = p.parseNumeric(s.next(8))
- hdr.Uid = int(p.parseNumeric(s.next(8)))
- hdr.Gid = int(p.parseNumeric(s.next(8)))
- hdr.Size = p.parseNumeric(s.next(12))
- hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- s.next(8) // chksum
- hdr.Typeflag = s.next(1)[0]
- hdr.Linkname = p.parseString(s.next(100))
-
- // The remainder of the header depends on the value of magic.
- // The original (v7) version of tar had no explicit magic field,
- // so its magic bytes, like the rest of the block, are NULs.
- magic := string(s.next(8)) // contains version field as well.
- var format string
- switch {
- case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
- if string(header[508:512]) == "tar\x00" {
- format = "star"
- } else {
- format = "posix"
- }
- case magic == "ustar \x00": // old GNU tar
- format = "gnu"
- }
-
- switch format {
- case "posix", "gnu", "star":
- hdr.Uname = p.parseString(s.next(32))
- hdr.Gname = p.parseString(s.next(32))
- devmajor := s.next(8)
- devminor := s.next(8)
- if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
- hdr.Devmajor = p.parseNumeric(devmajor)
- hdr.Devminor = p.parseNumeric(devminor)
- }
- var prefix string
- switch format {
- case "posix", "gnu":
- prefix = p.parseString(s.next(155))
- case "star":
- prefix = p.parseString(s.next(131))
- hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- }
- if len(prefix) > 0 {
- hdr.Name = prefix + "/" + hdr.Name
- }
- }
-
- if p.err != nil {
- tr.err = p.err
- return nil
- }
-
- nb := hdr.Size
- if isHeaderOnlyType(hdr.Typeflag) {
- nb = 0
- }
- if nb < 0 {
- tr.err = ErrHeader
- return nil
- }
-
- // Set the current file reader.
- tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
- tr.curr = &regFileReader{r: tr.r, nb: nb}
-
- // Check for old GNU sparse format entry.
- if hdr.Typeflag == TypeGNUSparse {
- // Get the real size of the file.
- hdr.Size = p.parseNumeric(header[483:495])
- if p.err != nil {
- tr.err = p.err
- return nil
- }
-
- // Read the sparse map.
- sp := tr.readOldGNUSparseMap(header)
- if tr.err != nil {
- return nil
- }
-
- // Current file is a GNU sparse file. Update the current file reader.
- tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
- if tr.err != nil {
- return nil
- }
- }
-
- return hdr
-}
-
-// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
-// then one or more extension headers are used to store the rest of the sparse map.
-func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
- var p parser
- isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
- spCap := oldGNUSparseMainHeaderNumEntries
- if isExtended {
- spCap += oldGNUSparseExtendedHeaderNumEntries
- }
- sp := make([]sparseEntry, 0, spCap)
- s := slicer(header[oldGNUSparseMainHeaderOffset:])
-
- // Read the four entries from the main tar header
- for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
- offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
- numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
- if p.err != nil {
- tr.err = p.err
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
-
- for isExtended {
- // There are more entries. Read an extension header and parse its entries.
- sparseHeader := make([]byte, blockSize)
- if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
- return nil
- }
- isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
- s = slicer(sparseHeader)
- for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
- offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
- numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
- if p.err != nil {
- tr.err = p.err
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- }
- return sp
-}
-
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
-// version 1.0. The format of the sparse map consists of a series of
-// newline-terminated numeric fields. The first field is the number of entries
-// and is always present. Following this are the entries, consisting of two
-// fields (offset, numBytes). This function must stop reading at the end
-// boundary of the block containing the last newline.
-//
-// Note that the GNU manual says that numeric values should be encoded in octal
-// format. However, the GNU tar utility itself outputs these values in decimal.
-// As such, this library treats values as being encoded in decimal.
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
- var cntNewline int64
- var buf bytes.Buffer
- var blk = make([]byte, blockSize)
-
- // feedTokens copies data in numBlock chunks from r into buf until there are
- // at least cnt newlines in buf. It will not read more blocks than needed.
- var feedTokens = func(cnt int64) error {
- for cntNewline < cnt {
- if _, err := io.ReadFull(r, blk); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return err
- }
- buf.Write(blk)
- for _, c := range blk {
- if c == '\n' {
- cntNewline++
- }
- }
- }
- return nil
- }
-
- // nextToken gets the next token delimited by a newline. This assumes that
- // at least one newline exists in the buffer.
- var nextToken = func() string {
- cntNewline--
- tok, _ := buf.ReadString('\n')
- return tok[:len(tok)-1] // Cut off newline
- }
-
- // Parse for the number of entries.
- // Use integer overflow resistant math to check this.
- if err := feedTokens(1); err != nil {
- return nil, err
- }
- numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
- if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
- return nil, ErrHeader
- }
-
- // Parse for all member entries.
- // numEntries is trusted after this since a potential attacker must have
- // committed resources proportional to what this library used.
- if err := feedTokens(2 * numEntries); err != nil {
- return nil, err
- }
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(nextToken(), 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- return sp, nil
-}
-
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
-// version 0.1. The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
- // Get number of entries.
- // Use integer overflow resistant math to check this.
- numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
- numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
- if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
- return nil, ErrHeader
- }
-
- // There should be two numbers in sparseMap for each entry.
- sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
- if int64(len(sparseMap)) != 2*numEntries {
- return nil, ErrHeader
- }
-
- // Loop through the entries in the sparse map.
- // numEntries is trusted now.
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- return sp, nil
-}
-
-// numBytes returns the number of bytes left to read in the current file's entry
-// in the tar archive, or 0 if there is no current file.
-func (tr *Reader) numBytes() int64 {
- if tr.curr == nil {
- // No current file, so no bytes
- return 0
- }
- return tr.curr.numBytes()
-}
-
-// Read reads from the current entry in the tar archive.
-// It returns 0, io.EOF when it reaches the end of that entry,
-// until Next is called to advance to the next entry.
-//
-// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
-// the Header.Size claims.
-func (tr *Reader) Read(b []byte) (n int, err error) {
- if tr.err != nil {
- return 0, tr.err
- }
- if tr.curr == nil {
- return 0, io.EOF
- }
-
- n, err = tr.curr.Read(b)
- if err != nil && err != io.EOF {
- tr.err = err
- }
- return
-}
-
-func (rfr *regFileReader) Read(b []byte) (n int, err error) {
- if rfr.nb == 0 {
- // file consumed
- return 0, io.EOF
- }
- if int64(len(b)) > rfr.nb {
- b = b[0:rfr.nb]
- }
- n, err = rfr.r.Read(b)
- rfr.nb -= int64(n)
-
- if err == io.EOF && rfr.nb > 0 {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// numBytes returns the number of bytes left to read in the file's data in the tar archive.
-func (rfr *regFileReader) numBytes() int64 {
- return rfr.nb
-}
-
-// newSparseFileReader creates a new sparseFileReader, but validates all of the
-// sparse entries before doing so.
-func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
- if total < 0 {
- return nil, ErrHeader // Total size cannot be negative
- }
-
- // Validate all sparse entries. These are the same checks as performed by
- // the BSD tar utility.
- for i, s := range sp {
- switch {
- case s.offset < 0 || s.numBytes < 0:
- return nil, ErrHeader // Negative values are never okay
- case s.offset > math.MaxInt64-s.numBytes:
- return nil, ErrHeader // Integer overflow with large length
- case s.offset+s.numBytes > total:
- return nil, ErrHeader // Region extends beyond the "real" size
- case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
- return nil, ErrHeader // Regions can't overlap and must be in order
- }
- }
- return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
-}
-
-// readHole reads a sparse hole ending at endOffset.
-func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
- n64 := endOffset - sfr.pos
- if n64 > int64(len(b)) {
- n64 = int64(len(b))
- }
- n := int(n64)
- for i := 0; i < n; i++ {
- b[i] = 0
- }
- sfr.pos += n64
- return n
-}
-
-// Read reads the sparse file data in expanded form.
-func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
- // Skip past all empty fragments.
- for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
- sfr.sp = sfr.sp[1:]
- }
-
- // If there are no more fragments, then it is possible that there
- // is one last sparse hole.
- if len(sfr.sp) == 0 {
- // This behavior matches the BSD tar utility.
- // However, GNU tar stops returning data even if sfr.total is unmet.
- if sfr.pos < sfr.total {
- return sfr.readHole(b, sfr.total), nil
- }
- return 0, io.EOF
- }
-
- // In front of a data fragment, so read a hole.
- if sfr.pos < sfr.sp[0].offset {
- return sfr.readHole(b, sfr.sp[0].offset), nil
- }
-
- // In a data fragment, so read from it.
- // This math is overflow free since we verify that offset and numBytes can
- // be safely added when creating the sparseFileReader.
- endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
- bytesLeft := endPos - sfr.pos // Bytes left in fragment
- if int64(len(b)) > bytesLeft {
- b = b[:bytesLeft]
- }
-
- n, err = sfr.rfr.Read(b)
- sfr.pos += int64(n)
- if err == io.EOF {
- if sfr.pos < endPos {
- err = io.ErrUnexpectedEOF // There was supposed to be more data
- } else if sfr.pos < sfr.total {
- err = nil // There is still an implicit sparse hole at the end
- }
- }
-
- if sfr.pos == endPos {
- sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
- }
- return n, err
-}
-
-// numBytes returns the number of bytes left to read in the sparse file's
-// sparse-encoded data in the tar archive.
-func (sfr *sparseFileReader) numBytes() int64 {
- return sfr.rfr.numBytes()
-}
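
The reader.go deleted above carries the Next/Read iteration logic, including PAX extended headers, GNU long names, and sparse-file maps. As a hedged orientation sketch (not taken from this diff), the consumer-facing flow looks the same with the standard library reader; "example.tar" is an assumed, illustrative input path.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("example.tar") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
		// Drain the entry body; Read returns io.EOF at the end of each entry.
		if _, err := io.Copy(io.Discard, tr); err != nil {
			log.Fatal(err)
		}
	}
}

Next surfaces the metadata described by the PAX and GNU "meta" entries, so callers only ever see real file entries, which is the same contract the deleted reader implemented.
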
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
deleted file mode 100644
index cf9cc79c5..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux dragonfly openbsd solaris
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atim.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctim.Unix())
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
deleted file mode 100644
index 6f17dbe30..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin freebsd netbsd
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atimespec.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctimespec.Unix())
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
deleted file mode 100644
index cb843db4c..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin dragonfly freebsd openbsd netbsd solaris
-
-package tar
-
-import (
- "os"
- "syscall"
-)
-
-func init() {
- sysStat = statUnix
-}
-
-func statUnix(fi os.FileInfo, h *Header) error {
- sys, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return nil
- }
- h.Uid = int(sys.Uid)
- h.Gid = int(sys.Gid)
- // TODO(bradfitz): populate username & group. os/user
- // doesn't cache LookupId lookups, and lacks group
- // lookup functions.
- h.AccessTime = statAtime(sys)
- h.ChangeTime = statCtime(sys)
- // TODO(bradfitz): major/minor device numbers?
- return nil
-}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
deleted file mode 100644
index 30d7e606d..000000000
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - catch more errors (no first header, etc.)
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "path"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- ErrWriteTooLong = errors.New("archive/tar: write too long")
- ErrFieldTooLong = errors.New("archive/tar: header field too long")
- ErrWriteAfterClose = errors.New("archive/tar: write after close")
- errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
-)
-
-// A Writer provides sequential writing of a tar archive in POSIX.1 format.
-// A tar archive consists of a sequence of files.
-// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
-// writing at most hdr.Size bytes in total.
-type Writer struct {
- w io.Writer
- err error
- nb int64 // number of unwritten bytes for current file entry
- pad int64 // amount of padding to write after current file entry
- closed bool
- usedBinary bool // whether the binary numeric field extension was used
- preferPax bool // use pax header instead of binary numeric header
- hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
- paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
-}
-
-type formatter struct {
- err error // Last error seen
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
-
-// Flush finishes writing the current file (optional).
-func (tw *Writer) Flush() error {
- if tw.nb > 0 {
- tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
- return tw.err
- }
-
- n := tw.nb + tw.pad
- for n > 0 && tw.err == nil {
- nr := n
- if nr > blockSize {
- nr = blockSize
- }
- var nw int
- nw, tw.err = tw.w.Write(zeroBlock[0:nr])
- n -= int64(nw)
- }
- tw.nb = 0
- tw.pad = 0
- return tw.err
-}
-
-// Write s into b, terminating it with a NUL if there is room.
-func (f *formatter) formatString(b []byte, s string) {
- if len(s) > len(b) {
- f.err = ErrFieldTooLong
- return
- }
- ascii := toASCII(s)
- copy(b, ascii)
- if len(ascii) < len(b) {
- b[len(ascii)] = 0
- }
-}
-
-// Encode x as an octal ASCII string and write it into b with leading zeros.
-func (f *formatter) formatOctal(b []byte, x int64) {
- s := strconv.FormatInt(x, 8)
- // leading zeros, but leave room for a NUL.
- for len(s)+1 < len(b) {
- s = "0" + s
- }
- f.formatString(b, s)
-}
-
-// fitsInBase256 reports whether x can be encoded into n bytes using base-256
-// encoding. Unlike octal encoding, base-256 encoding does not require that the
-// string ends with a NUL character. Thus, all n bytes are available for output.
-//
-// If operating in binary mode, this assumes strict GNU binary mode; which means
-// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
-// equivalent to the sign bit in two's complement form.
-func fitsInBase256(n int, x int64) bool {
- var binBits = uint(n-1) * 8
- return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// Write x into b, as binary (GNUtar/star extension).
-func (f *formatter) formatNumeric(b []byte, x int64) {
- if fitsInBase256(len(b), x) {
- for i := len(b) - 1; i >= 0; i-- {
- b[i] = byte(x)
- x >>= 8
- }
- b[0] |= 0x80 // Highest bit indicates binary format
- return
- }
-
- f.formatOctal(b, 0) // Last resort, just write zero
- f.err = ErrFieldTooLong
-}
-
-var (
- minTime = time.Unix(0, 0)
- // There is room for 11 octal digits (33 bits) of mtime.
- maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
- return tw.writeHeader(hdr, true)
-}
-
-// writeHeader writes hdr and prepares to accept the file's contents.
-// It calls Flush if this is not the first header, and calling it after
-// Close returns ErrWriteAfterClose.
-// writeHeader is also called internally by writePAXHeader with allowPax set
-// to false, which suppresses writing a nested pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
- if tw.closed {
- return ErrWriteAfterClose
- }
- if tw.err == nil {
- tw.Flush()
- }
- if tw.err != nil {
- return tw.err
- }
-
- // a map to hold pax header records, if any are needed
- paxHeaders := make(map[string]string)
-
- // TODO(shanemhansen): we might want to use PAX headers for
- // subsecond time resolution, but for now let's just capture
- // too long fields or non ascii characters
-
- var f formatter
- var header []byte
-
- // We need to select which scratch buffer to use carefully,
- // since this method is called recursively to write PAX headers.
- // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
- // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
- // already being used by the non-recursive call, so we must use paxHdrBuff.
- header = tw.hdrBuff[:]
- if !allowPax {
- header = tw.paxHdrBuff[:]
- }
- copy(header, zeroBlock)
- s := slicer(header)
-
- // Wrappers around formatter that automatically sets paxHeaders if the
- // argument extends beyond the capacity of the input byte slice.
- var formatString = func(b []byte, s string, paxKeyword string) {
- needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
- if needsPaxHeader {
- paxHeaders[paxKeyword] = s
- return
- }
- f.formatString(b, s)
- }
- var formatNumeric = func(b []byte, x int64, paxKeyword string) {
- // Try octal first.
- s := strconv.FormatInt(x, 8)
- if len(s) < len(b) {
- f.formatOctal(b, x)
- return
- }
-
- // If it is too long for octal, and PAX is preferred, use a PAX header.
- if paxKeyword != paxNone && tw.preferPax {
- f.formatOctal(b, 0)
- s := strconv.FormatInt(x, 10)
- paxHeaders[paxKeyword] = s
- return
- }
-
- tw.usedBinary = true
- f.formatNumeric(b, x)
- }
- var formatTime = func(b []byte, t time.Time, paxKeyword string) {
- var unixTime int64
- if !t.Before(minTime) && !t.After(maxTime) {
- unixTime = t.Unix()
- }
- formatNumeric(b, unixTime, paxNone)
-
- // Write a PAX header if the time didn't fit precisely.
- if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
- paxHeaders[paxKeyword] = formatPAXTime(t)
- }
- }
-
-	// keep a reference to the filename so that we can overwrite it later if we detect that ustar long names can be used instead of pax
- pathHeaderBytes := s.next(fileNameSize)
-
- formatString(pathHeaderBytes, hdr.Name, paxPath)
-
- f.formatOctal(s.next(8), hdr.Mode) // 100:108
- formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
- formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
- formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
- formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
- s.next(8) // chksum (148:156)
- s.next(1)[0] = hdr.Typeflag // 156:157
-
- formatString(s.next(100), hdr.Linkname, paxLinkpath)
-
- copy(s.next(8), []byte("ustar\x0000")) // 257:265
- formatString(s.next(32), hdr.Uname, paxUname) // 265:297
- formatString(s.next(32), hdr.Gname, paxGname) // 297:329
- formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
- formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
-
-	// keep a reference to the prefix so that we can overwrite it later if we detect that ustar long names can be used instead of pax
- prefixHeaderBytes := s.next(155)
- formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
-
- // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
- if tw.usedBinary {
- copy(header[257:265], []byte("ustar \x00"))
- }
-
- _, paxPathUsed := paxHeaders[paxPath]
- // try to use a ustar header when only the name is too long
- if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
- prefix, suffix, ok := splitUSTARPath(hdr.Name)
- if ok {
- // Since we can encode in USTAR format, disable PAX header.
- delete(paxHeaders, paxPath)
-
- // Update the path fields
- formatString(pathHeaderBytes, suffix, paxNone)
- formatString(prefixHeaderBytes, prefix, paxNone)
- }
- }
-
- // The chksum field is terminated by a NUL and a space.
- // This is different from the other octal fields.
- chksum, _ := checksum(header)
- f.formatOctal(header[148:155], chksum) // Never fails
- header[155] = ' '
-
- // Check if there were any formatting errors.
- if f.err != nil {
- tw.err = f.err
- return tw.err
- }
-
- if allowPax {
- if !hdr.AccessTime.IsZero() {
- paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
- }
- if !hdr.ChangeTime.IsZero() {
- paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
- }
- if !hdr.CreationTime.IsZero() {
- paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
- }
- for k, v := range hdr.Xattrs {
- paxHeaders[paxXattr+k] = v
- }
- for k, v := range hdr.Winheaders {
- paxHeaders[paxWindows+k] = v
- }
- }
-
- if len(paxHeaders) > 0 {
- if !allowPax {
- return errInvalidHeader
- }
- if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
- return err
- }
- }
- tw.nb = int64(hdr.Size)
- tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
-
- _, tw.err = tw.w.Write(header)
- return tw.err
-}
-
-func formatPAXTime(t time.Time) string {
- sec := t.Unix()
- usec := t.Nanosecond()
- s := strconv.FormatInt(sec, 10)
- if usec != 0 {
- s = fmt.Sprintf("%s.%09d", s, usec)
- }
- return s
-}
-
-// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
-// If the path is not splittable, then it will return ("", "", false).
-func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
- length := len(name)
- if length <= fileNameSize || !isASCII(name) {
- return "", "", false
- } else if length > fileNamePrefixSize+1 {
- length = fileNamePrefixSize + 1
- } else if name[length-1] == '/' {
- length--
- }
-
- i := strings.LastIndex(name[:length], "/")
- nlen := len(name) - i - 1 // nlen is length of suffix
- plen := i // plen is length of prefix
- if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
- return "", "", false
- }
- return name[:i], name[i+1:], true
-}
-
-// writePAXHeader writes an extended pax header to the
-// archive.
-func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
- // Prepare extended header
- ext := new(Header)
- ext.Typeflag = TypeXHeader
- // Setting ModTime is required for reader parsing to
- // succeed, and seems harmless enough.
- ext.ModTime = hdr.ModTime
- // The spec asks that we namespace our pseudo files
- // with the current pid. However, this results in differing outputs
- // for identical inputs. As such, the constant 0 is now used instead.
- // golang.org/issue/12358
- dir, file := path.Split(hdr.Name)
- fullName := path.Join(dir, "PaxHeaders.0", file)
-
- ascii := toASCII(fullName)
- if len(ascii) > 100 {
- ascii = ascii[:100]
- }
- ext.Name = ascii
- // Construct the body
- var buf bytes.Buffer
-
- // Keys are sorted before writing to body to allow deterministic output.
- var keys []string
- for k := range paxHeaders {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- for _, k := range keys {
- fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
- }
-
- ext.Size = int64(len(buf.Bytes()))
- if err := tw.writeHeader(ext, false); err != nil {
- return err
- }
- if _, err := tw.Write(buf.Bytes()); err != nil {
- return err
- }
- if err := tw.Flush(); err != nil {
- return err
- }
- return nil
-}
-
-// formatPAXRecord formats a single PAX record, prefixing it with the
-// appropriate length.
-func formatPAXRecord(k, v string) string {
- const padding = 3 // Extra padding for ' ', '=', and '\n'
- size := len(k) + len(v) + padding
- size += len(strconv.Itoa(size))
- record := fmt.Sprintf("%d %s=%s\n", size, k, v)
-
- // Final adjustment if adding size field increased the record size.
- if len(record) != size {
- size = len(record)
- record = fmt.Sprintf("%d %s=%s\n", size, k, v)
- }
- return record
-}
-
-// Write writes to the current entry in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err error) {
- if tw.closed {
- err = ErrWriteAfterClose
- return
- }
- overwrite := false
- if int64(len(b)) > tw.nb {
- b = b[0:tw.nb]
- overwrite = true
- }
- n, err = tw.w.Write(b)
- tw.nb -= int64(n)
- if err == nil && overwrite {
- err = ErrWriteTooLong
- return
- }
- tw.err = err
- return
-}
-
-// Close closes the tar archive, flushing any unwritten
-// data to the underlying writer.
-func (tw *Writer) Close() error {
- if tw.err != nil || tw.closed {
- return tw.err
- }
- tw.Flush()
- tw.closed = true
- if tw.err != nil {
- return tw.err
- }
-
- // trailer: two zero blocks
- for i := 0; i < 2; i++ {
- _, tw.err = tw.w.Write(zeroBlock)
- if tw.err != nil {
- break
- }
- }
- return tw.err
-}
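To see the PAX record encoding used by formatPAXRecord above in isolation: each record is "<length> <key>=<value>\n", where the length counts its own digits, so one adjustment pass may be needed. The following stand-alone sketch reproduces that logic with an arbitrary sample key and value.

package main

import (
	"fmt"
	"strconv"
)

// formatPAXRecord builds one PAX record with its self-referential length prefix.
func formatPAXRecord(k, v string) string {
	const padding = 3 // ' ', '=', and '\n'
	size := len(k) + len(v) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)

	// Adding the size field may itself have grown the record; adjust once.
	if len(record) != size {
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}

func main() {
	fmt.Print(formatPAXRecord("mtime", "1350244992.023960108"))
	// Prints: 30 mtime=1350244992.023960108
}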
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
new file mode 100644
index 000000000..341609663
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go
@@ -0,0 +1,68 @@
+package backuptar
+
+import (
+ "archive/tar"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
+// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
+// Idea taken from containerd which did the same thing.
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows negative timestamps,
+// which the PAX specification permits but which are not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+ const maxNanoSecondDigits = 9
+
+ // Split string into seconds and sub-seconds parts.
+ ss, sn := s, ""
+ if pos := strings.IndexByte(s, '.'); pos >= 0 {
+ ss, sn = s[:pos], s[pos+1:]
+ }
+
+ // Parse the seconds.
+ secs, err := strconv.ParseInt(ss, 10, 64)
+ if err != nil {
+ return time.Time{}, tar.ErrHeader
+ }
+ if len(sn) == 0 {
+ return time.Unix(secs, 0), nil // No sub-second values
+ }
+
+ // Parse the nanoseconds.
+ if strings.Trim(sn, "0123456789") != "" {
+ return time.Time{}, tar.ErrHeader
+ }
+ if len(sn) < maxNanoSecondDigits {
+ sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+ } else {
+ sn = sn[:maxNanoSecondDigits] // Right truncate
+ }
+ nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+ if len(ss) > 0 && ss[0] == '-' {
+ return time.Unix(secs, -1*nsecs), nil // Negative correction
+ }
+ return time.Unix(secs, nsecs), nil
+}
+
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function supports negative timestamps.
+func formatPAXTime(ts time.Time) (s string) {
+ secs, nsecs := ts.Unix(), ts.Nanosecond()
+ if nsecs == 0 {
+ return strconv.FormatInt(secs, 10)
+ }
+
+ // If seconds is negative, then perform correction.
+ sign := ""
+ if secs < 0 {
+ sign = "-" // Remember sign
+ secs = -(secs + 1) // Add a second to secs
+ nsecs = -(nsecs - 1e9) // Take that second away from nsecs
+ }
+ return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
+}
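A quick way to sanity-check the pair of helpers above is a round-trip test in the same package. The test below is a hypothetical sketch (the file it would live in, e.g. strconv_test.go, is not part of this change).

package backuptar

import (
	"testing"
	"time"
)

// TestPAXTimeRoundTrip formats a time with sub-second precision and parses it
// back, expecting the same instant.
func TestPAXTimeRoundTrip(t *testing.T) {
	in := time.Unix(1350244992, 23960108)
	s := formatPAXTime(in) // "1350244992.023960108"
	out, err := parsePAXTime(s)
	if err != nil {
		t.Fatal(err)
	}
	if !out.Equal(in) {
		t.Fatalf("round trip mismatch: got %v, want %v", out, in)
	}
}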
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
index d6566dbf0..088a43c68 100644
--- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
+++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go
@@ -3,6 +3,7 @@
package backuptar
import (
+ "archive/tar"
"encoding/base64"
"errors"
"fmt"
@@ -15,7 +16,6 @@ import (
"time"
"github.com/Microsoft/go-winio"
- "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
)
const (
@@ -32,11 +32,13 @@ const (
)
const (
- hdrFileAttributes = "fileattr"
- hdrSecurityDescriptor = "sd"
- hdrRawSecurityDescriptor = "rawsd"
- hdrMountPoint = "mountpoint"
- hdrEaPrefix = "xattr."
+ hdrFileAttributes = "MSWINDOWS.fileattr"
+ hdrSecurityDescriptor = "MSWINDOWS.sd"
+ hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
+ hdrMountPoint = "MSWINDOWS.mountpoint"
+ hdrEaPrefix = "MSWINDOWS.xattr."
+
+ hdrCreationTime = "LIBARCHIVE.creationtime"
)
func writeZeroes(w io.Writer, count int64) error {
@@ -86,16 +88,17 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
// BasicInfoHeader creates a tar header from basic file information.
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
hdr := &tar.Header{
- Name: filepath.ToSlash(name),
- Size: size,
- Typeflag: tar.TypeReg,
- ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
- ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
- AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
- CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
- Winheaders: make(map[string]string),
+ Format: tar.FormatPAX,
+ Name: filepath.ToSlash(name),
+ Size: size,
+ Typeflag: tar.TypeReg,
+ ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
+ ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
+ AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
+ PAXRecords: make(map[string]string),
}
- hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
+ hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
+ hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
hdr.Mode |= c_ISDIR
@@ -155,7 +158,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
if err != nil {
return err
}
- hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
+ hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
case winio.BackupReparseData:
hdr.Mode |= c_ISLNK
@@ -166,7 +169,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
return err
}
if rp.IsMountPoint {
- hdr.Winheaders[hdrMountPoint] = "1"
+ hdr.PAXRecords[hdrMountPoint] = "1"
}
hdr.Linkname = rp.Target
@@ -183,7 +186,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
// Use base64 encoding for the binary value. Note that there
// is no way to encode the EA's flags, since their use doesn't
// make any sense for persisted EAs.
- hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
+ hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
}
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
@@ -254,6 +257,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size
}
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
hdr = &tar.Header{
+ Format: hdr.Format,
Name: name + altName,
Mode: hdr.Mode,
Typeflag: tar.TypeReg,
@@ -296,9 +300,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
- CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
+	// Default to ModTime; we'll use hdrCreationTime below if it is present
+ CreationTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
}
- if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
+ if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
@@ -309,6 +314,13 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
+ if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
+ creationTime, err := parsePAXTime(creationTimeStr)
+ if err != nil {
+ return "", 0, nil, err
+ }
+ fileInfo.CreationTime = syscall.NsecToFiletime(creationTime.UnixNano())
+ }
return
}
@@ -321,13 +333,13 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
- if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
+ if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
- if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
+ if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
@@ -348,7 +360,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
}
}
var eas []winio.ExtendedAttribute
- for k, v := range hdr.Winheaders {
+ for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
@@ -380,7 +392,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
}
}
if hdr.Typeflag == tar.TypeSymlink {
- _, isMountPoint := hdr.Winheaders[hdrMountPoint]
+ _, isMountPoint := hdr.PAXRecords[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
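Since the change above moves backuptar from the forked tar package onto the standard library's archive/tar, using Format: tar.FormatPAX and PAXRecords, a minimal sketch of how such records round-trip through the standard package may help. The file name, size, and record values here are made up for illustration.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"log"
	"time"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	// Hypothetical entry carrying the same kind of PAX records that
	// backuptar now writes (MSWINDOWS.* and LIBARCHIVE.creationtime).
	hdr := &tar.Header{
		Format:   tar.FormatPAX,
		Name:     "hello.txt",
		Size:     5,
		Mode:     0644,
		ModTime:  time.Now(),
		Typeflag: tar.TypeReg,
		PAXRecords: map[string]string{
			"MSWINDOWS.fileattr":      "32",
			"LIBARCHIVE.creationtime": "1350244992.023960108",
		},
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	// Reading the entry back exposes the same records via Header.PAXRecords.
	tr := tar.NewReader(&buf)
	rh, err := tr.Next()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rh.PAXRecords["LIBARCHIVE.creationtime"])
}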
diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore
new file mode 100644
index 000000000..a3062beae
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.gitignore
@@ -0,0 +1 @@
+.vscode/*
diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml
new file mode 100644
index 000000000..9c3595543
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - 1.x
+script:
+ - GOOS=windows go install github.com/chzyer/readline/example/...
+ - GOOS=linux go install github.com/chzyer/readline/example/...
+ - GOOS=darwin go install github.com/chzyer/readline/example/...
+ - go test -race -v
diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md
new file mode 100644
index 000000000..14ff5be13
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/CHANGELOG.md
@@ -0,0 +1,58 @@
+# ChangeLog
+
+### 1.4 - 2016-07-25
+
+* [#60][60] Support dynamic autocompletion
+* Fix ANSI parser on Windows
+* Fix wrong column width in complete mode on Windows
+* Remove the dependency on package "golang.org/x/crypto/ssh/terminal"
+
+### 1.3 - 2016-05-09
+
+* [#38][38] add SetChildren for prefix completer interface
+* [#42][42] improve multi-line compatibility
+* [#43][43] remove the sub-package (runes) for gopkg compatibility
+* [#46][46] Auto-complete lines with a leading space
+* [#48][48] support suspending the process (Ctrl+Z)
+* [#49][49] fix a bug in the equality check against the previous command
+* [#53][53] Fix an integer divide-by-zero panic when the input buffer is empty
+
+### 1.2 - 2016-03-05
+
+* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), written by [@sahib](https://github.com/sahib)
+* [#23][23], support stdin remapping
+* [#27][27], add a `UniqueEditLine` option to `Config`, which erases the editing line after the user submits it; typically used in IM-style interfaces.
+* Add a demo for multiline input [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go), which can submit one SQL statement across multiple lines.
+* Supports operation even when stdin/stdout is not a tty.
+* Add new simple APIs for a single instance; see [here](https://github.com/chzyer/readline/blob/master/std.go). History must be saved manually when using these APIs.
+* [#28][28], fixes history not working as expected.
+* [#33][33], vim mode now supports `c`, `d`, `x` (delete character), `r` (replace character)
+
+### 1.1 - 2015-11-20
+
+* [#12][12] Add support for key `<Delete>`/`<Home>`/`<End>`
+* Only enter raw mode as needed (when calling `Readline()`); the program will receive signals (e.g. Ctrl+C) when not interacting with `readline`.
+* Bug fixes for `PrefixCompleter`
+* Pressing `Ctrl+D` on an empty line now returns `io.EOF` as the error, and pressing `Ctrl+C` at any time returns `ErrInterrupt` instead of `io.EOF`, providing a shell-like user experience.
+* Customizable Interrupt/EOF prompt in `Config`
+* [#17][17] Change the atomic package usage to 32-bit functions so it runs on 32-bit ARM devices
+* Provides a new password input experience (`readline.ReadPasswordEx()`).
+
+### 1.0 - 2015-10-14
+
+* Initial public release.
+
+[12]: https://github.com/chzyer/readline/pull/12
+[17]: https://github.com/chzyer/readline/pull/17
+[23]: https://github.com/chzyer/readline/pull/23
+[27]: https://github.com/chzyer/readline/pull/27
+[28]: https://github.com/chzyer/readline/pull/28
+[33]: https://github.com/chzyer/readline/pull/33
+[38]: https://github.com/chzyer/readline/pull/38
+[42]: https://github.com/chzyer/readline/pull/42
+[43]: https://github.com/chzyer/readline/pull/43
+[46]: https://github.com/chzyer/readline/pull/46
+[48]: https://github.com/chzyer/readline/pull/48
+[49]: https://github.com/chzyer/readline/pull/49
+[53]: https://github.com/chzyer/readline/pull/53
+[60]: https://github.com/chzyer/readline/pull/60
diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE
new file mode 100644
index 000000000..c9afab3dc
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Chzyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md
new file mode 100644
index 000000000..fab974b7f
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/README.md
@@ -0,0 +1,114 @@
+[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline)
+[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md)
+[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases)
+[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline)
+[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers)
+[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors)
+
+<p align="center">
+<img src="https://raw.githubusercontent.com/chzyer/readline/assets/logo.png" />
+<a href="https://asciinema.org/a/32oseof9mkilg7t7d4780qt4m" target="_blank"><img src="https://asciinema.org/a/32oseof9mkilg7t7d4780qt4m.png" width="654"/></a>
+<img src="https://raw.githubusercontent.com/chzyer/readline/assets/logo_f.png" />
+</p>
+
+A powerful readline library for `Linux`, `macOS`, `Windows`, and `Solaris`
+
+## Guide
+
+* [Demo](example/readline-demo/readline-demo.go)
+* [Shortcut](doc/shortcut.md)
+
+## Repos using readline
+
+[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach)
+[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto)
+[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire)
+[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg)
+[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql)
+[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman)
+[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp)
+[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell)
+[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001)
+[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p)
+
+
+## Feedback
+
+If you have any questions, please submit a GitHub issue; pull requests are welcome :)
+
+* [https://twitter.com/chzyer](https://twitter.com/chzyer)
+* [http://weibo.com/2145262190](http://weibo.com/2145262190)
+
+
+## Backers
+
+Love Readline? Help me keep it alive by donating funds to cover project expenses!<br />
+[[Become a backer](https://opencollective.com/readline#backer)]
+
+<a href="https://opencollective.com/readline/backer/0/website" target="_blank"><img src="https://opencollective.com/readline/backer/0/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/1/website" target="_blank"><img src="https://opencollective.com/readline/backer/1/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/2/website" target="_blank"><img src="https://opencollective.com/readline/backer/2/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/3/website" target="_blank"><img src="https://opencollective.com/readline/backer/3/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/4/website" target="_blank"><img src="https://opencollective.com/readline/backer/4/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/5/website" target="_blank"><img src="https://opencollective.com/readline/backer/5/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/6/website" target="_blank"><img src="https://opencollective.com/readline/backer/6/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/7/website" target="_blank"><img src="https://opencollective.com/readline/backer/7/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/8/website" target="_blank"><img src="https://opencollective.com/readline/backer/8/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/9/website" target="_blank"><img src="https://opencollective.com/readline/backer/9/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/10/website" target="_blank"><img src="https://opencollective.com/readline/backer/10/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/11/website" target="_blank"><img src="https://opencollective.com/readline/backer/11/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/12/website" target="_blank"><img src="https://opencollective.com/readline/backer/12/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/13/website" target="_blank"><img src="https://opencollective.com/readline/backer/13/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/14/website" target="_blank"><img src="https://opencollective.com/readline/backer/14/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/15/website" target="_blank"><img src="https://opencollective.com/readline/backer/15/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/16/website" target="_blank"><img src="https://opencollective.com/readline/backer/16/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/17/website" target="_blank"><img src="https://opencollective.com/readline/backer/17/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/18/website" target="_blank"><img src="https://opencollective.com/readline/backer/18/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/19/website" target="_blank"><img src="https://opencollective.com/readline/backer/19/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/20/website" target="_blank"><img src="https://opencollective.com/readline/backer/20/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/21/website" target="_blank"><img src="https://opencollective.com/readline/backer/21/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/22/website" target="_blank"><img src="https://opencollective.com/readline/backer/22/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/23/website" target="_blank"><img src="https://opencollective.com/readline/backer/23/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/24/website" target="_blank"><img src="https://opencollective.com/readline/backer/24/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/25/website" target="_blank"><img src="https://opencollective.com/readline/backer/25/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/26/website" target="_blank"><img src="https://opencollective.com/readline/backer/26/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/27/website" target="_blank"><img src="https://opencollective.com/readline/backer/27/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/28/website" target="_blank"><img src="https://opencollective.com/readline/backer/28/avatar.svg"></a>
+<a href="https://opencollective.com/readline/backer/29/website" target="_blank"><img src="https://opencollective.com/readline/backer/29/avatar.svg"></a>
+
+
+## Sponsors
+
+Become a sponsor and get your logo here on our GitHub page. [[Become a sponsor](https://opencollective.com/readline#sponsor)]
+
+<a href="https://opencollective.com/readline/sponsor/0/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/0/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/1/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/1/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/2/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/2/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/3/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/3/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/4/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/4/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/5/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/5/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/6/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/6/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/7/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/7/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/8/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/8/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/9/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/9/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/10/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/10/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/11/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/11/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/12/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/12/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/13/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/13/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/14/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/14/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/15/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/15/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/16/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/16/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/17/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/17/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/18/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/18/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/19/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/19/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/20/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/20/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/21/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/21/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/22/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/22/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/23/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/23/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/24/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/24/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/25/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/25/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/26/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/26/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/27/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/27/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/28/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/28/avatar.svg"></a>
+<a href="https://opencollective.com/readline/sponsor/29/website" target="_blank"><img src="https://opencollective.com/readline/sponsor/29/avatar.svg"></a>
+
diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go
new file mode 100644
index 000000000..63b908c18
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/ansi_windows.go
@@ -0,0 +1,249 @@
+// +build windows
+
+package readline
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode/utf8"
+ "unsafe"
+)
+
+const (
+ _ = uint16(0)
+ COLOR_FBLUE = 0x0001
+ COLOR_FGREEN = 0x0002
+ COLOR_FRED = 0x0004
+ COLOR_FINTENSITY = 0x0008
+
+ COLOR_BBLUE = 0x0010
+ COLOR_BGREEN = 0x0020
+ COLOR_BRED = 0x0040
+ COLOR_BINTENSITY = 0x0080
+
+ COMMON_LVB_UNDERSCORE = 0x8000
+ COMMON_LVB_BOLD = 0x0007
+)
+
+var ColorTableFg = []word{
+ 0, // 30: Black
+ COLOR_FRED, // 31: Red
+ COLOR_FGREEN, // 32: Green
+ COLOR_FRED | COLOR_FGREEN, // 33: Yellow
+ COLOR_FBLUE, // 34: Blue
+ COLOR_FRED | COLOR_FBLUE, // 35: Magenta
+ COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan
+ COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White
+}
+
+var ColorTableBg = []word{
+ 0, // 40: Black
+ COLOR_BRED, // 41: Red
+ COLOR_BGREEN, // 42: Green
+ COLOR_BRED | COLOR_BGREEN, // 43: Yellow
+ COLOR_BBLUE, // 44: Blue
+ COLOR_BRED | COLOR_BBLUE, // 45: Magenta
+ COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan
+ COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White
+}
+
+type ANSIWriter struct {
+ target io.Writer
+ wg sync.WaitGroup
+ ctx *ANSIWriterCtx
+ sync.Mutex
+}
+
+func NewANSIWriter(w io.Writer) *ANSIWriter {
+ a := &ANSIWriter{
+ target: w,
+ ctx: NewANSIWriterCtx(w),
+ }
+ return a
+}
+
+func (a *ANSIWriter) Close() error {
+ a.wg.Wait()
+ return nil
+}
+
+type ANSIWriterCtx struct {
+ isEsc bool
+ isEscSeq bool
+ arg []string
+ target *bufio.Writer
+ wantFlush bool
+}
+
+func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx {
+ return &ANSIWriterCtx{
+ target: bufio.NewWriter(target),
+ }
+}
+
+func (a *ANSIWriterCtx) Flush() {
+ a.target.Flush()
+}
+
+func (a *ANSIWriterCtx) process(r rune) bool {
+ if a.wantFlush {
+ if r == 0 || r == CharEsc {
+ a.wantFlush = false
+ a.target.Flush()
+ }
+ }
+ if a.isEscSeq {
+ a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg)
+ return true
+ }
+
+ switch r {
+ case CharEsc:
+ a.isEsc = true
+ case '[':
+ if a.isEsc {
+ a.arg = nil
+ a.isEscSeq = true
+ a.isEsc = false
+ break
+ }
+ fallthrough
+ default:
+ a.target.WriteRune(r)
+ a.wantFlush = true
+ }
+ return true
+}
+
+func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool {
+ arg := *argptr
+ var err error
+
+ if r >= 'A' && r <= 'D' {
+ count := short(GetInt(arg, 1))
+ info, err := GetConsoleScreenBufferInfo()
+ if err != nil {
+ return false
+ }
+ switch r {
+ case 'A': // up
+ info.dwCursorPosition.y -= count
+ case 'B': // down
+ info.dwCursorPosition.y += count
+ case 'C': // right
+ info.dwCursorPosition.x += count
+ case 'D': // left
+ info.dwCursorPosition.x -= count
+ }
+ SetConsoleCursorPosition(&info.dwCursorPosition)
+ return false
+ }
+
+ switch r {
+ case 'J':
+ killLines()
+ case 'K':
+ eraseLine()
+ case 'm':
+ color := word(0)
+ for _, item := range arg {
+ var c int
+ c, err = strconv.Atoi(item)
+ if err != nil {
+ w.WriteString("[" + strings.Join(arg, ";") + "m")
+ break
+ }
+ if c >= 30 && c < 40 {
+ color ^= COLOR_FINTENSITY
+ color |= ColorTableFg[c-30]
+ } else if c >= 40 && c < 50 {
+ color ^= COLOR_BINTENSITY
+ color |= ColorTableBg[c-40]
+ } else if c == 4 {
+ color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7]
+ } else if c == 1 {
+ color |= COMMON_LVB_BOLD | COLOR_FINTENSITY
+ } else { // unknown code treat as reset
+ color = ColorTableFg[7]
+ }
+ }
+ if err != nil {
+ break
+ }
+ kernel.SetConsoleTextAttribute(stdout, uintptr(color))
+ case '\007': // set title
+ case ';':
+ if len(arg) == 0 || arg[len(arg)-1] != "" {
+ arg = append(arg, "")
+ *argptr = arg
+ }
+ return true
+ default:
+ if len(arg) == 0 {
+ arg = append(arg, "")
+ }
+ arg[len(arg)-1] += string(r)
+ *argptr = arg
+ return true
+ }
+ *argptr = nil
+ return false
+}
+
+func (a *ANSIWriter) Write(b []byte) (int, error) {
+ a.Lock()
+ defer a.Unlock()
+
+ off := 0
+ for len(b) > off {
+ r, size := utf8.DecodeRune(b[off:])
+ if size == 0 {
+ return off, io.ErrShortWrite
+ }
+ off += size
+ a.ctx.process(r)
+ }
+ a.ctx.Flush()
+ return off, nil
+}
+
+func killLines() error {
+ sbi, err := GetConsoleScreenBufferInfo()
+ if err != nil {
+ return err
+ }
+
+ size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x
+ size += sbi.dwCursorPosition.x
+
+ var written int
+ kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]),
+ uintptr(size),
+ sbi.dwCursorPosition.ptr(),
+ uintptr(unsafe.Pointer(&written)),
+ )
+ return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
+ uintptr(size),
+ sbi.dwCursorPosition.ptr(),
+ uintptr(unsafe.Pointer(&written)),
+ )
+}
+
+func eraseLine() error {
+ sbi, err := GetConsoleScreenBufferInfo()
+ if err != nil {
+ return err
+ }
+
+ size := sbi.dwSize.x
+ sbi.dwCursorPosition.x = 0
+ var written int
+ return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '),
+ uintptr(size),
+ sbi.dwCursorPosition.ptr(),
+ uintptr(unsafe.Pointer(&written)),
+ )
+}
diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go
new file mode 100644
index 000000000..c08c99414
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/complete.go
@@ -0,0 +1,285 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+)
+
+type AutoCompleter interface {
+	// Readline will pass the whole line and the current offset to it.
+	// The completer must return all candidates, along with how many characters they share with the line.
+ // Example:
+ // [go, git, git-shell, grep]
+ // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1
+ // Do("gi", 2) => ["t", "t-shell"], 2
+ // Do("git", 3) => ["", "-shell"], 3
+ Do(line []rune, pos int) (newLine [][]rune, length int)
+}
+
+type TabCompleter struct{}
+
+func (t *TabCompleter) Do([]rune, int) ([][]rune, int) {
+ return [][]rune{[]rune("\t")}, 0
+}
+
+type opCompleter struct {
+ w io.Writer
+ op *Operation
+ width int
+
+ inCompleteMode bool
+ inSelectMode bool
+ candidate [][]rune
+ candidateSource []rune
+ candidateOff int
+ candidateChoise int
+ candidateColNum int
+}
+
+func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter {
+ return &opCompleter{
+ w: w,
+ op: op,
+ width: width,
+ }
+}
+
+func (o *opCompleter) doSelect() {
+ if len(o.candidate) == 1 {
+ o.op.buf.WriteRunes(o.candidate[0])
+ o.ExitCompleteMode(false)
+ return
+ }
+ o.nextCandidate(1)
+ o.CompleteRefresh()
+}
+
+func (o *opCompleter) nextCandidate(i int) {
+ o.candidateChoise += i
+ o.candidateChoise = o.candidateChoise % len(o.candidate)
+ if o.candidateChoise < 0 {
+ o.candidateChoise = len(o.candidate) + o.candidateChoise
+ }
+}
+
+func (o *opCompleter) OnComplete() bool {
+ if o.width == 0 {
+ return false
+ }
+ if o.IsInCompleteSelectMode() {
+ o.doSelect()
+ return true
+ }
+
+ buf := o.op.buf
+ rs := buf.Runes()
+
+ if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) {
+ o.EnterCompleteSelectMode()
+ o.doSelect()
+ return true
+ }
+
+ o.ExitCompleteSelectMode()
+ o.candidateSource = rs
+ newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx)
+ if len(newLines) == 0 {
+ o.ExitCompleteMode(false)
+ return true
+ }
+
+ // only Aggregate candidates in non-complete mode
+ if !o.IsInCompleteMode() {
+ if len(newLines) == 1 {
+ buf.WriteRunes(newLines[0])
+ o.ExitCompleteMode(false)
+ return true
+ }
+
+ same, size := runes.Aggregate(newLines)
+ if size > 0 {
+ buf.WriteRunes(same)
+ o.ExitCompleteMode(false)
+ return true
+ }
+ }
+
+ o.EnterCompleteMode(offset, newLines)
+ return true
+}
+
+func (o *opCompleter) IsInCompleteSelectMode() bool {
+ return o.inSelectMode
+}
+
+func (o *opCompleter) IsInCompleteMode() bool {
+ return o.inCompleteMode
+}
+
+func (o *opCompleter) HandleCompleteSelect(r rune) bool {
+ next := true
+ switch r {
+ case CharEnter, CharCtrlJ:
+ next = false
+ o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise])
+ o.ExitCompleteMode(false)
+ case CharLineStart:
+ num := o.candidateChoise % o.candidateColNum
+ o.nextCandidate(-num)
+ case CharLineEnd:
+ num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1
+ o.candidateChoise += num
+ if o.candidateChoise >= len(o.candidate) {
+ o.candidateChoise = len(o.candidate) - 1
+ }
+ case CharBackspace:
+ o.ExitCompleteSelectMode()
+ next = false
+ case CharTab, CharForward:
+ o.doSelect()
+ case CharBell, CharInterrupt:
+ o.ExitCompleteMode(true)
+ next = false
+ case CharNext:
+ tmpChoise := o.candidateChoise + o.candidateColNum
+ if tmpChoise >= o.getMatrixSize() {
+ tmpChoise -= o.getMatrixSize()
+ } else if tmpChoise >= len(o.candidate) {
+ tmpChoise += o.candidateColNum
+ tmpChoise -= o.getMatrixSize()
+ }
+ o.candidateChoise = tmpChoise
+ case CharBackward:
+ o.nextCandidate(-1)
+ case CharPrev:
+ tmpChoise := o.candidateChoise - o.candidateColNum
+ if tmpChoise < 0 {
+ tmpChoise += o.getMatrixSize()
+ if tmpChoise >= len(o.candidate) {
+ tmpChoise -= o.candidateColNum
+ }
+ }
+ o.candidateChoise = tmpChoise
+ default:
+ next = false
+ o.ExitCompleteSelectMode()
+ }
+ if next {
+ o.CompleteRefresh()
+ return true
+ }
+ return false
+}
+
+func (o *opCompleter) getMatrixSize() int {
+ line := len(o.candidate) / o.candidateColNum
+ if len(o.candidate)%o.candidateColNum != 0 {
+ line++
+ }
+ return line * o.candidateColNum
+}
+
+func (o *opCompleter) OnWidthChange(newWidth int) {
+ o.width = newWidth
+}
+
+func (o *opCompleter) CompleteRefresh() {
+ if !o.inCompleteMode {
+ return
+ }
+ lineCnt := o.op.buf.CursorLineCount()
+ colWidth := 0
+ for _, c := range o.candidate {
+ w := runes.WidthAll(c)
+ if w > colWidth {
+ colWidth = w
+ }
+ }
+ colWidth += o.candidateOff + 1
+ same := o.op.buf.RuneSlice(-o.candidateOff)
+
+	// -1 to avoid reaching the end of the line
+ width := o.width - 1
+ colNum := width / colWidth
+ if colNum != 0 {
+ colWidth += (width - (colWidth * colNum)) / colNum
+ }
+
+ o.candidateColNum = colNum
+ buf := bufio.NewWriter(o.w)
+ buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
+
+ colIdx := 0
+ lines := 1
+ buf.WriteString("\033[J")
+ for idx, c := range o.candidate {
+ inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode()
+ if inSelect {
+ buf.WriteString("\033[30;47m")
+ }
+ buf.WriteString(string(same))
+ buf.WriteString(string(c))
+ buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same)))
+
+ if inSelect {
+ buf.WriteString("\033[0m")
+ }
+
+ colIdx++
+ if colIdx == colNum {
+ buf.WriteString("\n")
+ lines++
+ colIdx = 0
+ }
+ }
+
+ // move back
+ fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines)
+ fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen())
+ buf.Flush()
+}
+
+func (o *opCompleter) aggCandidate(candidate [][]rune) int {
+ offset := 0
+ for i := 0; i < len(candidate[0]); i++ {
+ for j := 0; j < len(candidate)-1; j++ {
+ if i > len(candidate[j]) {
+ goto aggregate
+ }
+ if candidate[j][i] != candidate[j+1][i] {
+ goto aggregate
+ }
+ }
+ offset = i
+ }
+aggregate:
+ return offset
+}
+
+func (o *opCompleter) EnterCompleteSelectMode() {
+ o.inSelectMode = true
+ o.candidateChoise = -1
+ o.CompleteRefresh()
+}
+
+func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) {
+ o.inCompleteMode = true
+ o.candidate = candidate
+ o.candidateOff = offset
+ o.CompleteRefresh()
+}
+
+func (o *opCompleter) ExitCompleteSelectMode() {
+ o.inSelectMode = false
+ o.candidate = nil
+ o.candidateChoise = -1
+ o.candidateOff = -1
+ o.candidateSource = nil
+}
+
+func (o *opCompleter) ExitCompleteMode(revent bool) {
+ o.inCompleteMode = false
+ o.ExitCompleteSelectMode()
+}
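The AutoCompleter contract documented at the top of this file is easy to satisfy outside the package. Here is a toy implementation as a sketch; the type name and word list are invented for illustration and assume ASCII input (so byte and rune lengths coincide).

package main

import (
	"fmt"
	"strings"
)

// wordCompleter returns, for the typed prefix, the remaining suffix of every
// matching word plus the length of the shared prefix, as the AutoCompleter
// interface documents.
type wordCompleter struct {
	words []string
}

func (c *wordCompleter) Do(line []rune, pos int) ([][]rune, int) {
	prefix := string(line[:pos])
	var out [][]rune
	for _, w := range c.words {
		if strings.HasPrefix(w, prefix) {
			out = append(out, []rune(w[len(prefix):]))
		}
	}
	return out, len(prefix)
}

func main() {
	c := &wordCompleter{words: []string{"go", "git", "git-shell", "grep"}}
	cands, n := c.Do([]rune("gi"), 2)
	for _, cand := range cands {
		fmt.Printf("%q ", string(cand))
	}
	fmt.Println(n) // "t" "t-shell" 2
}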
diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go
new file mode 100644
index 000000000..58d724872
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/complete_helper.go
@@ -0,0 +1,165 @@
+package readline
+
+import (
+ "bytes"
+ "strings"
+)
+
+// DynamicCompleteFunc is the callback type used for dynamic completion.
+type DynamicCompleteFunc func(string) []string
+
+type PrefixCompleterInterface interface {
+ Print(prefix string, level int, buf *bytes.Buffer)
+ Do(line []rune, pos int) (newLine [][]rune, length int)
+ GetName() []rune
+ GetChildren() []PrefixCompleterInterface
+ SetChildren(children []PrefixCompleterInterface)
+}
+
+type DynamicPrefixCompleterInterface interface {
+ PrefixCompleterInterface
+ IsDynamic() bool
+ GetDynamicNames(line []rune) [][]rune
+}
+
+type PrefixCompleter struct {
+ Name []rune
+ Dynamic bool
+ Callback DynamicCompleteFunc
+ Children []PrefixCompleterInterface
+}
+
+func (p *PrefixCompleter) Tree(prefix string) string {
+ buf := bytes.NewBuffer(nil)
+ p.Print(prefix, 0, buf)
+ return buf.String()
+}
+
+func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) {
+ if strings.TrimSpace(string(p.GetName())) != "" {
+ buf.WriteString(prefix)
+ if level > 0 {
+ buf.WriteString("├")
+ buf.WriteString(strings.Repeat("─", (level*4)-2))
+ buf.WriteString(" ")
+ }
+ buf.WriteString(string(p.GetName()) + "\n")
+ level++
+ }
+ for _, ch := range p.GetChildren() {
+ ch.Print(prefix, level, buf)
+ }
+}
+
+func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) {
+ Print(p, prefix, level, buf)
+}
+
+func (p *PrefixCompleter) IsDynamic() bool {
+ return p.Dynamic
+}
+
+func (p *PrefixCompleter) GetName() []rune {
+ return p.Name
+}
+
+func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune {
+ var names = [][]rune{}
+ for _, name := range p.Callback(string(line)) {
+ names = append(names, []rune(name+" "))
+ }
+ return names
+}
+
+func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface {
+ return p.Children
+}
+
+func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) {
+ p.Children = children
+}
+
+func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter {
+ return PcItem("", pc...)
+}
+
+func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter {
+ name += " "
+ return &PrefixCompleter{
+ Name: []rune(name),
+ Dynamic: false,
+ Children: pc,
+ }
+}
+
+func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter {
+ return &PrefixCompleter{
+ Callback: callback,
+ Dynamic: true,
+ Children: pc,
+ }
+}
+
+func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) {
+ return doInternal(p, line, pos, line)
+}
+
+func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) {
+ return doInternal(p, line, pos, line)
+}
+
+func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) {
+ line = runes.TrimSpaceLeft(line[:pos])
+ goNext := false
+ var lineCompleter PrefixCompleterInterface
+ for _, child := range p.GetChildren() {
+ childNames := make([][]rune, 1)
+
+ childDynamic, ok := child.(DynamicPrefixCompleterInterface)
+ if ok && childDynamic.IsDynamic() {
+ childNames = childDynamic.GetDynamicNames(origLine)
+ } else {
+ childNames[0] = child.GetName()
+ }
+
+ for _, childName := range childNames {
+ if len(line) >= len(childName) {
+ if runes.HasPrefix(line, childName) {
+ if len(line) == len(childName) {
+ newLine = append(newLine, []rune{' '})
+ } else {
+ newLine = append(newLine, childName)
+ }
+ offset = len(childName)
+ lineCompleter = child
+ goNext = true
+ }
+ } else {
+ if runes.HasPrefix(childName, line) {
+ newLine = append(newLine, childName[len(line):])
+ offset = len(line)
+ lineCompleter = child
+ }
+ }
+ }
+ }
+
+ if len(newLine) != 1 {
+ return
+ }
+
+ tmpLine := make([]rune, 0, len(line))
+ for i := offset; i < len(line); i++ {
+ if line[i] == ' ' {
+ continue
+ }
+
+ tmpLine = append(tmpLine, line[i:]...)
+ return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine)
+ }
+
+ if goNext {
+ return doInternal(lineCompleter, nil, 0, origLine)
+ }
+ return
+}
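The PrefixCompleter above is normally assembled as a static tree of PcItem nodes, with PcItemDynamic supplying candidates at completion time; a minimal usage sketch (the command and profile names are illustrative only):

package main

import "github.com/chzyer/readline"

func main() {
	// Static tree plus one dynamic node; the callback runs on every TAB press.
	completer := readline.NewPrefixCompleter(
		readline.PcItem("mode",
			readline.PcItem("vi"),
			readline.PcItem("emacs"),
		),
		readline.PcItem("use",
			readline.PcItemDynamic(func(line string) []string {
				// Illustrative only: real code would derive these from application state.
				return []string{"profile-a", "profile-b"}
			}),
		),
	)

	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: completer,
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		line, err := rl.Readline()
		if err != nil { // io.EOF or readline.ErrInterrupt
			break
		}
		println(line)
	}
}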
diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go
new file mode 100644
index 000000000..5ceadd80f
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/complete_segment.go
@@ -0,0 +1,82 @@
+package readline
+
+type SegmentCompleter interface {
+ // a
+ // |- a1
+ // |--- a11
+ // |- a2
+ // b
+ // input:
+ // DoTree([], 0) [a, b]
+ // DoTree([a], 1) [a]
+ // DoTree([a, ], 0) [a1, a2]
+ // DoTree([a, a], 1) [a1, a2]
+ // DoTree([a, a1], 2) [a1]
+ // DoTree([a, a1, ], 0) [a11]
+ // DoTree([a, a1, a], 1) [a11]
+ DoSegment([][]rune, int) [][]rune
+}
+
+type dumpSegmentCompleter struct {
+ f func([][]rune, int) [][]rune
+}
+
+func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune {
+ return d.f(segment, n)
+}
+
+func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter {
+ return &SegmentComplete{&dumpSegmentCompleter{f}}
+}
+
+func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete {
+ return &SegmentComplete{
+ SegmentCompleter: completer,
+ }
+}
+
+type SegmentComplete struct {
+ SegmentCompleter
+}
+
+func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) {
+ ret := make([][]rune, 0, len(cands))
+ lastSegment := segments[len(segments)-1]
+ for _, cand := range cands {
+ if !runes.HasPrefix(cand, lastSegment) {
+ continue
+ }
+ ret = append(ret, cand[len(lastSegment):])
+ }
+ return ret, idx
+}
+
+func SplitSegment(line []rune, pos int) ([][]rune, int) {
+ segs := [][]rune{}
+ lastIdx := -1
+ line = line[:pos]
+ pos = 0
+ for idx, l := range line {
+ if l == ' ' {
+ pos = 0
+ segs = append(segs, line[lastIdx+1:idx])
+ lastIdx = idx
+ } else {
+ pos++
+ }
+ }
+ segs = append(segs, line[lastIdx+1:])
+ return segs, pos
+}
+
+func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) {
+
+ segment, idx := SplitSegment(line, pos)
+
+ cands := c.DoSegment(segment, idx)
+ newLine, offset = RetSegment(segment, cands, idx)
+ for idx := range newLine {
+ newLine[idx] = append(newLine[idx], ' ')
+ }
+ return newLine, offset
+}
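SegmentFunc above wraps a plain function into an AutoCompleter that receives the line already split on spaces, in the shape shown by the DoSegment comment; a minimal sketch (the candidate words are illustrative):

package main

import "github.com/chzyer/readline"

func main() {
	// Complete a two-level command: a verb, then an object for that verb.
	// segment holds the space-separated words typed so far; n is the length
	// of the segment currently being completed.
	completer := readline.SegmentFunc(func(segment [][]rune, n int) [][]rune {
		if len(segment) <= 1 {
			return [][]rune{[]rune("get"), []rune("set")}
		}
		return [][]rune{[]rune("name"), []rune("value")}
	})

	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: completer,
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		if _, err := rl.Readline(); err != nil {
			break
		}
	}
}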
diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go
new file mode 100644
index 000000000..6b17c464b
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/history.go
@@ -0,0 +1,330 @@
+package readline
+
+import (
+ "bufio"
+ "container/list"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+)
+
+type hisItem struct {
+ Source []rune
+ Version int64
+ Tmp []rune
+}
+
+func (h *hisItem) Clean() {
+ h.Source = nil
+ h.Tmp = nil
+}
+
+type opHistory struct {
+ cfg *Config
+ history *list.List
+ historyVer int64
+ current *list.Element
+ fd *os.File
+ fdLock sync.Mutex
+ enable bool
+}
+
+func newOpHistory(cfg *Config) (o *opHistory) {
+ o = &opHistory{
+ cfg: cfg,
+ history: list.New(),
+ enable: true,
+ }
+ return o
+}
+
+func (o *opHistory) Reset() {
+ o.history = list.New()
+ o.current = nil
+}
+
+func (o *opHistory) IsHistoryClosed() bool {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ return o.fd.Fd() == ^(uintptr(0))
+}
+
+func (o *opHistory) Init() {
+ if o.IsHistoryClosed() {
+ o.initHistory()
+ }
+}
+
+func (o *opHistory) initHistory() {
+ if o.cfg.HistoryFile != "" {
+ o.historyUpdatePath(o.cfg.HistoryFile)
+ }
+}
+
+// only called by initHistory
+func (o *opHistory) historyUpdatePath(path string) {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
+ if err != nil {
+ return
+ }
+ o.fd = f
+ r := bufio.NewReader(o.fd)
+ total := 0
+ for ; ; total++ {
+ line, err := r.ReadString('\n')
+ if err != nil {
+ break
+ }
+ // ignore the empty line
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ continue
+ }
+ o.Push([]rune(line))
+ o.Compact()
+ }
+ if total > o.cfg.HistoryLimit {
+ o.rewriteLocked()
+ }
+ o.historyVer++
+ o.Push(nil)
+ return
+}
+
+func (o *opHistory) Compact() {
+ for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 {
+ o.history.Remove(o.history.Front())
+ }
+}
+
+func (o *opHistory) Rewrite() {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ o.rewriteLocked()
+}
+
+func (o *opHistory) rewriteLocked() {
+ if o.cfg.HistoryFile == "" {
+ return
+ }
+
+ tmpFile := o.cfg.HistoryFile + ".tmp"
+ fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666)
+ if err != nil {
+ return
+ }
+
+ buf := bufio.NewWriter(fd)
+ for elem := o.history.Front(); elem != nil; elem = elem.Next() {
+ buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n")
+ }
+ buf.Flush()
+
+ // replace history file
+ if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil {
+ fd.Close()
+ return
+ }
+
+ if o.fd != nil {
+ o.fd.Close()
+ }
+ // fd is write-only, which satisfies what we need.
+ o.fd = fd
+}
+
+func (o *opHistory) Close() {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ if o.fd != nil {
+ o.fd.Close()
+ }
+}
+
+func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) {
+ for elem := o.current; elem != nil; elem = elem.Prev() {
+ item := o.showItem(elem.Value)
+ if isNewSearch {
+ start += len(rs)
+ }
+ if elem == o.current {
+ if len(item) >= start {
+ item = item[:start]
+ }
+ }
+ idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold)
+ if idx < 0 {
+ continue
+ }
+ return idx, elem
+ }
+ return -1, nil
+}
+
+func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) {
+ for elem := o.current; elem != nil; elem = elem.Next() {
+ item := o.showItem(elem.Value)
+ if isNewSearch {
+ start -= len(rs)
+ if start < 0 {
+ start = 0
+ }
+ }
+ if elem == o.current {
+ if len(item)-1 >= start {
+ item = item[start:]
+ } else {
+ continue
+ }
+ }
+ idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold)
+ if idx < 0 {
+ continue
+ }
+ if elem == o.current {
+ idx += start
+ }
+ return idx, elem
+ }
+ return -1, nil
+}
+
+func (o *opHistory) showItem(obj interface{}) []rune {
+ item := obj.(*hisItem)
+ if item.Version == o.historyVer {
+ return item.Tmp
+ }
+ return item.Source
+}
+
+func (o *opHistory) Prev() []rune {
+ if o.current == nil {
+ return nil
+ }
+ current := o.current.Prev()
+ if current == nil {
+ return nil
+ }
+ o.current = current
+ return runes.Copy(o.showItem(current.Value))
+}
+
+func (o *opHistory) Next() ([]rune, bool) {
+ if o.current == nil {
+ return nil, false
+ }
+ current := o.current.Next()
+ if current == nil {
+ return nil, false
+ }
+
+ o.current = current
+ return runes.Copy(o.showItem(current.Value)), true
+}
+
+// Disable the current history
+func (o *opHistory) Disable() {
+ o.enable = false
+}
+
+// Enable the current history
+func (o *opHistory) Enable() {
+ o.enable = true
+}
+
+func (o *opHistory) debug() {
+ Debug("-------")
+ for item := o.history.Front(); item != nil; item = item.Next() {
+ Debug(fmt.Sprintf("%+v", item.Value))
+ }
+}
+
+// save history
+func (o *opHistory) New(current []rune) (err error) {
+
+ // history deactivated
+ if !o.enable {
+ return nil
+ }
+
+ current = runes.Copy(current)
+
+ // if the last command is reused without modification,
+ // just clean the latest history entry
+ if back := o.history.Back(); back != nil {
+ prev := back.Prev()
+ if prev != nil {
+ if runes.Equal(current, prev.Value.(*hisItem).Source) {
+ o.current = o.history.Back()
+ o.current.Value.(*hisItem).Clean()
+ o.historyVer++
+ return nil
+ }
+ }
+ }
+
+ if len(current) == 0 {
+ o.current = o.history.Back()
+ if o.current != nil {
+ o.current.Value.(*hisItem).Clean()
+ o.historyVer++
+ return nil
+ }
+ }
+
+ if o.current != o.history.Back() {
+ // move history item to current command
+ currentItem := o.current.Value.(*hisItem)
+ // set current to last item
+ o.current = o.history.Back()
+
+ current = runes.Copy(currentItem.Tmp)
+ }
+
+ // err can only be an IO error; just report it
+ err = o.Update(current, true)
+
+ // push a new one to commit current command
+ o.historyVer++
+ o.Push(nil)
+ return
+}
+
+func (o *opHistory) Revert() {
+ o.historyVer++
+ o.current = o.history.Back()
+}
+
+func (o *opHistory) Update(s []rune, commit bool) (err error) {
+ o.fdLock.Lock()
+ defer o.fdLock.Unlock()
+ s = runes.Copy(s)
+ if o.current == nil {
+ o.Push(s)
+ o.Compact()
+ return
+ }
+ r := o.current.Value.(*hisItem)
+ r.Version = o.historyVer
+ if commit {
+ r.Source = s
+ if o.fd != nil {
+ // just report the error
+ _, err = o.fd.Write([]byte(string(r.Source) + "\n"))
+ }
+ } else {
+ r.Tmp = append(r.Tmp[:0], s...)
+ }
+ o.current.Value = r
+ o.Compact()
+ return
+}
+
+func (o *opHistory) Push(s []rune) {
+ s = runes.Copy(s)
+ elem := o.history.PushBack(&hisItem{Source: s})
+ o.current = elem
+}
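The history file handled above is plain text, one trimmed entry per line, loaded on startup and compacted down to HistoryLimit; a sketch of the Config fields involved (the path and limit are illustrative), saving entries explicitly rather than automatically:

package main

import "github.com/chzyer/readline"

func main() {
	rl, err := readline.NewEx(&readline.Config{
		Prompt:                 "> ",
		HistoryFile:            "/tmp/demo_history", // illustrative path; one entry per line
		HistoryLimit:           200,                 // older entries are compacted away
		HistorySearchFold:      true,                // case-insensitive Ctrl-R / Ctrl-S search
		DisableAutoSaveHistory: true,                // save explicitly instead
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		line, err := rl.Readline()
		if err != nil {
			break
		}
		// With auto-save disabled, only lines passed here end up in the file.
		rl.SaveHistory(line)
	}
}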
diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go
new file mode 100644
index 000000000..4c31624f8
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/operation.go
@@ -0,0 +1,531 @@
+package readline
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+var (
+ ErrInterrupt = errors.New("Interrupt")
+)
+
+type InterruptError struct {
+ Line []rune
+}
+
+func (*InterruptError) Error() string {
+ return "Interrupted"
+}
+
+type Operation struct {
+ m sync.Mutex
+ cfg *Config
+ t *Terminal
+ buf *RuneBuffer
+ outchan chan []rune
+ errchan chan error
+ w io.Writer
+
+ history *opHistory
+ *opSearch
+ *opCompleter
+ *opPassword
+ *opVim
+}
+
+func (o *Operation) SetBuffer(what string) {
+ o.buf.Set([]rune(what))
+}
+
+type wrapWriter struct {
+ r *Operation
+ t *Terminal
+ target io.Writer
+}
+
+func (w *wrapWriter) Write(b []byte) (int, error) {
+ if !w.t.IsReading() {
+ return w.target.Write(b)
+ }
+
+ var (
+ n int
+ err error
+ )
+ w.r.buf.Refresh(func() {
+ n, err = w.target.Write(b)
+ })
+
+ if w.r.IsSearchMode() {
+ w.r.SearchRefresh(-1)
+ }
+ if w.r.IsInCompleteMode() {
+ w.r.CompleteRefresh()
+ }
+ return n, err
+}
+
+func NewOperation(t *Terminal, cfg *Config) *Operation {
+ width := cfg.FuncGetWidth()
+ op := &Operation{
+ t: t,
+ buf: NewRuneBuffer(t, cfg.Prompt, cfg, width),
+ outchan: make(chan []rune),
+ errchan: make(chan error, 1),
+ }
+ op.w = op.buf.w
+ op.SetConfig(cfg)
+ op.opVim = newVimMode(op)
+ op.opCompleter = newOpCompleter(op.buf.w, op, width)
+ op.opPassword = newOpPassword(op)
+ op.cfg.FuncOnWidthChanged(func() {
+ newWidth := cfg.FuncGetWidth()
+ op.opCompleter.OnWidthChange(newWidth)
+ op.opSearch.OnWidthChange(newWidth)
+ op.buf.OnWidthChange(newWidth)
+ })
+ go op.ioloop()
+ return op
+}
+
+func (o *Operation) SetPrompt(s string) {
+ o.buf.SetPrompt(s)
+}
+
+func (o *Operation) SetMaskRune(r rune) {
+ o.buf.SetMask(r)
+}
+
+func (o *Operation) GetConfig() *Config {
+ o.m.Lock()
+ cfg := *o.cfg
+ o.m.Unlock()
+ return &cfg
+}
+
+func (o *Operation) ioloop() {
+ for {
+ keepInSearchMode := false
+ keepInCompleteMode := false
+ r := o.t.ReadRune()
+ if o.GetConfig().FuncFilterInputRune != nil {
+ var process bool
+ r, process = o.GetConfig().FuncFilterInputRune(r)
+ if !process {
+ o.buf.Refresh(nil) // to refresh the line
+ continue // ignore this rune
+ }
+ }
+
+ if r == 0 { // io.EOF
+ if o.buf.Len() == 0 {
+ o.buf.Clean()
+ select {
+ case o.errchan <- io.EOF:
+ }
+ break
+ } else {
+ // if stdin got io.EOF and there is something left in the buffer,
+ // flush it by sending CharEnter;
+ // we will get io.EOF again in the next loop.
+ r = CharEnter
+ }
+ }
+ isUpdateHistory := true
+
+ if o.IsInCompleteSelectMode() {
+ keepInCompleteMode = o.HandleCompleteSelect(r)
+ if keepInCompleteMode {
+ continue
+ }
+
+ o.buf.Refresh(nil)
+ switch r {
+ case CharEnter, CharCtrlJ:
+ o.history.Update(o.buf.Runes(), false)
+ fallthrough
+ case CharInterrupt:
+ o.t.KickRead()
+ fallthrough
+ case CharBell:
+ continue
+ }
+ }
+
+ if o.IsEnableVimMode() {
+ r = o.HandleVim(r, o.t.ReadRune)
+ if r == 0 {
+ continue
+ }
+ }
+
+ switch r {
+ case CharBell:
+ if o.IsSearchMode() {
+ o.ExitSearchMode(true)
+ o.buf.Refresh(nil)
+ }
+ if o.IsInCompleteMode() {
+ o.ExitCompleteMode(true)
+ o.buf.Refresh(nil)
+ }
+ case CharTab:
+ if o.GetConfig().AutoComplete == nil {
+ o.t.Bell()
+ break
+ }
+ if o.OnComplete() {
+ keepInCompleteMode = true
+ } else {
+ o.t.Bell()
+ break
+ }
+
+ case CharBckSearch:
+ if !o.SearchMode(S_DIR_BCK) {
+ o.t.Bell()
+ break
+ }
+ keepInSearchMode = true
+ case CharCtrlU:
+ o.buf.KillFront()
+ case CharFwdSearch:
+ if !o.SearchMode(S_DIR_FWD) {
+ o.t.Bell()
+ break
+ }
+ keepInSearchMode = true
+ case CharKill:
+ o.buf.Kill()
+ keepInCompleteMode = true
+ case MetaForward:
+ o.buf.MoveToNextWord()
+ case CharTranspose:
+ o.buf.Transpose()
+ case MetaBackward:
+ o.buf.MoveToPrevWord()
+ case MetaDelete:
+ o.buf.DeleteWord()
+ case CharLineStart:
+ o.buf.MoveToLineStart()
+ case CharLineEnd:
+ o.buf.MoveToLineEnd()
+ case CharBackspace, CharCtrlH:
+ if o.IsSearchMode() {
+ o.SearchBackspace()
+ keepInSearchMode = true
+ break
+ }
+
+ if o.buf.Len() == 0 {
+ o.t.Bell()
+ break
+ }
+ o.buf.Backspace()
+ if o.IsInCompleteMode() {
+ o.OnComplete()
+ }
+ case CharCtrlZ:
+ o.buf.Clean()
+ o.t.SleepToResume()
+ o.Refresh()
+ case CharCtrlL:
+ ClearScreen(o.w)
+ o.Refresh()
+ case MetaBackspace, CharCtrlW:
+ o.buf.BackEscapeWord()
+ case CharCtrlY:
+ o.buf.Yank()
+ case CharEnter, CharCtrlJ:
+ if o.IsSearchMode() {
+ o.ExitSearchMode(false)
+ }
+ o.buf.MoveToLineEnd()
+ var data []rune
+ if !o.GetConfig().UniqueEditLine {
+ o.buf.WriteRune('\n')
+ data = o.buf.Reset()
+ data = data[:len(data)-1] // trim \n
+ } else {
+ o.buf.Clean()
+ data = o.buf.Reset()
+ }
+ o.outchan <- data
+ if !o.GetConfig().DisableAutoSaveHistory {
+ // ignore IO error
+ _ = o.history.New(data)
+ } else {
+ isUpdateHistory = false
+ }
+ case CharBackward:
+ o.buf.MoveBackward()
+ case CharForward:
+ o.buf.MoveForward()
+ case CharPrev:
+ buf := o.history.Prev()
+ if buf != nil {
+ o.buf.Set(buf)
+ } else {
+ o.t.Bell()
+ }
+ case CharNext:
+ buf, ok := o.history.Next()
+ if ok {
+ o.buf.Set(buf)
+ } else {
+ o.t.Bell()
+ }
+ case CharDelete:
+ if o.buf.Len() > 0 || !o.IsNormalMode() {
+ o.t.KickRead()
+ if !o.buf.Delete() {
+ o.t.Bell()
+ }
+ break
+ }
+
+ // treat as EOF
+ if !o.GetConfig().UniqueEditLine {
+ o.buf.WriteString(o.GetConfig().EOFPrompt + "\n")
+ }
+ o.buf.Reset()
+ isUpdateHistory = false
+ o.history.Revert()
+ o.errchan <- io.EOF
+ if o.GetConfig().UniqueEditLine {
+ o.buf.Clean()
+ }
+ case CharInterrupt:
+ if o.IsSearchMode() {
+ o.t.KickRead()
+ o.ExitSearchMode(true)
+ break
+ }
+ if o.IsInCompleteMode() {
+ o.t.KickRead()
+ o.ExitCompleteMode(true)
+ o.buf.Refresh(nil)
+ break
+ }
+ o.buf.MoveToLineEnd()
+ o.buf.Refresh(nil)
+ hint := o.GetConfig().InterruptPrompt + "\n"
+ if !o.GetConfig().UniqueEditLine {
+ o.buf.WriteString(hint)
+ }
+ remain := o.buf.Reset()
+ if !o.GetConfig().UniqueEditLine {
+ remain = remain[:len(remain)-len([]rune(hint))]
+ }
+ isUpdateHistory = false
+ o.history.Revert()
+ o.errchan <- &InterruptError{remain}
+ default:
+ if o.IsSearchMode() {
+ o.SearchChar(r)
+ keepInSearchMode = true
+ break
+ }
+ o.buf.WriteRune(r)
+ if o.IsInCompleteMode() {
+ o.OnComplete()
+ keepInCompleteMode = true
+ }
+ }
+
+ listener := o.GetConfig().Listener
+ if listener != nil {
+ newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r)
+ if ok {
+ o.buf.SetWithIdx(newPos, newLine)
+ }
+ }
+
+ o.m.Lock()
+ if !keepInSearchMode && o.IsSearchMode() {
+ o.ExitSearchMode(false)
+ o.buf.Refresh(nil)
+ } else if o.IsInCompleteMode() {
+ if !keepInCompleteMode {
+ o.ExitCompleteMode(false)
+ o.Refresh()
+ } else {
+ o.buf.Refresh(nil)
+ o.CompleteRefresh()
+ }
+ }
+ if isUpdateHistory && !o.IsSearchMode() {
+ // updating while in search mode would create a null history entry, hence the check above
+ o.history.Update(o.buf.Runes(), false)
+ }
+ o.m.Unlock()
+ }
+}
+
+func (o *Operation) Stderr() io.Writer {
+ return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t}
+}
+
+func (o *Operation) Stdout() io.Writer {
+ return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t}
+}
+
+func (o *Operation) String() (string, error) {
+ r, err := o.Runes()
+ return string(r), err
+}
+
+func (o *Operation) Runes() ([]rune, error) {
+ o.t.EnterRawMode()
+ defer o.t.ExitRawMode()
+
+ listener := o.GetConfig().Listener
+ if listener != nil {
+ listener.OnChange(nil, 0, 0)
+ }
+
+ o.buf.Refresh(nil) // print prompt
+ o.t.KickRead()
+ select {
+ case r := <-o.outchan:
+ return r, nil
+ case err := <-o.errchan:
+ if e, ok := err.(*InterruptError); ok {
+ return e.Line, ErrInterrupt
+ }
+ return nil, err
+ }
+}
+
+func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) {
+ cfg := o.GenPasswordConfig()
+ cfg.Prompt = prompt
+ cfg.Listener = l
+ return o.PasswordWithConfig(cfg)
+}
+
+func (o *Operation) GenPasswordConfig() *Config {
+ return o.opPassword.PasswordConfig()
+}
+
+func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) {
+ if err := o.opPassword.EnterPasswordMode(cfg); err != nil {
+ return nil, err
+ }
+ defer o.opPassword.ExitPasswordMode()
+ return o.Slice()
+}
+
+func (o *Operation) Password(prompt string) ([]byte, error) {
+ return o.PasswordEx(prompt, nil)
+}
+
+func (o *Operation) SetTitle(t string) {
+ o.w.Write([]byte("\033[2;" + t + "\007"))
+}
+
+func (o *Operation) Slice() ([]byte, error) {
+ r, err := o.Runes()
+ if err != nil {
+ return nil, err
+ }
+ return []byte(string(r)), nil
+}
+
+func (o *Operation) Close() {
+ o.history.Close()
+}
+
+func (o *Operation) SetHistoryPath(path string) {
+ if o.history != nil {
+ o.history.Close()
+ }
+ o.cfg.HistoryFile = path
+ o.history = newOpHistory(o.cfg)
+}
+
+func (o *Operation) IsNormalMode() bool {
+ return !o.IsInCompleteMode() && !o.IsSearchMode()
+}
+
+func (op *Operation) SetConfig(cfg *Config) (*Config, error) {
+ op.m.Lock()
+ defer op.m.Unlock()
+ if op.cfg == cfg {
+ return op.cfg, nil
+ }
+ if err := cfg.Init(); err != nil {
+ return op.cfg, err
+ }
+ old := op.cfg
+ op.cfg = cfg
+ op.SetPrompt(cfg.Prompt)
+ op.SetMaskRune(cfg.MaskRune)
+ op.buf.SetConfig(cfg)
+ width := op.cfg.FuncGetWidth()
+
+ if cfg.opHistory == nil {
+ op.SetHistoryPath(cfg.HistoryFile)
+ cfg.opHistory = op.history
+ cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width)
+ }
+ op.history = cfg.opHistory
+
+ // SetHistoryPath will close any opHistory that already exists,
+ // so if we use it next time, we need to reopen it via `Init()`
+ op.history.Init()
+
+ if op.cfg.AutoComplete != nil {
+ op.opCompleter = newOpCompleter(op.buf.w, op, width)
+ }
+
+ op.opSearch = cfg.opSearch
+ return old, nil
+}
+
+func (o *Operation) ResetHistory() {
+ o.history.Reset()
+}
+
+// if err is not nil, it only means the write to the history file failed;
+// everything else went fine.
+func (o *Operation) SaveHistory(content string) error {
+ return o.history.New([]rune(content))
+}
+
+func (o *Operation) Refresh() {
+ if o.t.IsReading() {
+ o.buf.Refresh(nil)
+ }
+}
+
+func (o *Operation) Clean() {
+ o.buf.Clean()
+}
+
+func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener {
+ return &DumpListener{f: f}
+}
+
+type DumpListener struct {
+ f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)
+}
+
+func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) {
+ return d.f(line, pos, key)
+}
+
+type Listener interface {
+ OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)
+}
+
+type Painter interface {
+ Paint(line []rune, pos int) []rune
+}
+
+type defaultPainter struct{}
+
+func (p *defaultPainter) Paint(line []rune, _ int) []rune {
+ return line
+}
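FuncListener above adapts a plain function into a Listener that fires on every key press (and once with (nil, 0, 0) when a read starts); a sketch that upper-cases the line as it is typed (purely illustrative behavior):

package main

import (
	"unicode"

	"github.com/chzyer/readline"
)

func main() {
	cfg := &readline.Config{Prompt: "> "}
	// Rewrite the line on every key press; returning ok=false keeps the buffer as-is.
	cfg.SetListener(func(line []rune, pos int, key rune) ([]rune, int, bool) {
		upper := make([]rune, len(line))
		for i, r := range line {
			upper[i] = unicode.ToUpper(r)
		}
		return upper, pos, true
	})

	rl, err := readline.NewEx(cfg)
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		if _, err := rl.Readline(); err != nil {
			break
		}
	}
}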
diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go
new file mode 100644
index 000000000..414288c2a
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/password.go
@@ -0,0 +1,33 @@
+package readline
+
+type opPassword struct {
+ o *Operation
+ backupCfg *Config
+}
+
+func newOpPassword(o *Operation) *opPassword {
+ return &opPassword{o: o}
+}
+
+func (o *opPassword) ExitPasswordMode() {
+ o.o.SetConfig(o.backupCfg)
+ o.backupCfg = nil
+}
+
+func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) {
+ o.backupCfg, err = o.o.SetConfig(cfg)
+ return
+}
+
+func (o *opPassword) PasswordConfig() *Config {
+ return &Config{
+ EnableMask: true,
+ InterruptPrompt: "\n",
+ EOFPrompt: "\n",
+ HistoryLimit: -1,
+ Painter: &defaultPainter{},
+
+ Stdout: o.o.cfg.Stdout,
+ Stderr: o.o.cfg.Stderr,
+ }
+}
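Password mode swaps in the masked configuration built by PasswordConfig above for the duration of a single read; a minimal sketch through the Instance API (the prompt text is illustrative):

package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	rl, err := readline.New("> ")
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	// EnterPasswordMode/ExitPasswordMode are handled internally; the returned
	// bytes are the typed secret, echoed only as mask runes.
	secret, err := rl.ReadPassword("password: ")
	if err != nil {
		panic(err)
	}
	fmt.Println("read", len(secret), "bytes")
}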
diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go
new file mode 100644
index 000000000..073ef150a
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/rawreader_windows.go
@@ -0,0 +1,125 @@
+// +build windows
+
+package readline
+
+import "unsafe"
+
+const (
+ VK_CANCEL = 0x03
+ VK_BACK = 0x08
+ VK_TAB = 0x09
+ VK_RETURN = 0x0D
+ VK_SHIFT = 0x10
+ VK_CONTROL = 0x11
+ VK_MENU = 0x12
+ VK_ESCAPE = 0x1B
+ VK_LEFT = 0x25
+ VK_UP = 0x26
+ VK_RIGHT = 0x27
+ VK_DOWN = 0x28
+ VK_DELETE = 0x2E
+ VK_LSHIFT = 0xA0
+ VK_RSHIFT = 0xA1
+ VK_LCONTROL = 0xA2
+ VK_RCONTROL = 0xA3
+)
+
+// RawReader translates input records to ANSI escape sequences
+// to provide the same behavior as a unix terminal.
+type RawReader struct {
+ ctrlKey bool
+ altKey bool
+}
+
+func NewRawReader() *RawReader {
+ r := new(RawReader)
+ return r
+}
+
+// only one action is processed per read
+func (r *RawReader) Read(buf []byte) (int, error) {
+ ir := new(_INPUT_RECORD)
+ var read int
+ var err error
+next:
+ err = kernel.ReadConsoleInputW(stdin,
+ uintptr(unsafe.Pointer(ir)),
+ 1,
+ uintptr(unsafe.Pointer(&read)),
+ )
+ if err != nil {
+ return 0, err
+ }
+ if ir.EventType != EVENT_KEY {
+ goto next
+ }
+ ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0]))
+ if ker.bKeyDown == 0 { // keyup
+ if r.ctrlKey || r.altKey {
+ switch ker.wVirtualKeyCode {
+ case VK_RCONTROL, VK_LCONTROL:
+ r.ctrlKey = false
+ case VK_MENU: //alt
+ r.altKey = false
+ }
+ }
+ goto next
+ }
+
+ if ker.unicodeChar == 0 {
+ var target rune
+ switch ker.wVirtualKeyCode {
+ case VK_RCONTROL, VK_LCONTROL:
+ r.ctrlKey = true
+ case VK_MENU: //alt
+ r.altKey = true
+ case VK_LEFT:
+ target = CharBackward
+ case VK_RIGHT:
+ target = CharForward
+ case VK_UP:
+ target = CharPrev
+ case VK_DOWN:
+ target = CharNext
+ }
+ if target != 0 {
+ return r.write(buf, target)
+ }
+ goto next
+ }
+ char := rune(ker.unicodeChar)
+ if r.ctrlKey {
+ switch char {
+ case 'A':
+ char = CharLineStart
+ case 'E':
+ char = CharLineEnd
+ case 'R':
+ char = CharBckSearch
+ case 'S':
+ char = CharFwdSearch
+ }
+ } else if r.altKey {
+ switch char {
+ case VK_BACK:
+ char = CharBackspace
+ }
+ return r.writeEsc(buf, char)
+ }
+ return r.write(buf, char)
+}
+
+func (r *RawReader) writeEsc(b []byte, char rune) (int, error) {
+ b[0] = '\033'
+ n := copy(b[1:], []byte(string(char)))
+ return n + 1, nil
+}
+
+func (r *RawReader) write(b []byte, char rune) (int, error) {
+ n := copy(b, []byte(string(char)))
+ return n, nil
+}
+
+func (r *RawReader) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go
new file mode 100644
index 000000000..0e7aca06d
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/readline.go
@@ -0,0 +1,326 @@
+// Readline is a pure Go implementation of a GNU-Readline-like library.
+//
+// example:
+// rl, err := readline.New("> ")
+// if err != nil {
+// panic(err)
+// }
+// defer rl.Close()
+//
+// for {
+// line, err := rl.Readline()
+// if err != nil { // io.EOF
+// break
+// }
+// println(line)
+// }
+//
+package readline
+
+import "io"
+
+type Instance struct {
+ Config *Config
+ Terminal *Terminal
+ Operation *Operation
+}
+
+type Config struct {
+ // prompt supports ANSI escape sequences, so we can color some characters even on windows
+ Prompt string
+
+ // readline will persist history to the file specified by HistoryFile
+ HistoryFile string
+ // specify the max number of history entries; it's 500 by default, set it to -1 to disable history
+ HistoryLimit int
+ DisableAutoSaveHistory bool
+ // enable case-insensitive history searching
+ HistorySearchFold bool
+
+ // AutoCompleter will be called once the user presses TAB
+ AutoComplete AutoCompleter
+
+ // Any key press will be passed to the Listener
+ // NOTE: the Listener will be triggered with (nil, 0, 0) immediately
+ Listener Listener
+
+ Painter Painter
+
+ // If VimMode is true, readline will be in vim insert mode by default
+ VimMode bool
+
+ InterruptPrompt string
+ EOFPrompt string
+
+ FuncGetWidth func() int
+
+ Stdin io.ReadCloser
+ StdinWriter io.Writer
+ Stdout io.Writer
+ Stderr io.Writer
+
+ EnableMask bool
+ MaskRune rune
+
+ // erase the editing line after the user submits it;
+ // usually used in IM scenarios.
+ UniqueEditLine bool
+
+ // filter input runes (may be used to disable CtrlZ or to translate some keys to different actions)
+ // -> output = the new (translated) rune and true/false for whether to continue processing it
+ FuncFilterInputRune func(rune) (rune, bool)
+
+ // force interactive mode even if stdout is not a tty
+ FuncIsTerminal func() bool
+ FuncMakeRaw func() error
+ FuncExitRaw func() error
+ FuncOnWidthChanged func(func())
+ ForceUseInteractive bool
+
+ // private fields
+ inited bool
+ opHistory *opHistory
+ opSearch *opSearch
+}
+
+func (c *Config) useInteractive() bool {
+ if c.ForceUseInteractive {
+ return true
+ }
+ return c.FuncIsTerminal()
+}
+
+func (c *Config) Init() error {
+ if c.inited {
+ return nil
+ }
+ c.inited = true
+ if c.Stdin == nil {
+ c.Stdin = NewCancelableStdin(Stdin)
+ }
+
+ c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin)
+
+ if c.Stdout == nil {
+ c.Stdout = Stdout
+ }
+ if c.Stderr == nil {
+ c.Stderr = Stderr
+ }
+ if c.HistoryLimit == 0 {
+ c.HistoryLimit = 500
+ }
+
+ if c.InterruptPrompt == "" {
+ c.InterruptPrompt = "^C"
+ } else if c.InterruptPrompt == "\n" {
+ c.InterruptPrompt = ""
+ }
+ if c.EOFPrompt == "" {
+ c.EOFPrompt = "^D"
+ } else if c.EOFPrompt == "\n" {
+ c.EOFPrompt = ""
+ }
+
+ if c.AutoComplete == nil {
+ c.AutoComplete = &TabCompleter{}
+ }
+ if c.FuncGetWidth == nil {
+ c.FuncGetWidth = GetScreenWidth
+ }
+ if c.FuncIsTerminal == nil {
+ c.FuncIsTerminal = DefaultIsTerminal
+ }
+ rm := new(RawMode)
+ if c.FuncMakeRaw == nil {
+ c.FuncMakeRaw = rm.Enter
+ }
+ if c.FuncExitRaw == nil {
+ c.FuncExitRaw = rm.Exit
+ }
+ if c.FuncOnWidthChanged == nil {
+ c.FuncOnWidthChanged = DefaultOnWidthChanged
+ }
+
+ return nil
+}
+
+func (c Config) Clone() *Config {
+ c.opHistory = nil
+ c.opSearch = nil
+ return &c
+}
+
+func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) {
+ c.Listener = FuncListener(f)
+}
+
+func (c *Config) SetPainter(p Painter) {
+ c.Painter = p
+}
+
+func NewEx(cfg *Config) (*Instance, error) {
+ t, err := NewTerminal(cfg)
+ if err != nil {
+ return nil, err
+ }
+ rl := t.Readline()
+ if cfg.Painter == nil {
+ cfg.Painter = &defaultPainter{}
+ }
+ return &Instance{
+ Config: cfg,
+ Terminal: t,
+ Operation: rl,
+ }, nil
+}
+
+func New(prompt string) (*Instance, error) {
+ return NewEx(&Config{Prompt: prompt})
+}
+
+func (i *Instance) ResetHistory() {
+ i.Operation.ResetHistory()
+}
+
+func (i *Instance) SetPrompt(s string) {
+ i.Operation.SetPrompt(s)
+}
+
+func (i *Instance) SetMaskRune(r rune) {
+ i.Operation.SetMaskRune(r)
+}
+
+// change history persistence at runtime
+func (i *Instance) SetHistoryPath(p string) {
+ i.Operation.SetHistoryPath(p)
+}
+
+// readline will refresh automatically when writing through Stdout()
+func (i *Instance) Stdout() io.Writer {
+ return i.Operation.Stdout()
+}
+
+// readline will refresh automatically when writing through Stderr()
+func (i *Instance) Stderr() io.Writer {
+ return i.Operation.Stderr()
+}
+
+// switch VimMode at runtime
+func (i *Instance) SetVimMode(on bool) {
+ i.Operation.SetVimMode(on)
+}
+
+func (i *Instance) IsVimMode() bool {
+ return i.Operation.IsEnableVimMode()
+}
+
+func (i *Instance) GenPasswordConfig() *Config {
+ return i.Operation.GenPasswordConfig()
+}
+
+// we can generate a config by `i.GenPasswordConfig()`
+func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) {
+ return i.Operation.PasswordWithConfig(cfg)
+}
+
+func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) {
+ return i.Operation.PasswordEx(prompt, l)
+}
+
+func (i *Instance) ReadPassword(prompt string) ([]byte, error) {
+ return i.Operation.Password(prompt)
+}
+
+type Result struct {
+ Line string
+ Error error
+}
+
+func (l *Result) CanContinue() bool {
+ return len(l.Line) != 0 && l.Error == ErrInterrupt
+}
+
+func (l *Result) CanBreak() bool {
+ return !l.CanContinue() && l.Error != nil
+}
+
+func (i *Instance) Line() *Result {
+ ret, err := i.Readline()
+ return &Result{ret, err}
+}
+
+// err is one of (nil, io.EOF, readline.ErrInterrupt)
+func (i *Instance) Readline() (string, error) {
+ return i.Operation.String()
+}
+
+func (i *Instance) ReadlineWithDefault(what string) (string, error) {
+ i.Operation.SetBuffer(what)
+ return i.Operation.String()
+}
+
+func (i *Instance) SaveHistory(content string) error {
+ return i.Operation.SaveHistory(content)
+}
+
+// same as Readline, but returns the line as a byte slice
+func (i *Instance) ReadSlice() ([]byte, error) {
+ return i.Operation.Slice()
+}
+
+// we must make sure to call Close() before the process exits.
+func (i *Instance) Close() error {
+ if err := i.Terminal.Close(); err != nil {
+ return err
+ }
+ i.Config.Stdin.Close()
+ i.Operation.Close()
+ return nil
+}
+func (i *Instance) Clean() {
+ i.Operation.Clean()
+}
+
+func (i *Instance) Write(b []byte) (int, error) {
+ return i.Stdout().Write(b)
+}
+
+// WriteStdin prefills the next Stdin fetch.
+// The next time you call Readline() this value will be written before the user input,
+// e.g.:
+// i := readline.New()
+// i.WriteStdin([]byte("test"))
+// _, _= i.Readline()
+//
+// gives
+//
+// > test[cursor]
+func (i *Instance) WriteStdin(val []byte) (int, error) {
+ return i.Terminal.WriteStdin(val)
+}
+
+func (i *Instance) SetConfig(cfg *Config) *Config {
+ if i.Config == cfg {
+ return cfg
+ }
+ old := i.Config
+ i.Config = cfg
+ i.Operation.SetConfig(cfg)
+ i.Terminal.SetConfig(cfg)
+ return old
+}
+
+func (i *Instance) Refresh() {
+ i.Operation.Refresh()
+}
+
+// HistoryDisable disables saving commands into the history
+func (i *Instance) HistoryDisable() {
+ i.Operation.history.Disable()
+}
+
+// HistoryEnable enables saving commands into the history (default on)
+func (i *Instance) HistoryEnable() {
+ i.Operation.history.Enable()
+}
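The FuncFilterInputRune hook documented in Config above can veto or remap individual key presses before the line editor processes them; a small sketch that swallows Ctrl-Z so the process is never suspended, assuming only the exported CharCtrlZ constant (the prompt is illustrative):

package main

import "github.com/chzyer/readline"

func main() {
	rl, err := readline.NewEx(&readline.Config{
		Prompt: "> ",
		// Return false to drop the rune; return a different rune to remap it.
		FuncFilterInputRune: func(r rune) (rune, bool) {
			if r == readline.CharCtrlZ { // disable suspend-to-background
				return r, false
			}
			return r, true
		},
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		if _, err := rl.Readline(); err != nil {
			break
		}
	}
}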
diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go
new file mode 100644
index 000000000..74dbf5690
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/remote.go
@@ -0,0 +1,475 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "sync"
+ "sync/atomic"
+)
+
+type MsgType int16
+
+const (
+ T_DATA = MsgType(iota)
+ T_WIDTH
+ T_WIDTH_REPORT
+ T_ISTTY_REPORT
+ T_RAW
+ T_ERAW // exit raw
+ T_EOF
+)
+
+type RemoteSvr struct {
+ eof int32
+ closed int32
+ width int32
+ reciveChan chan struct{}
+ writeChan chan *writeCtx
+ conn net.Conn
+ isTerminal bool
+ funcWidthChan func()
+ stopChan chan struct{}
+
+ dataBufM sync.Mutex
+ dataBuf bytes.Buffer
+}
+
+type writeReply struct {
+ n int
+ err error
+}
+
+type writeCtx struct {
+ msg *Message
+ reply chan *writeReply
+}
+
+func newWriteCtx(msg *Message) *writeCtx {
+ return &writeCtx{
+ msg: msg,
+ reply: make(chan *writeReply),
+ }
+}
+
+func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) {
+ rs := &RemoteSvr{
+ width: -1,
+ conn: conn,
+ writeChan: make(chan *writeCtx),
+ reciveChan: make(chan struct{}),
+ stopChan: make(chan struct{}),
+ }
+ buf := bufio.NewReader(rs.conn)
+
+ if err := rs.init(buf); err != nil {
+ return nil, err
+ }
+
+ go rs.readLoop(buf)
+ go rs.writeLoop()
+ return rs, nil
+}
+
+func (r *RemoteSvr) init(buf *bufio.Reader) error {
+ m, err := ReadMessage(buf)
+ if err != nil {
+ return err
+ }
+ // receive isTerminal
+ if m.Type != T_ISTTY_REPORT {
+ return fmt.Errorf("unexpected init message")
+ }
+ r.GotIsTerminal(m.Data)
+
+ // receive width
+ m, err = ReadMessage(buf)
+ if err != nil {
+ return err
+ }
+ if m.Type != T_WIDTH_REPORT {
+ return fmt.Errorf("unexpected init message")
+ }
+ r.GotReportWidth(m.Data)
+
+ return nil
+}
+
+func (r *RemoteSvr) HandleConfig(cfg *Config) {
+ cfg.Stderr = r
+ cfg.Stdout = r
+ cfg.Stdin = r
+ cfg.FuncExitRaw = r.ExitRawMode
+ cfg.FuncIsTerminal = r.IsTerminal
+ cfg.FuncMakeRaw = r.EnterRawMode
+ cfg.FuncExitRaw = r.ExitRawMode
+ cfg.FuncGetWidth = r.GetWidth
+ cfg.FuncOnWidthChanged = func(f func()) {
+ r.funcWidthChan = f
+ }
+}
+
+func (r *RemoteSvr) IsTerminal() bool {
+ return r.isTerminal
+}
+
+func (r *RemoteSvr) checkEOF() error {
+ if atomic.LoadInt32(&r.eof) == 1 {
+ return io.EOF
+ }
+ return nil
+}
+
+func (r *RemoteSvr) Read(b []byte) (int, error) {
+ r.dataBufM.Lock()
+ n, err := r.dataBuf.Read(b)
+ r.dataBufM.Unlock()
+ if n == 0 {
+ if err := r.checkEOF(); err != nil {
+ return 0, err
+ }
+ }
+
+ if n == 0 && err == io.EOF {
+ <-r.reciveChan
+ r.dataBufM.Lock()
+ n, err = r.dataBuf.Read(b)
+ r.dataBufM.Unlock()
+ }
+ if n == 0 {
+ if err := r.checkEOF(); err != nil {
+ return 0, err
+ }
+ }
+
+ return n, err
+}
+
+func (r *RemoteSvr) writeMsg(m *Message) error {
+ ctx := newWriteCtx(m)
+ r.writeChan <- ctx
+ reply := <-ctx.reply
+ return reply.err
+}
+
+func (r *RemoteSvr) Write(b []byte) (int, error) {
+ ctx := newWriteCtx(NewMessage(T_DATA, b))
+ r.writeChan <- ctx
+ reply := <-ctx.reply
+ return reply.n, reply.err
+}
+
+func (r *RemoteSvr) EnterRawMode() error {
+ return r.writeMsg(NewMessage(T_RAW, nil))
+}
+
+func (r *RemoteSvr) ExitRawMode() error {
+ return r.writeMsg(NewMessage(T_ERAW, nil))
+}
+
+func (r *RemoteSvr) writeLoop() {
+ defer r.Close()
+
+loop:
+ for {
+ select {
+ case ctx, ok := <-r.writeChan:
+ if !ok {
+ break
+ }
+ n, err := ctx.msg.WriteTo(r.conn)
+ ctx.reply <- &writeReply{n, err}
+ case <-r.stopChan:
+ break loop
+ }
+ }
+}
+
+func (r *RemoteSvr) Close() error {
+ if atomic.CompareAndSwapInt32(&r.closed, 0, 1) {
+ close(r.stopChan)
+ r.conn.Close()
+ }
+ return nil
+}
+
+func (r *RemoteSvr) readLoop(buf *bufio.Reader) {
+ defer r.Close()
+ for {
+ m, err := ReadMessage(buf)
+ if err != nil {
+ break
+ }
+ switch m.Type {
+ case T_EOF:
+ atomic.StoreInt32(&r.eof, 1)
+ select {
+ case r.reciveChan <- struct{}{}:
+ default:
+ }
+ case T_DATA:
+ r.dataBufM.Lock()
+ r.dataBuf.Write(m.Data)
+ r.dataBufM.Unlock()
+ select {
+ case r.reciveChan <- struct{}{}:
+ default:
+ }
+ case T_WIDTH_REPORT:
+ r.GotReportWidth(m.Data)
+ case T_ISTTY_REPORT:
+ r.GotIsTerminal(m.Data)
+ }
+ }
+}
+
+func (r *RemoteSvr) GotIsTerminal(data []byte) {
+ if binary.BigEndian.Uint16(data) == 0 {
+ r.isTerminal = false
+ } else {
+ r.isTerminal = true
+ }
+}
+
+func (r *RemoteSvr) GotReportWidth(data []byte) {
+ atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data)))
+ if r.funcWidthChan != nil {
+ r.funcWidthChan()
+ }
+}
+
+func (r *RemoteSvr) GetWidth() int {
+ return int(atomic.LoadInt32(&r.width))
+}
+
+// -----------------------------------------------------------------------------
+
+type Message struct {
+ Type MsgType
+ Data []byte
+}
+
+func ReadMessage(r io.Reader) (*Message, error) {
+ m := new(Message)
+ var length int32
+ if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil {
+ return nil, err
+ }
+ m.Data = make([]byte, int(length)-2)
+ if _, err := io.ReadFull(r, m.Data); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func NewMessage(t MsgType, data []byte) *Message {
+ return &Message{t, data}
+}
+
+func (m *Message) WriteTo(w io.Writer) (int, error) {
+ buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4))
+ binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2))
+ binary.Write(buf, binary.BigEndian, m.Type)
+ buf.Write(m.Data)
+ n, err := buf.WriteTo(w)
+ return int(n), err
+}
+
+// -----------------------------------------------------------------------------
+
+type RemoteCli struct {
+ conn net.Conn
+ raw RawMode
+ receiveChan chan struct{}
+ inited int32
+ isTerminal *bool
+
+ data bytes.Buffer
+ dataM sync.Mutex
+}
+
+func NewRemoteCli(conn net.Conn) (*RemoteCli, error) {
+ r := &RemoteCli{
+ conn: conn,
+ receiveChan: make(chan struct{}),
+ }
+ return r, nil
+}
+
+func (r *RemoteCli) MarkIsTerminal(is bool) {
+ r.isTerminal = &is
+}
+
+func (r *RemoteCli) init() error {
+ if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) {
+ return nil
+ }
+
+ if err := r.reportIsTerminal(); err != nil {
+ return err
+ }
+
+ if err := r.reportWidth(); err != nil {
+ return err
+ }
+
+ // register a callback for terminal width changes
+ DefaultOnWidthChanged(func() {
+ r.reportWidth()
+ })
+ return nil
+}
+
+func (r *RemoteCli) writeMsg(m *Message) error {
+ r.dataM.Lock()
+ _, err := m.WriteTo(r.conn)
+ r.dataM.Unlock()
+ return err
+}
+
+func (r *RemoteCli) Write(b []byte) (int, error) {
+ m := NewMessage(T_DATA, b)
+ r.dataM.Lock()
+ _, err := m.WriteTo(r.conn)
+ r.dataM.Unlock()
+ return len(b), err
+}
+
+func (r *RemoteCli) reportWidth() error {
+ screenWidth := GetScreenWidth()
+ data := make([]byte, 2)
+ binary.BigEndian.PutUint16(data, uint16(screenWidth))
+ msg := NewMessage(T_WIDTH_REPORT, data)
+
+ if err := r.writeMsg(msg); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *RemoteCli) reportIsTerminal() error {
+ var isTerminal bool
+ if r.isTerminal != nil {
+ isTerminal = *r.isTerminal
+ } else {
+ isTerminal = DefaultIsTerminal()
+ }
+ data := make([]byte, 2)
+ if isTerminal {
+ binary.BigEndian.PutUint16(data, 1)
+ } else {
+ binary.BigEndian.PutUint16(data, 0)
+ }
+ msg := NewMessage(T_ISTTY_REPORT, data)
+ if err := r.writeMsg(msg); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *RemoteCli) readLoop() {
+ buf := bufio.NewReader(r.conn)
+ for {
+ msg, err := ReadMessage(buf)
+ if err != nil {
+ break
+ }
+ switch msg.Type {
+ case T_ERAW:
+ r.raw.Exit()
+ case T_RAW:
+ r.raw.Enter()
+ case T_DATA:
+ os.Stdout.Write(msg.Data)
+ }
+ }
+}
+
+func (r *RemoteCli) ServeBy(source io.Reader) error {
+ if err := r.init(); err != nil {
+ return err
+ }
+
+ go func() {
+ defer r.Close()
+ for {
+ n, _ := io.Copy(r, source)
+ if n == 0 {
+ break
+ }
+ }
+ }()
+ defer r.raw.Exit()
+ r.readLoop()
+ return nil
+}
+
+func (r *RemoteCli) Close() {
+ r.writeMsg(NewMessage(T_EOF, nil))
+}
+
+func (r *RemoteCli) Serve() error {
+ return r.ServeBy(os.Stdin)
+}
+
+func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error {
+ ln, err := net.Listen(n, addr)
+ if err != nil {
+ return err
+ }
+ if len(onListen) > 0 {
+ if err := onListen[0](ln); err != nil {
+ return err
+ }
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ break
+ }
+ go func() {
+ defer conn.Close()
+ rl, err := HandleConn(*cfg, conn)
+ if err != nil {
+ return
+ }
+ h(rl)
+ }()
+ }
+ return nil
+}
+
+func HandleConn(cfg Config, conn net.Conn) (*Instance, error) {
+ r, err := NewRemoteSvr(conn)
+ if err != nil {
+ return nil, err
+ }
+ r.HandleConfig(&cfg)
+
+ rl, err := NewEx(&cfg)
+ if err != nil {
+ return nil, err
+ }
+ return rl, nil
+}
+
+func DialRemote(n, addr string) error {
+ conn, err := net.Dial(n, addr)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ cli, err := NewRemoteCli(conn)
+ if err != nil {
+ return err
+ }
+ return cli.Serve()
+}
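ListenRemote and DialRemote above pair a server-side Instance with a thin client that forwards raw-mode switches, terminal size, and data frames over one connection; a minimal sketch (the TCP address and echo handler are illustrative):

package main

import (
	"os"

	"github.com/chzyer/readline"
)

func main() {
	addr := "127.0.0.1:12344" // illustrative address

	if len(os.Args) > 1 && os.Args[1] == "client" {
		// Client: bridge the local terminal to the remote readline instance.
		if err := readline.DialRemote("tcp", addr); err != nil {
			panic(err)
		}
		return
	}

	// Server: every accepted connection gets its own readline Instance.
	err := readline.ListenRemote("tcp", addr, &readline.Config{Prompt: "remote> "},
		func(rl *readline.Instance) {
			for {
				line, err := rl.Readline()
				if err != nil {
					return
				}
				rl.Write([]byte("echo: " + line + "\n"))
			}
		})
	if err != nil {
		panic(err)
	}
}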
diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go
new file mode 100644
index 000000000..81d2da50c
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/runebuf.go
@@ -0,0 +1,629 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type runeBufferBck struct {
+ buf []rune
+ idx int
+}
+
+type RuneBuffer struct {
+ buf []rune
+ idx int
+ prompt []rune
+ w io.Writer
+
+ hadClean bool
+ interactive bool
+ cfg *Config
+
+ width int
+
+ bck *runeBufferBck
+
+ offset string
+
+ lastKill []rune
+
+ sync.Mutex
+}
+
+func (r *RuneBuffer) pushKill(text []rune) {
+ r.lastKill = append([]rune{}, text...)
+}
+
+func (r *RuneBuffer) OnWidthChange(newWidth int) {
+ r.Lock()
+ r.width = newWidth
+ r.Unlock()
+}
+
+func (r *RuneBuffer) Backup() {
+ r.Lock()
+ r.bck = &runeBufferBck{r.buf, r.idx}
+ r.Unlock()
+}
+
+func (r *RuneBuffer) Restore() {
+ r.Refresh(func() {
+ if r.bck == nil {
+ return
+ }
+ r.buf = r.bck.buf
+ r.idx = r.bck.idx
+ })
+}
+
+func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer {
+ rb := &RuneBuffer{
+ w: w,
+ interactive: cfg.useInteractive(),
+ cfg: cfg,
+ width: width,
+ }
+ rb.SetPrompt(prompt)
+ return rb
+}
+
+func (r *RuneBuffer) SetConfig(cfg *Config) {
+ r.Lock()
+ r.cfg = cfg
+ r.interactive = cfg.useInteractive()
+ r.Unlock()
+}
+
+func (r *RuneBuffer) SetMask(m rune) {
+ r.Lock()
+ r.cfg.MaskRune = m
+ r.Unlock()
+}
+
+func (r *RuneBuffer) CurrentWidth(x int) int {
+ r.Lock()
+ defer r.Unlock()
+ return runes.WidthAll(r.buf[:x])
+}
+
+func (r *RuneBuffer) PromptLen() int {
+ r.Lock()
+ width := r.promptLen()
+ r.Unlock()
+ return width
+}
+
+func (r *RuneBuffer) promptLen() int {
+ return runes.WidthAll(runes.ColorFilter(r.prompt))
+}
+
+func (r *RuneBuffer) RuneSlice(i int) []rune {
+ r.Lock()
+ defer r.Unlock()
+
+ if i > 0 {
+ rs := make([]rune, i)
+ copy(rs, r.buf[r.idx:r.idx+i])
+ return rs
+ }
+ rs := make([]rune, -i)
+ copy(rs, r.buf[r.idx+i:r.idx])
+ return rs
+}
+
+func (r *RuneBuffer) Runes() []rune {
+ r.Lock()
+ newr := make([]rune, len(r.buf))
+ copy(newr, r.buf)
+ r.Unlock()
+ return newr
+}
+
+func (r *RuneBuffer) Pos() int {
+ r.Lock()
+ defer r.Unlock()
+ return r.idx
+}
+
+func (r *RuneBuffer) Len() int {
+ r.Lock()
+ defer r.Unlock()
+ return len(r.buf)
+}
+
+func (r *RuneBuffer) MoveToLineStart() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+ r.idx = 0
+ })
+}
+
+func (r *RuneBuffer) MoveBackward() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+ r.idx--
+ })
+}
+
+func (r *RuneBuffer) WriteString(s string) {
+ r.WriteRunes([]rune(s))
+}
+
+func (r *RuneBuffer) WriteRune(s rune) {
+ r.WriteRunes([]rune{s})
+}
+
+func (r *RuneBuffer) WriteRunes(s []rune) {
+ r.Refresh(func() {
+ tail := append(s, r.buf[r.idx:]...)
+ r.buf = append(r.buf[:r.idx], tail...)
+ r.idx += len(s)
+ })
+}
+
+func (r *RuneBuffer) MoveForward() {
+ r.Refresh(func() {
+ if r.idx == len(r.buf) {
+ return
+ }
+ r.idx++
+ })
+}
+
+func (r *RuneBuffer) IsCursorInEnd() bool {
+ r.Lock()
+ defer r.Unlock()
+ return r.idx == len(r.buf)
+}
+
+func (r *RuneBuffer) Replace(ch rune) {
+ r.Refresh(func() {
+ r.buf[r.idx] = ch
+ })
+}
+
+func (r *RuneBuffer) Erase() {
+ r.Refresh(func() {
+ r.idx = 0
+ r.pushKill(r.buf[:])
+ r.buf = r.buf[:0]
+ })
+}
+
+func (r *RuneBuffer) Delete() (success bool) {
+ r.Refresh(func() {
+ if r.idx == len(r.buf) {
+ return
+ }
+ r.pushKill(r.buf[r.idx : r.idx+1])
+ r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...)
+ success = true
+ })
+ return
+}
+
+func (r *RuneBuffer) DeleteWord() {
+ if r.idx == len(r.buf) {
+ return
+ }
+ init := r.idx
+ for init < len(r.buf) && IsWordBreak(r.buf[init]) {
+ init++
+ }
+ for i := init + 1; i < len(r.buf); i++ {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.pushKill(r.buf[r.idx:i-1])
+ r.Refresh(func() {
+ r.buf = append(r.buf[:r.idx], r.buf[i-1:]...)
+ })
+ return
+ }
+ }
+ r.Kill()
+}
+
+func (r *RuneBuffer) MoveToPrevWord() (success bool) {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+
+ for i := r.idx - 1; i > 0; i-- {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.idx = i
+ success = true
+ return
+ }
+ }
+ r.idx = 0
+ success = true
+ })
+ return
+}
+
+func (r *RuneBuffer) KillFront() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+
+ length := len(r.buf) - r.idx
+ r.pushKill(r.buf[:r.idx])
+ copy(r.buf[:length], r.buf[r.idx:])
+ r.idx = 0
+ r.buf = r.buf[:length]
+ })
+}
+
+func (r *RuneBuffer) Kill() {
+ r.Refresh(func() {
+ r.pushKill(r.buf[r.idx:])
+ r.buf = r.buf[:r.idx]
+ })
+}
+
+func (r *RuneBuffer) Transpose() {
+ r.Refresh(func() {
+ if len(r.buf) == 1 {
+ r.idx++
+ }
+
+ if len(r.buf) < 2 {
+ return
+ }
+
+ if r.idx == 0 {
+ r.idx = 1
+ } else if r.idx >= len(r.buf) {
+ r.idx = len(r.buf) - 1
+ }
+ r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx]
+ r.idx++
+ })
+}
+
+func (r *RuneBuffer) MoveToNextWord() {
+ r.Refresh(func() {
+ for i := r.idx + 1; i < len(r.buf); i++ {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.idx = i
+ return
+ }
+ }
+
+ r.idx = len(r.buf)
+ })
+}
+
+func (r *RuneBuffer) MoveToEndWord() {
+ r.Refresh(func() {
+ // already at the end, so do nothing
+ if r.idx == len(r.buf) {
+ return
+ }
+ // if we are at the end of a word already, go to next
+ if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) {
+ r.idx++
+ }
+
+ // keep going until at the end of a word
+ for i := r.idx + 1; i < len(r.buf); i++ {
+ if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) {
+ r.idx = i - 1
+ return
+ }
+ }
+ r.idx = len(r.buf)
+ })
+}
+
+func (r *RuneBuffer) BackEscapeWord() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+ for i := r.idx - 1; i > 0; i-- {
+ if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) {
+ r.pushKill(r.buf[i:r.idx])
+ r.buf = append(r.buf[:i], r.buf[r.idx:]...)
+ r.idx = i
+ return
+ }
+ }
+
+ r.buf = r.buf[:0]
+ r.idx = 0
+ })
+}
+
+func (r *RuneBuffer) Yank() {
+ if len(r.lastKill) == 0 {
+ return
+ }
+ r.Refresh(func() {
+ buf := make([]rune, 0, len(r.buf) + len(r.lastKill))
+ buf = append(buf, r.buf[:r.idx]...)
+ buf = append(buf, r.lastKill...)
+ buf = append(buf, r.buf[r.idx:]...)
+ r.buf = buf
+ r.idx += len(r.lastKill)
+ })
+}
+
+func (r *RuneBuffer) Backspace() {
+ r.Refresh(func() {
+ if r.idx == 0 {
+ return
+ }
+
+ r.idx--
+ r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...)
+ })
+}
+
+func (r *RuneBuffer) MoveToLineEnd() {
+ r.Refresh(func() {
+ if r.idx == len(r.buf) {
+ return
+ }
+
+ r.idx = len(r.buf)
+ })
+}
+
+func (r *RuneBuffer) LineCount(width int) int {
+ if width == -1 {
+ width = r.width
+ }
+ return LineCount(width,
+ runes.WidthAll(r.buf)+r.PromptLen())
+}
+
+func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) {
+ r.Refresh(func() {
+ if reverse {
+ for i := r.idx - 1; i >= 0; i-- {
+ if r.buf[i] == ch {
+ r.idx = i
+ if prevChar {
+ r.idx++
+ }
+ success = true
+ return
+ }
+ }
+ return
+ }
+ for i := r.idx + 1; i < len(r.buf); i++ {
+ if r.buf[i] == ch {
+ r.idx = i
+ if prevChar {
+ r.idx--
+ }
+ success = true
+ return
+ }
+ }
+ })
+ return
+}
+
+func (r *RuneBuffer) isInLineEdge() bool {
+ if isWindows {
+ return false
+ }
+ sp := r.getSplitByLine(r.buf)
+ return len(sp[len(sp)-1]) == 0
+}
+
+func (r *RuneBuffer) getSplitByLine(rs []rune) []string {
+ return SplitByLine(r.promptLen(), r.width, rs)
+}
+
+func (r *RuneBuffer) IdxLine(width int) int {
+ r.Lock()
+ defer r.Unlock()
+ return r.idxLine(width)
+}
+
+func (r *RuneBuffer) idxLine(width int) int {
+ if width == 0 {
+ return 0
+ }
+ sp := r.getSplitByLine(r.buf[:r.idx])
+ return len(sp) - 1
+}
+
+func (r *RuneBuffer) CursorLineCount() int {
+ return r.LineCount(r.width) - r.IdxLine(r.width)
+}
+
+func (r *RuneBuffer) Refresh(f func()) {
+ r.Lock()
+ defer r.Unlock()
+
+ if !r.interactive {
+ if f != nil {
+ f()
+ }
+ return
+ }
+
+ r.clean()
+ if f != nil {
+ f()
+ }
+ r.print()
+}
+
+func (r *RuneBuffer) SetOffset(offset string) {
+ r.Lock()
+ r.offset = offset
+ r.Unlock()
+}
+
+func (r *RuneBuffer) print() {
+ r.w.Write(r.output())
+ r.hadClean = false
+}
+
+func (r *RuneBuffer) output() []byte {
+ buf := bytes.NewBuffer(nil)
+ buf.WriteString(string(r.prompt))
+ if r.cfg.EnableMask && len(r.buf) > 0 {
+ buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1)))
+ if r.buf[len(r.buf)-1] == '\n' {
+ buf.Write([]byte{'\n'})
+ } else {
+ buf.Write([]byte(string(r.cfg.MaskRune)))
+ }
+ if len(r.buf) > r.idx {
+ buf.Write(r.getBackspaceSequence())
+ }
+
+ } else {
+ for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) {
+ if e == '\t' {
+ buf.WriteString(strings.Repeat(" ", TabWidth))
+ } else {
+ buf.WriteRune(e)
+ }
+ }
+ if r.isInLineEdge() {
+ buf.Write([]byte(" \b"))
+ }
+ }
+ // cursor position
+ if len(r.buf) > r.idx {
+ buf.Write(r.getBackspaceSequence())
+ }
+ return buf.Bytes()
+}
+
+func (r *RuneBuffer) getBackspaceSequence() []byte {
+ var sep = map[int]bool{}
+
+ var i int
+ for {
+ if i >= runes.WidthAll(r.buf) {
+ break
+ }
+
+ if i == 0 {
+ i -= r.promptLen()
+ }
+ i += r.width
+
+ sep[i] = true
+ }
+ var buf []byte
+ for i := len(r.buf); i > r.idx; i-- {
+ // move the cursor one column to the left
+ buf = append(buf, '\b')
+ if sep[i] {
+ // up one line, go to the start of the line and move cursor right to the end (r.width)
+ buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...)
+ }
+ }
+
+ return buf
+
+}
+
+func (r *RuneBuffer) Reset() []rune {
+ ret := runes.Copy(r.buf)
+ r.buf = r.buf[:0]
+ r.idx = 0
+ return ret
+}
+
+func (r *RuneBuffer) calWidth(m int) int {
+ if m > 0 {
+ return runes.WidthAll(r.buf[r.idx : r.idx+m])
+ }
+ return runes.WidthAll(r.buf[r.idx+m : r.idx])
+}
+
+func (r *RuneBuffer) SetStyle(start, end int, style string) {
+ if end < start {
+ panic("end < start")
+ }
+
+ // goto start
+ move := start - r.idx
+ if move > 0 {
+ r.w.Write([]byte(string(r.buf[r.idx : r.idx+move])))
+ } else {
+ r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move)))
+ }
+ r.w.Write([]byte("\033[" + style + "m"))
+ r.w.Write([]byte(string(r.buf[start:end])))
+ r.w.Write([]byte("\033[0m"))
+ // TODO: move back
+}
+
+func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) {
+ r.Refresh(func() {
+ r.buf = buf
+ r.idx = idx
+ })
+}
+
+func (r *RuneBuffer) Set(buf []rune) {
+ r.SetWithIdx(len(buf), buf)
+}
+
+func (r *RuneBuffer) SetPrompt(prompt string) {
+ r.Lock()
+ r.prompt = []rune(prompt)
+ r.Unlock()
+}
+
+func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) {
+ buf := bufio.NewWriter(w)
+
+ if r.width == 0 {
+ buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen()))
+ buf.Write([]byte("\033[J"))
+ } else {
+ buf.Write([]byte("\033[J")) // just like ^k :)
+ if idxLine == 0 {
+ buf.WriteString("\033[2K")
+ buf.WriteString("\r")
+ } else {
+ for i := 0; i < idxLine; i++ {
+ io.WriteString(buf, "\033[2K\r\033[A")
+ }
+ io.WriteString(buf, "\033[2K\r")
+ }
+ }
+ buf.Flush()
+ return
+}
+
+func (r *RuneBuffer) Clean() {
+ r.Lock()
+ r.clean()
+ r.Unlock()
+}
+
+func (r *RuneBuffer) clean() {
+ r.cleanWithIdxLine(r.idxLine(r.width))
+}
+
+func (r *RuneBuffer) cleanWithIdxLine(idxLine int) {
+ if r.hadClean || !r.interactive {
+ return
+ }
+ r.hadClean = true
+ r.cleanOutput(r.w, idxLine)
+}
diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go
new file mode 100644
index 000000000..a669bc48c
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/runes.go
@@ -0,0 +1,223 @@
+package readline
+
+import (
+ "bytes"
+ "unicode"
+ "unicode/utf8"
+)
+
+var runes = Runes{}
+var TabWidth = 4
+
+type Runes struct{}
+
+func (Runes) EqualRune(a, b rune, fold bool) bool {
+ if a == b {
+ return true
+ }
+ if !fold {
+ return false
+ }
+ if a > b {
+ a, b = b, a
+ }
+ if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' {
+ if b == a+'a'-'A' {
+ return true
+ }
+ }
+ return false
+}
+
+func (r Runes) EqualRuneFold(a, b rune) bool {
+ return r.EqualRune(a, b, true)
+}
+
+func (r Runes) EqualFold(a, b []rune) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if r.EqualRuneFold(a[i], b[i]) {
+ continue
+ }
+ return false
+ }
+
+ return true
+}
+
+func (Runes) Equal(a, b []rune) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int {
+ for i := len(r) - len(sub); i >= 0; i-- {
+ found := true
+ for j := 0; j < len(sub); j++ {
+ if !rs.EqualRune(r[i+j], sub[j], fold) {
+ found = false
+ break
+ }
+ }
+ if found {
+ return i
+ }
+ }
+ return -1
+}
+
+// Search in runes from end to front
+func (rs Runes) IndexAllBck(r, sub []rune) int {
+ return rs.IndexAllBckEx(r, sub, false)
+}
+
+// Search in runes from front to end
+func (rs Runes) IndexAll(r, sub []rune) int {
+ return rs.IndexAllEx(r, sub, false)
+}
+
+func (rs Runes) IndexAllEx(r, sub []rune, fold bool) int {
+ for i := 0; i < len(r); i++ {
+ found := true
+ if len(r[i:]) < len(sub) {
+ return -1
+ }
+ for j := 0; j < len(sub); j++ {
+ if !rs.EqualRune(r[i+j], sub[j], fold) {
+ found = false
+ break
+ }
+ }
+ if found {
+ return i
+ }
+ }
+ return -1
+}
+
+func (Runes) Index(r rune, rs []rune) int {
+ for i := 0; i < len(rs); i++ {
+ if rs[i] == r {
+ return i
+ }
+ }
+ return -1
+}
+
+func (Runes) ColorFilter(r []rune) []rune {
+ newr := make([]rune, 0, len(r))
+ for pos := 0; pos < len(r); pos++ {
+ if r[pos] == '\033' && pos+1 < len(r) && r[pos+1] == '[' {
+ idx := runes.Index('m', r[pos+2:])
+ if idx == -1 {
+ continue
+ }
+ pos += idx + 2
+ continue
+ }
+ newr = append(newr, r[pos])
+ }
+ return newr
+}
+
+var zeroWidth = []*unicode.RangeTable{
+ unicode.Mn,
+ unicode.Me,
+ unicode.Cc,
+ unicode.Cf,
+}
+
+var doubleWidth = []*unicode.RangeTable{
+ unicode.Han,
+ unicode.Hangul,
+ unicode.Hiragana,
+ unicode.Katakana,
+}
+
+func (Runes) Width(r rune) int {
+ if r == '\t' {
+ return TabWidth
+ }
+ if unicode.IsOneOf(zeroWidth, r) {
+ return 0
+ }
+ if unicode.IsOneOf(doubleWidth, r) {
+ return 2
+ }
+ return 1
+}
+
+func (Runes) WidthAll(r []rune) (length int) {
+ for i := 0; i < len(r); i++ {
+ length += runes.Width(r[i])
+ }
+ return
+}
+
+func (Runes) Backspace(r []rune) []byte {
+ return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r))
+}
+
+func (Runes) Copy(r []rune) []rune {
+ n := make([]rune, len(r))
+ copy(n, r)
+ return n
+}
+
+func (Runes) HasPrefixFold(r, prefix []rune) bool {
+ if len(r) < len(prefix) {
+ return false
+ }
+ return runes.EqualFold(r[:len(prefix)], prefix)
+}
+
+func (Runes) HasPrefix(r, prefix []rune) bool {
+ if len(r) < len(prefix) {
+ return false
+ }
+ return runes.Equal(r[:len(prefix)], prefix)
+}
+
+func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) {
+ for i := 0; i < len(candicate[0]); i++ {
+ for j := 0; j < len(candicate)-1; j++ {
+ if i >= len(candicate[j]) || i >= len(candicate[j+1]) {
+ goto aggregate
+ }
+ if candicate[j][i] != candicate[j+1][i] {
+ goto aggregate
+ }
+ }
+ size = i + 1
+ }
+aggregate:
+ if size > 0 {
+ same = runes.Copy(candicate[0][:size])
+ for i := 0; i < len(candicate); i++ {
+ n := runes.Copy(candicate[i])
+ copy(n, n[size:])
+ candicate[i] = n[:len(n)-size]
+ }
+ }
+ return
+}
+
+func (Runes) TrimSpaceLeft(in []rune) []rune {
+ firstIndex := len(in)
+ for i, r := range in {
+ if !unicode.IsSpace(r) {
+ firstIndex = i
+ break
+ }
+ }
+ return in[firstIndex:]
+}
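The width rules above (tabs count as TabWidth, combining and control marks as zero, Han/Hangul/Hiragana/Katakana as two columns) drive all cursor math in the package; a small sketch of the exported helpers:

package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	rs := readline.Runes{}

	fmt.Println(rs.Width('a'))                     // 1
	fmt.Println(rs.Width('世'))                     // 2 (Han: double width)
	fmt.Println(rs.WidthAll([]rune("ab\t世")))      // 1+1+TabWidth+2 = 8 with the default TabWidth of 4
	fmt.Println(len(rs.Backspace([]rune("世"))))    // 2 backspace bytes to step back over a double-width rune
}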
diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go
new file mode 100644
index 000000000..52e8ff099
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/search.go
@@ -0,0 +1,164 @@
+package readline
+
+import (
+ "bytes"
+ "container/list"
+ "fmt"
+ "io"
+)
+
+const (
+ S_STATE_FOUND = iota
+ S_STATE_FAILING
+)
+
+const (
+ S_DIR_BCK = iota
+ S_DIR_FWD
+)
+
+type opSearch struct {
+ inMode bool
+ state int
+ dir int
+ source *list.Element
+ w io.Writer
+ buf *RuneBuffer
+ data []rune
+ history *opHistory
+ cfg *Config
+ markStart int
+ markEnd int
+ width int
+}
+
+func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch {
+ return &opSearch{
+ w: w,
+ buf: buf,
+ cfg: cfg,
+ history: history,
+ width: width,
+ }
+}
+
+func (o *opSearch) OnWidthChange(newWidth int) {
+ o.width = newWidth
+}
+
+func (o *opSearch) IsSearchMode() bool {
+ return o.inMode
+}
+
+func (o *opSearch) SearchBackspace() {
+ if len(o.data) > 0 {
+ o.data = o.data[:len(o.data)-1]
+ o.search(true)
+ }
+}
+
+func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) {
+ if o.dir == S_DIR_BCK {
+ return o.history.FindBck(isNewSearch, o.data, o.buf.idx)
+ }
+ return o.history.FindFwd(isNewSearch, o.data, o.buf.idx)
+}
+
+func (o *opSearch) search(isChange bool) bool {
+ if len(o.data) == 0 {
+ o.state = S_STATE_FOUND
+ o.SearchRefresh(-1)
+ return true
+ }
+ idx, elem := o.findHistoryBy(isChange)
+ if elem == nil {
+ o.SearchRefresh(-2)
+ return false
+ }
+ o.history.current = elem
+
+ item := o.history.showItem(o.history.current.Value)
+ start, end := idx, idx+len(o.data)
+ if o.dir == S_DIR_FWD {
+ idx += len(o.data)
+ }
+ o.buf.SetWithIdx(idx, item)
+ o.markStart, o.markEnd = start, end
+ o.SearchRefresh(idx)
+ return true
+}
+
+func (o *opSearch) SearchChar(r rune) {
+ o.data = append(o.data, r)
+ o.search(true)
+}
+
+func (o *opSearch) SearchMode(dir int) bool {
+ if o.width == 0 {
+ return false
+ }
+ alreadyInMode := o.inMode
+ o.inMode = true
+ o.dir = dir
+ o.source = o.history.current
+ if alreadyInMode {
+ o.search(false)
+ } else {
+ o.SearchRefresh(-1)
+ }
+ return true
+}
+
+func (o *opSearch) ExitSearchMode(revert bool) {
+ if revert {
+ o.history.current = o.source
+ o.buf.Set(o.history.showItem(o.history.current.Value))
+ }
+ o.markStart, o.markEnd = 0, 0
+ o.state = S_STATE_FOUND
+ o.inMode = false
+ o.source = nil
+ o.data = nil
+}
+
+func (o *opSearch) SearchRefresh(x int) {
+ if x == -2 {
+ o.state = S_STATE_FAILING
+ } else if x >= 0 {
+ o.state = S_STATE_FOUND
+ }
+ if x < 0 {
+ x = o.buf.idx
+ }
+ x = o.buf.CurrentWidth(x)
+ x += o.buf.PromptLen()
+ x = x % o.width
+
+ if o.markStart > 0 {
+ o.buf.SetStyle(o.markStart, o.markEnd, "4")
+ }
+
+ lineCnt := o.buf.CursorLineCount()
+ buf := bytes.NewBuffer(nil)
+ buf.Write(bytes.Repeat([]byte("\n"), lineCnt))
+ buf.WriteString("\033[J")
+ if o.state == S_STATE_FAILING {
+ buf.WriteString("failing ")
+ }
+ if o.dir == S_DIR_BCK {
+ buf.WriteString("bck")
+ } else if o.dir == S_DIR_FWD {
+ buf.WriteString("fwd")
+ }
+ buf.WriteString("-i-search: ")
+ buf.WriteString(string(o.data)) // keyword
+ buf.WriteString("\033[4m \033[0m") // _
+ fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev
+ if x > 0 {
+ fmt.Fprintf(buf, "\033[%dC", x) // move forward
+ }
+ o.w.Write(buf.Bytes())
+}
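
SearchRefresh redraws the `bck-i-search:`/`fwd-i-search:` prompt below the edit line and then moves the cursor back to column (prompt width + display width of the buffer up to the match) modulo the terminal width. The following is a tiny, self-contained sketch of that cursor arithmetic with made-up values; none of the variable names below are part of the vendored code.

```go
package main

import "fmt"

func main() {
	// Stand-ins for o.buf.CurrentWidth(x), o.buf.PromptLen() and o.width.
	bufWidth, promptLen, termWidth := 37, 2, 80

	col := (bufWidth + promptLen) % termWidth // column within the current row
	fmt.Print("\033[J")                       // CSI J: clear to end of screen
	if col > 0 {
		fmt.Printf("\033[%dC", col) // CSI n C: move cursor col cells right
	}
}
```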
diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go
new file mode 100644
index 000000000..61d44b759
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/std.go
@@ -0,0 +1,197 @@
+package readline
+
+import (
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+)
+
+var (
+ Stdin io.ReadCloser = os.Stdin
+ Stdout io.WriteCloser = os.Stdout
+ Stderr io.WriteCloser = os.Stderr
+)
+
+var (
+ std *Instance
+ stdOnce sync.Once
+)
+
+// getInstance returns the global instance, which does not save history automatically.
+func getInstance() *Instance {
+ stdOnce.Do(func() {
+ std, _ = NewEx(&Config{
+ DisableAutoSaveHistory: true,
+ })
+ })
+ return std
+}
+
+// SetHistoryPath tells readline to load history from fp and persist it
+// to disk. Set fp to "" to disable persistence; in that case `AddHistory`
+// will always return a nil error.
+func SetHistoryPath(fp string) {
+ ins := getInstance()
+ cfg := ins.Config.Clone()
+ cfg.HistoryFile = fp
+ ins.SetConfig(cfg)
+}
+
+// SetAutoComplete sets the auto-completer on the global instance.
+func SetAutoComplete(completer AutoCompleter) {
+ ins := getInstance()
+ cfg := ins.Config.Clone()
+ cfg.AutoComplete = completer
+ ins.SetConfig(cfg)
+}
+
+// AddHistory adds an entry to the global instance's history manually.
+// It returns an error only if `SetHistoryPath` was set to a non-empty path.
+func AddHistory(content string) error {
+ ins := getInstance()
+ return ins.SaveHistory(content)
+}
+
+func Password(prompt string) ([]byte, error) {
+ ins := getInstance()
+ return ins.ReadPassword(prompt)
+}
+
+// Line reads a line using the global instance and its configuration.
+func Line(prompt string) (string, error) {
+ ins := getInstance()
+ ins.SetPrompt(prompt)
+ return ins.Readline()
+}
+
+type CancelableStdin struct {
+ r io.Reader
+ mutex sync.Mutex
+ stop chan struct{}
+ closed int32
+ notify chan struct{}
+ data []byte
+ read int
+ err error
+}
+
+func NewCancelableStdin(r io.Reader) *CancelableStdin {
+ c := &CancelableStdin{
+ r: r,
+ notify: make(chan struct{}),
+ stop: make(chan struct{}),
+ }
+ go c.ioloop()
+ return c
+}
+
+func (c *CancelableStdin) ioloop() {
+loop:
+ for {
+ select {
+ case <-c.notify:
+ c.read, c.err = c.r.Read(c.data)
+ select {
+ case c.notify <- struct{}{}:
+ case <-c.stop:
+ break loop
+ }
+ case <-c.stop:
+ break loop
+ }
+ }
+}
+
+func (c *CancelableStdin) Read(b []byte) (n int, err error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if atomic.LoadInt32(&c.closed) == 1 {
+ return 0, io.EOF
+ }
+
+ c.data = b
+ select {
+ case c.notify <- struct{}{}:
+ case <-c.stop:
+ return 0, io.EOF
+ }
+ select {
+ case <-c.notify:
+ return c.read, c.err
+ case <-c.stop:
+ return 0, io.EOF
+ }
+}
+
+func (c *CancelableStdin) Close() error {
+ if atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
+ close(c.stop)
+ }
+ return nil
+}
+
+// FillableStdin is a stdin reader that can have data prepended to it
+// before falling back to reading from the real stdin.
+type FillableStdin struct {
+ sync.Mutex
+ stdin io.Reader
+ stdinBuffer io.ReadCloser
+ buf []byte
+ bufErr error
+}
+
+// NewFillableStdin returns a FillableStdin and the writer used to prefill it.
+func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) {
+ r, w := io.Pipe()
+ s := &FillableStdin{
+ stdinBuffer: r,
+ stdin: stdin,
+ }
+ s.ioloop()
+ return s, w
+}
+
+func (s *FillableStdin) ioloop() {
+ go func() {
+ for {
+ bufR := make([]byte, 100)
+ var n int
+ n, s.bufErr = s.stdinBuffer.Read(bufR)
+ if s.bufErr != nil {
+ if s.bufErr == io.ErrClosedPipe {
+ break
+ }
+ }
+ s.Lock()
+ s.buf = append(s.buf, bufR[:n]...)
+ s.Unlock()
+ }
+ }()
+}
+
+// Read reads from the local buffer first; if it is empty, it reads from stdin.
+func (s *FillableStdin) Read(p []byte) (n int, err error) {
+ s.Lock()
+ i := len(s.buf)
+ if len(p) < i {
+ i = len(p)
+ }
+ if i > 0 {
+ n := copy(p, s.buf)
+ // keep any bytes that did not fit in p for the next Read
+ s.buf = s.buf[n:]
+ cerr := s.bufErr
+ s.bufErr = nil
+ s.Unlock()
+ return n, cerr
+ }
+ s.Unlock()
+ n, err = s.stdin.Read(p)
+ return n, err
+}
+
+func (s *FillableStdin) Close() error {
+ s.stdinBuffer.Close()
+ return nil
+}
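
std.go layers a package-level convenience API over a lazily created global Instance: Line, Password, AddHistory, SetHistoryPath and SetAutoComplete all route to it. Here is a rough usage sketch, assuming the vendored chzyer/readline import path; the history path and prompt are illustrative and error handling is kept minimal.

```go
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	// Persist history for the global instance; pass "" to keep it in memory only.
	readline.SetHistoryPath("/tmp/example_history")

	line, err := readline.Line("> ")
	if err != nil { // e.g. io.EOF on Ctrl-D
		return
	}
	// The global instance is built with DisableAutoSaveHistory,
	// so entries are added explicitly.
	_ = readline.AddHistory(line)

	fmt.Println("you typed:", line)
}
```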
diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go
new file mode 100644
index 000000000..b10f91bcb
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/std_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package readline
+
+func init() {
+ Stdin = NewRawReader()
+ Stdout = NewANSIWriter(Stdout)
+ Stderr = NewANSIWriter(Stderr)
+}
diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go
new file mode 100644
index 000000000..133993ca8
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// oldState, err := terminal.MakeRaw(0)
+// if err != nil {
+// panic(err)
+// }
+// defer terminal.Restore(0, oldState)
+package readline
+
+import (
+ "io"
+ "syscall"
+)
+
+// State contains the state of a terminal.
+type State struct {
+ termios Termios
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ _, err := getTermios(fd)
+ return err == nil
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+ var oldState State
+
+ if termios, err := getTermios(fd); err != nil {
+ return nil, err
+ } else {
+ oldState.termios = *termios
+ }
+
+ newState := oldState.termios
+ // This attempts to replicate the behaviour documented for cfmakeraw in
+ // the termios(3) manpage.
+ newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
+ // newState.Oflag &^= syscall.OPOST
+ newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
+ newState.Cflag &^= syscall.CSIZE | syscall.PARENB
+ newState.Cflag |= syscall.CS8
+
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
+
+ return &oldState, setTermios(fd, &newState)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ termios, err := getTermios(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return &State{termios: *termios}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func restoreTerm(fd int, state *State) error {
+ return setTermios(fd, &state.termios)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ oldState, err := getTermios(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ newState := oldState
+ newState.Lflag &^= syscall.ECHO
+ newState.Lflag |= syscall.ICANON | syscall.ISIG
+ newState.Iflag |= syscall.ICRNL
+ if err := setTermios(fd, newState); err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ setTermios(fd, oldState)
+ }()
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(fd, buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
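
term.go is the Unix side of the terminal handling: MakeRaw clears roughly the same termios flags as cfmakeraw(3) and hands back the previous State, and Restore (defined later in utils.go) puts it back. A minimal sketch of that round trip follows, assuming a Unix build of the vendored package.

```go
package main

import (
	"fmt"
	"syscall"

	"github.com/chzyer/readline"
)

func main() {
	fd := int(syscall.Stdin)
	if !readline.IsTerminal(fd) {
		fmt.Println("stdin is not a terminal")
		return
	}

	oldState, err := readline.MakeRaw(fd) // enter raw mode
	if err != nil {
		panic(err)
	}
	defer readline.Restore(fd, oldState) // always restore the previous termios

	// ... read raw, unbuffered input here ...
}
```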
diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go
new file mode 100644
index 000000000..68b56ea6b
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_bsd.go
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package readline
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getTermios(fd int) (*Termios, error) {
+ termios := new(Termios)
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return nil, err
+ }
+ return termios, nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go
new file mode 100644
index 000000000..e3392b4ac
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_linux.go
@@ -0,0 +1,33 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package readline
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// These constants are declared here rather than imported from the
+// syscall package because some syscall packages (for example gccgo's,
+// even on Linux) do not declare them.
+const ioctlReadTermios = 0x5401 // syscall.TCGETS
+const ioctlWriteTermios = 0x5402 // syscall.TCSETS
+
+func getTermios(fd int) (*Termios, error) {
+ termios := new(Termios)
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return nil, err
+ }
+ return termios, nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go
new file mode 100644
index 000000000..4c27273c7
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_solaris.go
@@ -0,0 +1,32 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package readline
+
+import "golang.org/x/sys/unix"
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (int, int, error) {
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(ws.Col), int(ws.Row), nil
+}
+
+type Termios unix.Termios
+
+func getTermios(fd int) (*Termios, error) {
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+ return (*Termios)(termios), nil
+}
+
+func setTermios(fd int, termios *Termios) error {
+ return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios))
+}
diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go
new file mode 100644
index 000000000..d3ea24244
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_unix.go
@@ -0,0 +1,24 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+
+package readline
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type Termios syscall.Termios
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (int, int, error) {
+ var dimensions [4]uint16
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0)
+ if err != 0 {
+ return 0, 0, err
+ }
+ return int(dimensions[1]), int(dimensions[0]), nil
+}
diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go
new file mode 100644
index 000000000..1290e00bc
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/term_windows.go
@@ -0,0 +1,171 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// oldState, err := terminal.MakeRaw(0)
+// if err != nil {
+// panic(err)
+// }
+// defer terminal.Restore(0, oldState)
+package readline
+
+import (
+ "io"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ enableLineInput = 2
+ enableEchoInput = 4
+ enableProcessedInput = 1
+ enableWindowInput = 8
+ enableMouseInput = 16
+ enableInsertMode = 32
+ enableQuickEditMode = 64
+ enableExtendedFlags = 128
+ enableAutoPosition = 256
+ enableProcessedOutput = 1
+ enableWrapAtEolOutput = 2
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+)
+
+type (
+ coord struct {
+ x short
+ y short
+ }
+ smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+ }
+)
+
+type State struct {
+ mode uint32
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
+ _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ return &State{st}, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ return &State{st}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func restoreTerm(fd int, state *State) error {
+ _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
+ return err
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+ var info consoleScreenBufferInfo
+ _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
+ if e != 0 {
+ return 0, 0, error(e)
+ }
+ return int(info.size.x), int(info.size.y), nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ old := st
+
+ st &^= (enableEchoInput)
+ st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
+ _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+
+ defer func() {
+ syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
+ }()
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(syscall.Handle(fd), buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ if n > 0 && buf[n-1] == '\r' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go
new file mode 100644
index 000000000..1078631c1
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/terminal.go
@@ -0,0 +1,238 @@
+package readline
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type Terminal struct {
+ m sync.Mutex
+ cfg *Config
+ outchan chan rune
+ closed int32
+ stopChan chan struct{}
+ kickChan chan struct{}
+ wg sync.WaitGroup
+ isReading int32
+ sleeping int32
+
+ sizeChan chan string
+}
+
+func NewTerminal(cfg *Config) (*Terminal, error) {
+ if err := cfg.Init(); err != nil {
+ return nil, err
+ }
+ t := &Terminal{
+ cfg: cfg,
+ kickChan: make(chan struct{}, 1),
+ outchan: make(chan rune),
+ stopChan: make(chan struct{}, 1),
+ sizeChan: make(chan string, 1),
+ }
+
+ go t.ioloop()
+ return t, nil
+}
+
+// SleepToResume suspends the current process and returns only once it has been resumed.
+func (t *Terminal) SleepToResume() {
+ if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) {
+ return
+ }
+ defer atomic.StoreInt32(&t.sleeping, 0)
+
+ t.ExitRawMode()
+ ch := WaitForResume()
+ SuspendMe()
+ <-ch
+ t.EnterRawMode()
+}
+
+func (t *Terminal) EnterRawMode() (err error) {
+ return t.cfg.FuncMakeRaw()
+}
+
+func (t *Terminal) ExitRawMode() (err error) {
+ return t.cfg.FuncExitRaw()
+}
+
+func (t *Terminal) Write(b []byte) (int, error) {
+ return t.cfg.Stdout.Write(b)
+}
+
+// WriteStdin prefills the next Stdin fetch.
+// On the next call to Readline() this value will be written before the user input.
+func (t *Terminal) WriteStdin(b []byte) (int, error) {
+ return t.cfg.StdinWriter.Write(b)
+}
+
+type termSize struct {
+ left int
+ top int
+}
+
+func (t *Terminal) GetOffset(f func(offset string)) {
+ go func() {
+ f(<-t.sizeChan)
+ }()
+ t.Write([]byte("\033[6n"))
+}
+
+func (t *Terminal) Print(s string) {
+ fmt.Fprintf(t.cfg.Stdout, "%s", s)
+}
+
+func (t *Terminal) PrintRune(r rune) {
+ fmt.Fprintf(t.cfg.Stdout, "%c", r)
+}
+
+func (t *Terminal) Readline() *Operation {
+ return NewOperation(t, t.cfg)
+}
+
+// ReadRune returns rune(0) when EOF is reached.
+func (t *Terminal) ReadRune() rune {
+ ch, ok := <-t.outchan
+ if !ok {
+ return rune(0)
+ }
+ return ch
+}
+
+func (t *Terminal) IsReading() bool {
+ return atomic.LoadInt32(&t.isReading) == 1
+}
+
+func (t *Terminal) KickRead() {
+ select {
+ case t.kickChan <- struct{}{}:
+ default:
+ }
+}
+
+func (t *Terminal) ioloop() {
+ t.wg.Add(1)
+ defer func() {
+ t.wg.Done()
+ close(t.outchan)
+ }()
+
+ var (
+ isEscape bool
+ isEscapeEx bool
+ expectNextChar bool
+ )
+
+ buf := bufio.NewReader(t.getStdin())
+ for {
+ if !expectNextChar {
+ atomic.StoreInt32(&t.isReading, 0)
+ select {
+ case <-t.kickChan:
+ atomic.StoreInt32(&t.isReading, 1)
+ case <-t.stopChan:
+ return
+ }
+ }
+ expectNextChar = false
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ if strings.Contains(err.Error(), "interrupted system call") {
+ expectNextChar = true
+ continue
+ }
+ break
+ }
+
+ if isEscape {
+ isEscape = false
+ if r == CharEscapeEx {
+ expectNextChar = true
+ isEscapeEx = true
+ continue
+ }
+ r = escapeKey(r, buf)
+ } else if isEscapeEx {
+ isEscapeEx = false
+ if key := readEscKey(r, buf); key != nil {
+ r = escapeExKey(key)
+ // offset
+ if key.typ == 'R' {
+ if _, _, ok := key.Get2(); ok {
+ select {
+ case t.sizeChan <- key.attr:
+ default:
+ }
+ }
+ expectNextChar = true
+ continue
+ }
+ }
+ if r == 0 {
+ expectNextChar = true
+ continue
+ }
+ }
+
+ expectNextChar = true
+ switch r {
+ case CharEsc:
+ if t.cfg.VimMode {
+ t.outchan <- r
+ break
+ }
+ isEscape = true
+ case CharInterrupt, CharEnter, CharCtrlJ, CharDelete:
+ expectNextChar = false
+ fallthrough
+ default:
+ t.outchan <- r
+ }
+ }
+
+}
+
+func (t *Terminal) Bell() {
+ fmt.Fprintf(t, "%c", CharBell)
+}
+
+func (t *Terminal) Close() error {
+ if atomic.SwapInt32(&t.closed, 1) != 0 {
+ return nil
+ }
+ if closer, ok := t.cfg.Stdin.(io.Closer); ok {
+ closer.Close()
+ }
+ close(t.stopChan)
+ t.wg.Wait()
+ return t.ExitRawMode()
+}
+
+func (t *Terminal) GetConfig() *Config {
+ t.m.Lock()
+ cfg := *t.cfg
+ t.m.Unlock()
+ return &cfg
+}
+
+func (t *Terminal) getStdin() io.Reader {
+ t.m.Lock()
+ r := t.cfg.Stdin
+ t.m.Unlock()
+ return r
+}
+
+func (t *Terminal) SetConfig(c *Config) error {
+ if err := c.Init(); err != nil {
+ return err
+ }
+ t.m.Lock()
+ t.cfg = c
+ t.m.Unlock()
+ return nil
+}
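
Terminal owns the input ioloop: reads only start after KickRead() signals kickChan, decoded runes are delivered on outchan, and ReadRune returns rune(0) once the channel is closed. In normal use it is driven by Operation via Readline(), but the exported pieces can be exercised directly; the following is a hedged lifecycle sketch, with the Config literal and the echo loop purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	t, err := readline.NewTerminal(&readline.Config{Prompt: "> "})
	if err != nil {
		panic(err)
	}
	defer t.Close() // closes stdin's reader, waits for the ioloop, exits raw mode

	t.KickRead() // the ioloop blocks on kickChan until a read is requested
	for {
		r := t.ReadRune()
		if r == 0 || r == readline.CharInterrupt { // EOF or Ctrl-C
			break
		}
		t.PrintRune(r)
	}
	fmt.Println()
}
```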
diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go
new file mode 100644
index 000000000..af4e00521
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils.go
@@ -0,0 +1,277 @@
+package readline
+
+import (
+ "bufio"
+ "bytes"
+ "container/list"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+)
+
+var (
+ isWindows = false
+)
+
+const (
+ CharLineStart = 1
+ CharBackward = 2
+ CharInterrupt = 3
+ CharDelete = 4
+ CharLineEnd = 5
+ CharForward = 6
+ CharBell = 7
+ CharCtrlH = 8
+ CharTab = 9
+ CharCtrlJ = 10
+ CharKill = 11
+ CharCtrlL = 12
+ CharEnter = 13
+ CharNext = 14
+ CharPrev = 16
+ CharBckSearch = 18
+ CharFwdSearch = 19
+ CharTranspose = 20
+ CharCtrlU = 21
+ CharCtrlW = 23
+ CharCtrlY = 25
+ CharCtrlZ = 26
+ CharEsc = 27
+ CharEscapeEx = 91
+ CharBackspace = 127
+)
+
+const (
+ MetaBackward rune = -iota - 1
+ MetaForward
+ MetaDelete
+ MetaBackspace
+ MetaTranspose
+)
+
+// WaitForResume must be called before the current process is suspended.
+// It runs a ticker until an unusually long gap between ticks occurs,
+// which indicates that the process has been resumed.
+func WaitForResume() chan struct{} {
+ ch := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ ticker := time.NewTicker(10 * time.Millisecond)
+ t := time.Now()
+ wg.Done()
+ for {
+ now := <-ticker.C
+ if now.Sub(t) > 100*time.Millisecond {
+ break
+ }
+ t = now
+ }
+ ticker.Stop()
+ ch <- struct{}{}
+ }()
+ wg.Wait()
+ return ch
+}
+
+func Restore(fd int, state *State) error {
+ err := restoreTerm(fd, state)
+ if err != nil {
+ // errno 0 means everything is ok :)
+ if err.Error() == "errno 0" {
+ return nil
+ } else {
+ return err
+ }
+ }
+ return nil
+}
+
+func IsPrintable(key rune) bool {
+ isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
+ return key >= 32 && !isInSurrogateArea
+}
+
+// escapeExKey translates an Esc[X sequence into the corresponding key code.
+func escapeExKey(key *escapeKeyPair) rune {
+ var r rune
+ switch key.typ {
+ case 'D':
+ r = CharBackward
+ case 'C':
+ r = CharForward
+ case 'A':
+ r = CharPrev
+ case 'B':
+ r = CharNext
+ case 'H':
+ r = CharLineStart
+ case 'F':
+ r = CharLineEnd
+ case '~':
+ if key.attr == "3" {
+ r = CharDelete
+ }
+ default:
+ }
+ return r
+}
+
+type escapeKeyPair struct {
+ attr string
+ typ rune
+}
+
+func (e *escapeKeyPair) Get2() (int, int, bool) {
+ sp := strings.Split(e.attr, ";")
+ if len(sp) < 2 {
+ return -1, -1, false
+ }
+ s1, err := strconv.Atoi(sp[0])
+ if err != nil {
+ return -1, -1, false
+ }
+ s2, err := strconv.Atoi(sp[1])
+ if err != nil {
+ return -1, -1, false
+ }
+ return s1, s2, true
+}
+
+func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair {
+ p := escapeKeyPair{}
+ buf := bytes.NewBuffer(nil)
+ for {
+ if r == ';' {
+ } else if unicode.IsNumber(r) {
+ } else {
+ p.typ = r
+ break
+ }
+ buf.WriteRune(r)
+ r, _, _ = reader.ReadRune()
+ }
+ p.attr = buf.String()
+ return &p
+}
+
+// translate EscX to Meta+X
+func escapeKey(r rune, reader *bufio.Reader) rune {
+ switch r {
+ case 'b':
+ r = MetaBackward
+ case 'f':
+ r = MetaForward
+ case 'd':
+ r = MetaDelete
+ case CharTranspose:
+ r = MetaTranspose
+ case CharBackspace:
+ r = MetaBackspace
+ case 'O':
+ d, _, _ := reader.ReadRune()
+ switch d {
+ case 'H':
+ r = CharLineStart
+ case 'F':
+ r = CharLineEnd
+ default:
+ reader.UnreadRune()
+ }
+ case CharEsc:
+
+ }
+ return r
+}
+
+func SplitByLine(start, screenWidth int, rs []rune) []string {
+ var ret []string
+ buf := bytes.NewBuffer(nil)
+ currentWidth := start
+ for _, r := range rs {
+ w := runes.Width(r)
+ currentWidth += w
+ buf.WriteRune(r)
+ if currentWidth >= screenWidth {
+ ret = append(ret, buf.String())
+ buf.Reset()
+ currentWidth = 0
+ }
+ }
+ ret = append(ret, buf.String())
+ return ret
+}
+
+// LineCount calculates how many screen lines are needed for w character cells.
+func LineCount(screenWidth, w int) int {
+ r := w / screenWidth
+ if w%screenWidth != 0 {
+ r++
+ }
+ return r
+}
+
+func IsWordBreak(i rune) bool {
+ switch {
+ case i >= 'a' && i <= 'z':
+ case i >= 'A' && i <= 'Z':
+ case i >= '0' && i <= '9':
+ default:
+ return true
+ }
+ return false
+}
+
+func GetInt(s []string, def int) int {
+ if len(s) == 0 {
+ return def
+ }
+ c, err := strconv.Atoi(s[0])
+ if err != nil {
+ return def
+ }
+ return c
+}
+
+type RawMode struct {
+ state *State
+}
+
+func (r *RawMode) Enter() (err error) {
+ r.state, err = MakeRaw(GetStdin())
+ return err
+}
+
+func (r *RawMode) Exit() error {
+ if r.state == nil {
+ return nil
+ }
+ return Restore(GetStdin(), r.state)
+}
+
+// -----------------------------------------------------------------------------
+
+func sleep(n int) {
+ Debug(n)
+ time.Sleep(2000 * time.Millisecond)
+}
+
+// print a linked list to Debug()
+func debugList(l *list.List) {
+ idx := 0
+ for e := l.Front(); e != nil; e = e.Next() {
+ Debug(idx, fmt.Sprintf("%+v", e.Value))
+ idx++
+ }
+}
+
+// Debug appends its arguments as a log line to the file debug.tmp.
+func Debug(o ...interface{}) {
+ f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ fmt.Fprintln(f, o...)
+ f.Close()
+}
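
LineCount and SplitByLine implement the wrapping math used when redrawing: LineCount is a ceiling division of a cell width by the screen width, and SplitByLine cuts a rune slice into screen-width chunks starting from a given column. A tiny illustration with made-up numbers, using the exported functions defined above:

```go
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	// 100 cells on an 80-column terminal occupy 2 screen lines.
	fmt.Println(readline.LineCount(80, 100))

	// Split a line into 40-column chunks, starting 10 cells in
	// (for example, after a prompt).
	parts := readline.SplitByLine(10, 40, []rune("some fairly long line of user input to wrap"))
	for _, p := range parts {
		fmt.Printf("%q\n", p)
	}
}
```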
diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go
new file mode 100644
index 000000000..f88dac97b
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils_unix.go
@@ -0,0 +1,83 @@
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris
+
+package readline
+
+import (
+ "io"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+)
+
+type winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+// SuspendMe sends a suspend signal to the current process while it is in raw mode.
+// On OSX the signal must be sent to the parent's pid; on Linux it must be
+// sent to the process itself, so both are signalled here.
+func SuspendMe() {
+ p, _ := os.FindProcess(os.Getppid())
+ p.Signal(syscall.SIGTSTP)
+ p, _ = os.FindProcess(os.Getpid())
+ p.Signal(syscall.SIGTSTP)
+}
+
+// get width of the terminal
+func getWidth(stdoutFd int) int {
+ cols, _, err := GetSize(stdoutFd)
+ if err != nil {
+ return -1
+ }
+ return cols
+}
+
+func GetScreenWidth() int {
+ w := getWidth(syscall.Stdout)
+ if w < 0 {
+ w = getWidth(syscall.Stderr)
+ }
+ return w
+}
+
+// ClearScreen clears the console screen
+func ClearScreen(w io.Writer) (int, error) {
+ return w.Write([]byte("\033[H"))
+}
+
+func DefaultIsTerminal() bool {
+ return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr))
+}
+
+func GetStdin() int {
+ return syscall.Stdin
+}
+
+// -----------------------------------------------------------------------------
+
+var (
+ widthChange sync.Once
+ widthChangeCallback func()
+)
+
+func DefaultOnWidthChanged(f func()) {
+ widthChangeCallback = f
+ widthChange.Do(func() {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGWINCH)
+
+ go func() {
+ for {
+ _, ok := <-ch
+ if !ok {
+ break
+ }
+ widthChangeCallback()
+ }
+ }()
+ })
+}
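
On Unix builds, terminal resizes are observed via SIGWINCH: DefaultOnWidthChanged installs the signal handler once and invokes the supplied callback on every resize, and GetScreenWidth falls back from stdout to stderr. A small sketch of wiring a resize callback; the blocking select is only there so the demo stays alive to receive signals.

```go
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	fmt.Println("initial width:", readline.GetScreenWidth())

	// Re-query the width whenever the terminal is resized (SIGWINCH).
	readline.DefaultOnWidthChanged(func() {
		fmt.Println("resized, new width:", readline.GetScreenWidth())
	})

	select {} // block forever so resize events can be observed
}
```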
diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go
new file mode 100644
index 000000000..5bfa55dcc
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/utils_windows.go
@@ -0,0 +1,41 @@
+// +build windows
+
+package readline
+
+import (
+ "io"
+ "syscall"
+)
+
+func SuspendMe() {
+}
+
+func GetStdin() int {
+ return int(syscall.Stdin)
+}
+
+func init() {
+ isWindows = true
+}
+
+// get width of the terminal
+func GetScreenWidth() int {
+ info, _ := GetConsoleScreenBufferInfo()
+ if info == nil {
+ return -1
+ }
+ return int(info.dwSize.x)
+}
+
+// ClearScreen clears the console screen
+func ClearScreen(_ io.Writer) error {
+ return SetConsoleCursorPosition(&_COORD{0, 0})
+}
+
+func DefaultIsTerminal() bool {
+ return true
+}
+
+func DefaultOnWidthChanged(func()) {
+
+}
diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go
new file mode 100644
index 000000000..bedf2c1a6
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/vim.go
@@ -0,0 +1,176 @@
+package readline
+
+const (
+ VIM_NORMAL = iota
+ VIM_INSERT
+ VIM_VISUAL
+)
+
+type opVim struct {
+ cfg *Config
+ op *Operation
+ vimMode int
+}
+
+func newVimMode(op *Operation) *opVim {
+ ov := &opVim{
+ cfg: op.cfg,
+ op: op,
+ }
+ ov.SetVimMode(ov.cfg.VimMode)
+ return ov
+}
+
+func (o *opVim) SetVimMode(on bool) {
+ if o.cfg.VimMode && !on { // turn off
+ o.ExitVimMode()
+ }
+ o.cfg.VimMode = on
+ o.vimMode = VIM_INSERT
+}
+
+func (o *opVim) ExitVimMode() {
+ o.vimMode = VIM_INSERT
+}
+
+func (o *opVim) IsEnableVimMode() bool {
+ return o.cfg.VimMode
+}
+
+func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) {
+ rb := o.op.buf
+ handled = true
+ switch r {
+ case 'h':
+ t = CharBackward
+ case 'j':
+ t = CharNext
+ case 'k':
+ t = CharPrev
+ case 'l':
+ t = CharForward
+ case '0', '^':
+ rb.MoveToLineStart()
+ case '$':
+ rb.MoveToLineEnd()
+ case 'x':
+ rb.Delete()
+ if rb.IsCursorInEnd() {
+ rb.MoveBackward()
+ }
+ case 'r':
+ rb.Replace(readNext())
+ case 'd':
+ next := readNext()
+ switch next {
+ case 'd':
+ rb.Erase()
+ case 'w':
+ rb.DeleteWord()
+ case 'h':
+ rb.Backspace()
+ case 'l':
+ rb.Delete()
+ }
+ case 'p':
+ rb.Yank()
+ case 'b', 'B':
+ rb.MoveToPrevWord()
+ case 'w', 'W':
+ rb.MoveToNextWord()
+ case 'e', 'E':
+ rb.MoveToEndWord()
+ case 'f', 'F', 't', 'T':
+ next := readNext()
+ prevChar := r == 't' || r == 'T'
+ reverse := r == 'F' || r == 'T'
+ switch next {
+ case CharEsc:
+ default:
+ rb.MoveTo(next, prevChar, reverse)
+ }
+ default:
+ return r, false
+ }
+ return t, true
+}
+
+func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) {
+ rb := o.op.buf
+ handled = true
+ switch r {
+ case 'i':
+ case 'I':
+ rb.MoveToLineStart()
+ case 'a':
+ rb.MoveForward()
+ case 'A':
+ rb.MoveToLineEnd()
+ case 's':
+ rb.Delete()
+ case 'S':
+ rb.Erase()
+ case 'c':
+ next := readNext()
+ switch next {
+ case 'c':
+ rb.Erase()
+ case 'w':
+ rb.DeleteWord()
+ case 'h':
+ rb.Backspace()
+ case 'l':
+ rb.Delete()
+ }
+ default:
+ return r, false
+ }
+
+ o.EnterVimInsertMode()
+ return
+}
+
+func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) {
+ switch r {
+ case CharEnter, CharInterrupt:
+ o.ExitVimMode()
+ return r
+ }
+
+ if r, handled := o.handleVimNormalMovement(r, readNext); handled {
+ return r
+ }
+
+ if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled {
+ return r
+ }
+
+ // invalid operation
+ o.op.t.Bell()
+ return 0
+}
+
+func (o *opVim) EnterVimInsertMode() {
+ o.vimMode = VIM_INSERT
+}
+
+func (o *opVim) ExitVimInsertMode() {
+ o.vimMode = VIM_NORMAL
+}
+
+func (o *opVim) HandleVim(r rune, readNext func() rune) rune {
+ if o.vimMode == VIM_NORMAL {
+ return o.HandleVimNormal(r, readNext)
+ }
+ if r == CharEsc {
+ o.ExitVimInsertMode()
+ return 0
+ }
+
+ switch o.vimMode {
+ case VIM_INSERT:
+ return r
+ case VIM_VISUAL:
+ }
+ return r
+}
diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go
new file mode 100644
index 000000000..63f4f7b78
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/windows_api.go
@@ -0,0 +1,152 @@
+// +build windows
+
+package readline
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel = NewKernel()
+ stdout = uintptr(syscall.Stdout)
+ stdin = uintptr(syscall.Stdin)
+)
+
+type Kernel struct {
+ SetConsoleCursorPosition,
+ SetConsoleTextAttribute,
+ FillConsoleOutputCharacterW,
+ FillConsoleOutputAttribute,
+ ReadConsoleInputW,
+ GetConsoleScreenBufferInfo,
+ GetConsoleCursorInfo,
+ GetStdHandle CallFunc
+}
+
+type short int16
+type word uint16
+type dword uint32
+type wchar uint16
+
+type _COORD struct {
+ x short
+ y short
+}
+
+func (c *_COORD) ptr() uintptr {
+ return uintptr(*(*int32)(unsafe.Pointer(c)))
+}
+
+const (
+ EVENT_KEY = 0x0001
+ EVENT_MOUSE = 0x0002
+ EVENT_WINDOW_BUFFER_SIZE = 0x0004
+ EVENT_MENU = 0x0008
+ EVENT_FOCUS = 0x0010
+)
+
+type _KEY_EVENT_RECORD struct {
+ bKeyDown int32
+ wRepeatCount word
+ wVirtualKeyCode word
+ wVirtualScanCode word
+ unicodeChar wchar
+ dwControlKeyState dword
+}
+
+// KEY_EVENT_RECORD KeyEvent;
+// MOUSE_EVENT_RECORD MouseEvent;
+// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent;
+// MENU_EVENT_RECORD MenuEvent;
+// FOCUS_EVENT_RECORD FocusEvent;
+type _INPUT_RECORD struct {
+ EventType word
+ Padding uint16
+ Event [16]byte
+}
+
+type _CONSOLE_SCREEN_BUFFER_INFO struct {
+ dwSize _COORD
+ dwCursorPosition _COORD
+ wAttributes word
+ srWindow _SMALL_RECT
+ dwMaximumWindowSize _COORD
+}
+
+type _SMALL_RECT struct {
+ left short
+ top short
+ right short
+ bottom short
+}
+
+type _CONSOLE_CURSOR_INFO struct {
+ dwSize dword
+ bVisible bool
+}
+
+type CallFunc func(u ...uintptr) error
+
+func NewKernel() *Kernel {
+ k := &Kernel{}
+ kernel32 := syscall.NewLazyDLL("kernel32.dll")
+ v := reflect.ValueOf(k).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ f := kernel32.NewProc(name)
+ v.Field(i).Set(reflect.ValueOf(k.Wrap(f)))
+ }
+ return k
+}
+
+func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc {
+ return func(args ...uintptr) error {
+ var r0 uintptr
+ var e1 syscall.Errno
+ size := uintptr(len(args))
+ if len(args) <= 3 {
+ buf := make([]uintptr, 3)
+ copy(buf, args)
+ r0, _, e1 = syscall.Syscall(p.Addr(), size,
+ buf[0], buf[1], buf[2])
+ } else {
+ buf := make([]uintptr, 6)
+ copy(buf, args)
+ r0, _, e1 = syscall.Syscall6(p.Addr(), size,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
+ )
+ }
+
+ if int(r0) == 0 {
+ if e1 != 0 {
+ return error(e1)
+ } else {
+ return syscall.EINVAL
+ }
+ }
+ return nil
+ }
+
+}
+
+func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) {
+ t := new(_CONSOLE_SCREEN_BUFFER_INFO)
+ err := kernel.GetConsoleScreenBufferInfo(
+ stdout,
+ uintptr(unsafe.Pointer(t)),
+ )
+ return t, err
+}
+
+func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) {
+ t := new(_CONSOLE_CURSOR_INFO)
+ err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t)))
+ return t, err
+}
+
+func SetConsoleCursorPosition(c *_COORD) error {
+ return kernel.SetConsoleCursorPosition(stdout, c.ptr())
+}
diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
deleted file mode 100644
index 0bfa6a040..000000000
--- a/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package syscallx
-
-import "syscall"
-
-// Readlink returns the destination of the named symbolic link.
-func Readlink(path string, buf []byte) (n int, err error) {
- return syscall.Readlink(path, buf)
-}
diff --git a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
deleted file mode 100644
index 2ba814990..000000000
--- a/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package syscallx
-
-import (
- "syscall"
- "unsafe"
-)
-
-type reparseDataBuffer struct {
- ReparseTag uint32
- ReparseDataLength uint16
- Reserved uint16
-
- // GenericReparseBuffer
- reparseBuffer byte
-}
-
-type mountPointReparseBuffer struct {
- SubstituteNameOffset uint16
- SubstituteNameLength uint16
- PrintNameOffset uint16
- PrintNameLength uint16
- PathBuffer [1]uint16
-}
-
-type symbolicLinkReparseBuffer struct {
- SubstituteNameOffset uint16
- SubstituteNameLength uint16
- PrintNameOffset uint16
- PrintNameLength uint16
- Flags uint32
- PathBuffer [1]uint16
-}
-
-const (
- _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
- _SYMLINK_FLAG_RELATIVE = 1
-)
-
-// Readlink returns the destination of the named symbolic link.
-func Readlink(path string, buf []byte) (n int, err error) {
- fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING,
- syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
- if err != nil {
- return -1, err
- }
- defer syscall.CloseHandle(fd)
-
- rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
- var bytesReturned uint32
- err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)
- if err != nil {
- return -1, err
- }
-
- rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
- var s string
- switch rdb.ReparseTag {
- case syscall.IO_REPARSE_TAG_SYMLINK:
- data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
- p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
- s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
- if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 {
- if len(s) >= 4 && s[:4] == `\??\` {
- s = s[4:]
- switch {
- case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar
- // do nothing
- case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar
- s = `\\` + s[4:]
- default:
- // unexpected; do nothing
- }
- } else {
- // unexpected; do nothing
- }
- }
- case _IO_REPARSE_TAG_MOUNT_POINT:
- data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
- p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
- s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
- if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar
- if len(s) < 48 || s[:11] != `\??\Volume{` {
- s = s[4:]
- }
- } else {
- // unexpected; do nothing
- }
- default:
- // the path is not a symlink or junction but another type of reparse
- // point
- return -1, syscall.ENOENT
- }
- n = copy(buf, []byte(s))
-
- return n, nil
-}
diff --git a/vendor/github.com/containerd/continuity/sysx/file_posix.go b/vendor/github.com/containerd/continuity/sysx/file_posix.go
deleted file mode 100644
index e28f3a1b5..000000000
--- a/vendor/github.com/containerd/continuity/sysx/file_posix.go
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package sysx
-
-import (
- "os"
- "path/filepath"
-
- "github.com/containerd/continuity/syscallx"
-)
-
-// Readlink returns the destination of the named symbolic link.
-// If there is an error, it will be of type *PathError.
-func Readlink(name string) (string, error) {
- for len := 128; ; len *= 2 {
- b := make([]byte, len)
- n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
- if e != nil {
- return "", &os.PathError{Op: "readlink", Path: name, Err: e}
- }
- if n < len {
- return string(b[0:n]), nil
- }
- }
-}
-
-// Many functions in package syscall return a count of -1 instead of 0.
-// Using fixCount(call()) instead of call() corrects the count.
-func fixCount(n int, err error) (int, error) {
- if n < 0 {
- n = 0
- }
- return n, err
-}
-
-// fixLongPath returns the extended-length (\\?\-prefixed) form of
-// path when needed, in order to avoid the default 260 character file
-// path limit imposed by Windows. If path is not easily converted to
-// the extended-length form (for example, if path is a relative path
-// or contains .. elements), or is short enough, fixLongPath returns
-// path unmodified.
-//
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
-func fixLongPath(path string) string {
- // Do nothing (and don't allocate) if the path is "short".
- // Empirically (at least on the Windows Server 2013 builder),
- // the kernel is arbitrarily okay with < 248 bytes. That
- // matches what the docs above say:
- // "When using an API to create a directory, the specified
- // path cannot be so long that you cannot append an 8.3 file
- // name (that is, the directory name cannot exceed MAX_PATH
- // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
- //
- // The MSDN docs appear to say that a normal path that is 248 bytes long
- // will work; empirically the path must be less then 248 bytes long.
- if len(path) < 248 {
- // Don't fix. (This is how Go 1.7 and earlier worked,
- // not automatically generating the \\?\ form)
- return path
- }
-
- // The extended form begins with \\?\, as in
- // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
- // The extended form disables evaluation of . and .. path
- // elements and disables the interpretation of / as equivalent
- // to \. The conversion here rewrites / to \ and elides
- // . elements as well as trailing or duplicate separators. For
- // simplicity it avoids the conversion entirely for relative
- // paths or paths containing .. elements. For now,
- // \\server\share paths are not converted to
- // \\?\UNC\server\share paths because the rules for doing so
- // are less well-specified.
- if len(path) >= 2 && path[:2] == `\\` {
- // Don't canonicalize UNC paths.
- return path
- }
- if !filepath.IsAbs(path) {
- // Relative path
- return path
- }
-
- const prefix = `\\?`
-
- pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
- copy(pathbuf, prefix)
- n := len(path)
- r, w := 0, len(prefix)
- for r < n {
- switch {
- case os.IsPathSeparator(path[r]):
- // empty block
- r++
- case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
- // /./
- r++
- case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
- // /../ is currently unhandled
- return path
- default:
- pathbuf[w] = '\\'
- w++
- for ; r < n && !os.IsPathSeparator(path[r]); r++ {
- pathbuf[w] = path[r]
- w++
- }
- }
- }
- // A drive's root directory needs a trailing \
- if w == len(`\\?\c:`) {
- pathbuf[w] = '\\'
- w++
- }
- return string(pathbuf[:w])
-}
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 4921c7b8a..65ce26080 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
- DEST_BRANCH: "release-1.16"
+ DEST_BRANCH: "master"
GOPATH: "/var/tmp/go"
GOSRC: "${GOPATH}/src/github.com/containers/buildah"
# Overrides default location (/tmp/cirrus) for repo clone
@@ -64,9 +64,6 @@ gce_instance:
# Update metadata on VM images referenced by this repository state
'cirrus-ci/only_prs/meta_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
container:
image: "quay.io/libpod/imgts:latest" # see contrib/imgts
cpu: 1
@@ -93,8 +90,9 @@ gce_instance:
gce_instance:
memory: "12Gb"
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
+ # N/B: Skip running this on branches due to multiple bugs in
+ # the git-validate tool which are difficult to debug and fix.
+ skip: $CIRRUS_PR == ''
timeout_in: 10m
@@ -108,12 +106,6 @@ gce_instance:
'cirrus-ci/only_prs/unit_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
- # not supported by bors-ng
- # allow_failures: $CI == $CI
-
timeout_in: 45m
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@@ -128,12 +120,6 @@ gce_instance:
gce_instance: # Only need to specify differences from defaults (above)
image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
- # don't fail the PR when we fail until #2480 is merged
- allow_failures: true
-
timeout_in: 20m
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@@ -146,9 +132,6 @@ gce_instance:
# in sync at all times.
'cirrus-ci/only_prs/vendor_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
env:
CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
GOPATH: "/var/tmp/go"
@@ -169,16 +152,13 @@ gce_instance:
'cirrus-ci/only_prs/cross_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
depends_on:
- 'cirrus-ci/only_prs/gate'
- 'cirrus-ci/only_prs/vendor'
env:
matrix:
- CROSS_TARGET: bin/buildah.darwin
+ CROSS_TARGET: cross
setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}'
@@ -189,9 +169,6 @@ gce_instance:
'cirrus-ci/required/testing_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
depends_on:
- 'cirrus-ci/only_prs/gate'
- 'cirrus-ci/only_prs/vendor'
@@ -223,9 +200,6 @@ gce_instance:
'cirrus-ci/required/in_podman_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
depends_on:
- 'cirrus-ci/only_prs/gate'
- 'cirrus-ci/only_prs/vendor'
@@ -254,9 +228,6 @@ gce_instance:
# required checks.
'cirrus-ci/success_task':
- # see bors.toml
- skip: $CIRRUS_BRANCH =~ ".*\.tmp"
-
depends_on:
- "cirrus-ci/required/testing"
- "cirrus-ci/required/in_podman"
@@ -301,8 +272,10 @@ gce_instance:
mv .cache /nix
if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
- mv /nix .cache
- chown -Rf $(whoami) .cache
binaries_artifacts:
path: "result/bin/buildah"
+
+ save_cache_script: |
+ mv /nix .cache
+ chown -Rf $(whoami) .cache
diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml
index f8247f049..0c7e31007 100644
--- a/vendor/github.com/containers/buildah/.golangci.yml
+++ b/vendor/github.com/containers/buildah/.golangci.yml
@@ -10,23 +10,15 @@ linters:
enable-all: true
disable:
# All these break for one reason or another
- - deadcode
- - depguard
- dupl
- - errcheck
+ - funlen
- gochecknoglobals
- gochecknoinits
- goconst
- gocritic
- gocyclo
- - golint
- gosec
- - gosimple
- lll
- maligned
- prealloc
- scopelint
- - structcheck
- - typecheck
- - unconvert
- - varcheck
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index ecdcb14fe..f37f387e7 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,32 +2,127 @@
# Changelog
-## v1.16.5 (2020-10-21)
+## v1.18.0 (2020-11-16)
+ Fix testing error caused by simultaneous merge
+ Vendor in containers/storage v1.24.0
+ short-names aliasing
+ Add --policy flag to buildah pull
+ Stop overwrapping and stuttering
+ copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs
+ Run: don't forcibly disable UTS namespaces in rootless mode
+ test: ensure non-directory in a Dockerfile path is handled correctly
+ Add a few tests for `pull` command
+ Fix buildah config --cmd to handle array
+ build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9
+ Fix NPE when Dockerfile path contains non-directory entries
+ Update buildah bud man page from podman build man page
+ Move declaration of decryption-keys to common cli
+ Run: correctly call copier.Mkdir
+ util: digging UID/GID out of os.FileInfo should work on Unix
+ imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results
+ Verify userns-uid-map and userns-gid-map input
+ Use CPP, CC and flags in dep check scripts
+ Avoid overriding LDFLAGS in Makefile
+ ADD: handle --chown on URLs
+ Update nix pin with `make nixpkgs`
+ (*Builder).Run: MkdirAll: handle EEXIST error
+ copier: try to force loading of nsswitch modules before chroot()
+ fix MkdirAll usage
+ build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3
+ build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8
+ Use osusergo build tag for static build
+ imagebuildah: cache should take image format into account
+ Bump to v1.18.0-dev
+
+## v1.17.0 (2020-10-29)
+ Handle cases where other tools mount/unmount containers
+ overlay.MountReadOnly: support RO overlay mounts
+ overlay: use fusermount for rootless umounts
+ overlay: fix umount
+ Switch default log level of Buildah to Warn. Users need to see these messages
+ Drop error messages about OCI/Docker format to Warning level
+ build(deps): bump github.com/containers/common from 0.26.0 to 0.26.2
+ tests/testreport: adjust for API break in storage v1.23.6
+ build(deps): bump github.com/containers/storage from 1.23.5 to 1.23.7
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.6.5 to 1.6.6
+ copier: put: ignore Typeflag="g"
+ Use curl to get repo file (fix #2714)
+ build(deps): bump github.com/containers/common from 0.25.0 to 0.26.0
+ build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1
+ Remove docs that refer to bors, since we're not using it
+ Buildah bud should not use stdin by default
+ bump containerd, docker, and golang.org/x/sys
+ Makefile: cross: remove windows.386 target
copier.copierHandlerPut: don't check length when there are errors
- CI: run gating tasks with a lot more memory
- Run(): ignore containers.conf's environment configuration
+ Stop excessive wrapping
+ CI: require that conformance tests pass
bump(github.com/openshift/imagebuilder) to v1.1.8
+ Skip tlsVerify insecure BUILD_REGISTRY_SOURCES
+ Fix build path wrong https://github.com/containers/podman/issues/7993
+ refactor pullpolicy to avoid deps
+ build(deps): bump github.com/containers/common from 0.24.0 to 0.25.0
+ CI: run gating tasks with a lot more memory
ADD and COPY: descend into excluded directories, sometimes
copier: add more context to a couple of error messages
copier: check an error earlier
+ copier: log stderr output as debug on success
+ Update nix pin with `make nixpkgs`
Set directory ownership when copied with ID mapping
-
-## v1.16.4 (2020-10-01)
+ build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0
+ build(deps): bump github.com/containers/common from 0.23.0 to 0.24.0
+ Cirrus: Remove bors artifacts
+ Sort build flag definitions alphabetically
ADD: only expand archives at the right time
-
-## v1.16.3 (2020-09-30)
- Lint: Use same linters as podman
+ Remove configuration for bors
+ Shell Completion for podman build flags
+ Bump c/common to v0.24.0
+ New CI check: xref --help vs man pages
+ CI: re-enable several linters
+ Move --userns-uid-map/--userns-gid-map description into buildah man page
add: preserve ownerships and permissions on ADDed archives
- chroot: fix handling of errno seccomp rules
- git-validation.sh: set the base for comparison to v1.16.0
+ Makefile: tweak the cross-compile target
+ Bump containers/common to v0.23.0
chroot: create bind mount targets 0755 instead of 0700
-
-## v1.16.2 (2020-09-21)
+ Change call to Split() to safer SplitN()
+ chroot: fix handling of errno seccomp rules
+ build(deps): bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
+ Add In Progress section to contributing
+ integration tests: make sure tests run in ${topdir}/tests
+ Run(): ignore containers.conf's environment configuration
+ Warn when setting healthcheck in OCI format
+ Cirrus: Skip git-validate on branches
+ tools: update git-validation to the latest commit
+ tools: update golangci-lint to v1.18.0
+ Add a few tests of push command
Add(): fix handling of relative paths with no ContextDir
-
-## v1.16.1 (2020-09-10)
- CI: use release-1.16 as the basis for validation tests
+ build(deps): bump github.com/containers/common from 0.21.0 to 0.22.0
+ Lint: Use same linters as podman
+ Validate: reference HEAD
+ Fix buildah mount to display container names not ids
+ Update nix pin with `make nixpkgs`
+ Add missing --format option in buildah from man page
+ Fix up code based on codespell
+ build(deps): bump github.com/openshift/imagebuilder from 1.1.6 to 1.1.7
+ build(deps): bump github.com/containers/storage from 1.23.4 to 1.23.5
+ Improve buildah completions
+ Cirrus: Fix validate commit epoch
+ Fix bash completion of manifest flags
+ Uniform some man pages
+ Update Buildah Tutorial to address BZ1867426
+ Update bash completion of `manifest add` sub command
copier.Get(): hard link targets shouldn't be relative paths
+ build(deps): bump github.com/onsi/gomega from 1.10.1 to 1.10.2
+ Pass timestamp down to history lines
+ Timestamp gets updated everytime you inspect an image
+ bud.bats: use absolute paths in newly-added tests
+ contrib/cirrus/lib.sh: don't use CN for the hostname
+ tests: Add some tests
+ Update `manifest add` man page
+ Extend flags of `manifest add`
+ build(deps): bump github.com/containers/storage from 1.23.3 to 1.23.4
+ build(deps): bump github.com/onsi/ginkgo from 1.14.0 to 1.14.1
+ Bump to v1.17.0-dev
+ CI: expand cross-compile checks
## v1.16.0 (2020-09-03)
fix build on 32bit arches
diff --git a/vendor/github.com/containers/buildah/CONTRIBUTING.md b/vendor/github.com/containers/buildah/CONTRIBUTING.md
index 553cb15dc..e5171f8a4 100644
--- a/vendor/github.com/containers/buildah/CONTRIBUTING.md
+++ b/vendor/github.com/containers/buildah/CONTRIBUTING.md
@@ -8,6 +8,7 @@ that we follow.
## Topics
* [Reporting Issues](#reporting-issues)
+* [Working On Issues](#working-on-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Sign your PRs](#sign-your-prs)
* [Merge bot interaction](#merge-bot-interaction)
@@ -30,6 +31,17 @@ The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
+## Working On Issues
+
+Once you have decided to contribute to Buildah by working on an issue, check our
+backlog of [open issues](https://github.com/containers/buildah/issues) and look
+for any that do not have an "In Progress" label attached to them. Issues are often
+assigned to someone, to be worked on at a later time. If you have the
+time to work on the issue now, add yourself as an assignee and set the
+"In Progress" label if you're a member of the "Containers" GitHub organization.
+If you cannot set the label, just add a quick comment on the issue asking that
+the "In Progress" label be set, and a member will do so for you.
+
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
@@ -120,54 +132,14 @@ commit automatically with `git commit -s`.
## Merge bot interaction
Maintainers should never merge anything directly into upstream
-branches. Instead, interact with the [bors-ng bot](https://bors.tech/)
-through PR comments as summarized below. This ensures all upstream
+branches. Instead, interact with the [openshift-ci-robot](https://github.com/openshift-ci-robot/)
+through PR comments as summarized [here](https://prow.ci.openshift.org/command-help?repo=containers%2Fbuildah).
+This ensures all upstream
branches contain commits in a predictable order, and that every commit
has passed automated testing at some point in the past. A
-[Maintainer portal](https://app.bors.tech/repositories/22803)
+[Maintainer portal](https://prow.ci.openshift.org/pr?query=is%3Apr%20state%3Aopen%20repo%3Acontainers%2Fbuildah)
is available, showing all PRs awaiting review and approval.
-### Common [bors-ng comment commands](https://bors.tech/documentation/):
-
-(must be on a single comment-line, without any other extraneous text)
-
-* `bors r+` - Check the current number of Github Code-review Approvals.
- If the PR has that many approvals or more, the bot will add the PR into
- the queue for testing and possible merging. Both the success criteria
- and minimum approval number are set in the configuration file (see below).
-* `bors retry` - Re-run whatever request was previously issued to the bot. Useful
- when there was a testing flake upon attempted merge.
-* `bors try` - Optional / simulate the actions of `bors r+` (see above) having
- met the minimum number of required approvals. The result will be reported
- back as a comment in the PR, by the bors bot.
-* `bors ping` - Confirm bot is functioning, it will post a comment in the PR if so.
-
-
-### Interaction/Monitoring
-
-Bors-ng relies on the regular branch-testing occurring when it updates the
-special branches ('trying' or 'staging'). Therefore you may use the full
-capabilities available within the CI system. ***Note:*** A single bors-ng
-run may include multiple PRs at once.
-
-The easiest way to access a running 'bors try' or 'bors r+' run, is by clicking the
-yellow-circle "status" icon that shows up in an affected PR, for example:
-
-![Screenshot of Github PR](contrib/cirrus/bors-ng.png)
-
-This will cause a pop-up window to appear with the relevant test-statuses and 'details'
-links available. Since bors-ng will wait for success, as long as one test is still
-running, it's possible to manually re-run any failed tests (e.g. due to flakes).
-
-
-### Configuration of bors-ng:
-
-* The `bors.toml` file in the repository root. This controls
- runtime options for timeouts, blocking labels, and required status names.
-* The [settings page](https://app.bors.tech/repositories/22803/settings).
- This contains mostly security-related and branch-control options.
-
-
## Communications
For general questions or discussions, please use the
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 5b0e00850..e70dd161d 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -34,7 +34,7 @@ RUNC_COMMIT := v1.0.0-rc8
LIBSECCOMP_COMMIT := release-2.3
EXTRA_LDFLAGS ?=
-LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
+BUILDAH_LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go cmd/buildah/*.go copier/*.go docker/*.go pkg/blobcache/*.go pkg/cli/*.go pkg/parse/*.go util/*.go
LINTFLAGS ?=
@@ -56,18 +56,22 @@ static:
.PHONY: bin/buildah
bin/buildah: $(SOURCES)
- $(GO_BUILD) $(LDFLAGS) -o $@ $(BUILDFLAGS) ./cmd/buildah
+ $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./cmd/buildah
.PHONY: buildah
buildah: bin/buildah
-.PHONY: bin/buildah.darwin
-bin/buildah.darwin:
- GOOS=darwin $(GO_BUILD) $(LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
+.PHONY: cross
+cross: bin/buildah.darwin.amd64 bin/buildah.linux.386 bin/buildah.linux.amd64 bin/buildah.linux.arm64 bin/buildah.linux.arm bin/buildah.linux.mips64 bin/buildah.linux.mips64le bin/buildah.linux.mips bin/buildah.linux.mipsle bin/buildah.linux.ppc64 bin/buildah.linux.ppc64le bin/buildah.linux.riscv64 bin/buildah.linux.s390x bin/buildah.windows.amd64.exe
+
+.PHONY: bin/buildah.%
+bin/buildah.%:
+ mkdir -p ./bin
+ GOOS=$(word 2,$(subst ., ,$@)) GOARCH=$(word 3,$(subst ., ,$@)) $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ -tags "containers_image_openpgp" ./cmd/buildah
.PHONY: bin/imgtype
bin/imgtype: *.go docker/*.go util/*.go tests/imgtype/imgtype.go
- $(GO_BUILD) $(LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go
+ $(GO_BUILD) $(BUILDAH_LDFLAGS) -o $@ $(BUILDFLAGS) ./tests/imgtype/imgtype.go
.PHONY: clean
clean:
@@ -85,12 +89,13 @@ gopath:
test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd)
codespell:
- codespell -S build,buildah,buildah.spec,imgtype,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od
+ codespell -S Makefile,build,buildah,buildah.spec,imgtype,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od
.PHONY: validate
validate: install.tools
@./tests/validate/whitespace.sh
@./tests/validate/git-validation.sh
+ @./hack/xref-helpmsgs-manpages
.PHONY: install.tools
install.tools:
diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md
index 1605aaf73..44a54dbee 100644
--- a/vendor/github.com/containers/buildah/README.md
+++ b/vendor/github.com/containers/buildah/README.md
@@ -4,8 +4,6 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
-[![Bors enabled](https://bors.tech/images/badge_small.svg)](https://app.bors.tech/repositories/22803)
-
The Buildah package provides a command line tool that can be used to
* create a working container, either from scratch or using an image as a starting point
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 45b5c6a94..6cfd6a09f 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -71,14 +71,14 @@ func sourceIsRemote(source string) bool {
}
// getURL writes a tar archive containing the named content
-func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
+func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer) error {
url, err := url.Parse(src)
if err != nil {
- return errors.Wrapf(err, "error parsing URL %q", url)
+ return err
}
response, err := http.Get(src)
if err != nil {
- return errors.Wrapf(err, "error parsing URL %q", url)
+ return err
}
defer response.Body.Close()
// Figure out what to name the new content.
@@ -93,7 +93,7 @@ func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
- return errors.Wrapf(err, "error parsing last-modified time %q", lastModified)
+ return errors.Wrapf(err, "error parsing last-modified time")
}
date = d
}
@@ -122,10 +122,18 @@ func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
// Write the output archive. Set permissions for compatibility.
tw := tar.NewWriter(writer)
defer tw.Close()
+ uid := 0
+ gid := 0
+ if chown != nil {
+ uid = chown.UID
+ gid = chown.GID
+ }
hdr := tar.Header{
Typeflag: tar.TypeReg,
Name: name,
Size: size,
+ Uid: uid,
+ Gid: gid,
Mode: 0600,
ModTime: date,
}
@@ -323,7 +331,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
pipeReader, pipeWriter := io.Pipe()
wg.Add(1)
go func() {
- getErr = getURL(src, mountPoint, renameTarget, pipeWriter)
+ getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter)
pipeWriter.Close()
wg.Done()
}()
@@ -341,9 +349,9 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
putOptions := copier.PutOptions{
UIDMap: destUIDMap,
GIDMap: destGIDMap,
- ChownDirs: chownDirs,
+ ChownDirs: nil,
ChmodDirs: nil,
- ChownFiles: chownFiles,
+ ChownFiles: nil,
ChmodFiles: nil,
}
putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
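
The add.go change above threads an ADD --chown request for URL sources into the tar header that getURL() synthesizes, instead of applying ownership later via PutOptions. A minimal standalone sketch of that idea, using only the standard library (the helper name writeOwnedEntry is illustrative and not part of buildah):

    package main

    import (
    	"archive/tar"
    	"bytes"
    	"os"
    	"time"
    )

    // writeOwnedEntry wraps a single regular file in a tar stream and records
    // the requested owner directly in the header, the way the hunk above does
    // for content fetched from a URL.
    func writeOwnedEntry(name string, body []byte, uid, gid int) ([]byte, error) {
    	var buf bytes.Buffer
    	tw := tar.NewWriter(&buf)
    	hdr := tar.Header{
    		Typeflag: tar.TypeReg,
    		Name:     name,
    		Size:     int64(len(body)),
    		Uid:      uid, // ownership requested via --chown
    		Gid:      gid,
    		Mode:     0600,
    		ModTime:  time.Now(),
    	}
    	if err := tw.WriteHeader(&hdr); err != nil {
    		return nil, err
    	}
    	if _, err := tw.Write(body); err != nil {
    		return nil, err
    	}
    	if err := tw.Close(); err != nil {
    		return nil, err
    	}
    	return buf.Bytes(), nil
    }

    func main() {
    	data, err := writeOwnedEntry("repo.txt", []byte("hello\n"), 1000, 1000)
    	if err != nil {
    		panic(err)
    	}
    	os.Stdout.Write(data) // pipe into `tar tvf -` to see the 1000:1000 owner
    }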
diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go
index adde901fd..789233405 100644
--- a/vendor/github.com/containers/buildah/bind/mount.go
+++ b/vendor/github.com/containers/buildah/bind/mount.go
@@ -270,7 +270,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
}
return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint)
}
- if mount.Major != int(unix.Major(st.Dev)) || mount.Minor != int(unix.Minor(st.Dev)) {
+ if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { // nolint:unconvert (required for some OS/arch combinations)
logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint)
continue
}
diff --git a/vendor/github.com/containers/buildah/bors.toml b/vendor/github.com/containers/buildah/bors.toml
deleted file mode 100644
index df68695e8..000000000
--- a/vendor/github.com/containers/buildah/bors.toml
+++ /dev/null
@@ -1,44 +0,0 @@
-# Bors-ng is a service which provides a merge and review bot for github PRs.
-# When approved for merging (`bors r+`) or test merging (`bors try`), all
-# pending PRs at the time will be merged together in one of two special
-# branches. Either 'staging' or 'trying'. In the case of `staging` branch,
-# when all status tests pass (see below) the serialized set of merges will become
-# the new destination branch HEAD (i.e. master). This guarantees there is never
-# any conflicts with PR merge order on the destination branch(es).
-#
-# Note: The branches 'staging.tmp' and 'trying.tmp' must always be ignored
-# by _all_ CI systems. They are by bors temporarily, and may go away at
-# unpredictable times.
-#
-# Format Ref: https://bors.tech/documentation/#configuration-borstoml
-#
-# status
-# ------------------
-# Selects which tests are required for merging, matching against values
-# from BOTH the older github 'status API' (ref: https://developer.github.com/v3/repos/statuses
-# /#list-statuses-for-a-specific-ref) AND newer 'checks API'. Ref: https://developer.github.com/v3/checks
-# /runs/#list-check-runs-in-a-check-suite both return JSON:
-#
-# Status API: Matches against '[].context' values
-# Checks API: Matches against 'check_runs[].name' values
-#
-# Note: The wild-card character '%' is available.
-status = [
- "cirrus-ci/success",
-]
-
-# Same as 'status' (above) but statuses that must pass on every PR
-pr_status = [
- "cirrus-ci/success",
-]
-
-# Cirrus-CI Max Timeout is 60 * 60 * 2
-timeout_sec = 7200
-
-# List of strings: PR Labels that must NOT be present
-block_labels = []
-
-# The number of required GitHub code reviews set 'Approve'
-# before 'bors r+' will allow merging. Does not require
-# the reviewer being in the 'Reviewers' list for the PR.
-required_approvals = 0
diff --git a/vendor/github.com/containers/buildah/btrfs_installed_tag.sh b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
index c4d99f377..f2f2b33c8 100644
--- a/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
+++ b/vendor/github.com/containers/buildah/btrfs_installed_tag.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-cc -E - > /dev/null 2> /dev/null << EOF
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
if test $? -ne 0 ; then
diff --git a/vendor/github.com/containers/buildah/btrfs_tag.sh b/vendor/github.com/containers/buildah/btrfs_tag.sh
index 59cb969ad..ea753d4d0 100644
--- a/vendor/github.com/containers/buildah/btrfs_tag.sh
+++ b/vendor/github.com/containers/buildah/btrfs_tag.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-cc -E - > /dev/null 2> /dev/null << EOF
+${CPP:-${CC:-cc} -E} ${CPPFLAGS} - > /dev/null 2> /dev/null << EOF
#include <btrfs/version.h>
EOF
if test $? -ne 0 ; then
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index d27689d4a..9ab47e60c 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -11,13 +11,13 @@ import (
"sort"
"time"
+ "github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/opencontainers/runc/libcontainer/configs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.16.5"
+ Version = "1.18.0"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -42,43 +42,28 @@ const (
)
// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
-type PullPolicy int
+type PullPolicy = define.PullPolicy
const (
// PullIfMissing is one of the values that BuilderOptions.PullPolicy
// can take, signalling that the source image should be pulled from a
// registry if a local copy of it is not already present.
- PullIfMissing PullPolicy = iota
+ PullIfMissing = define.PullIfMissing
// PullAlways is one of the values that BuilderOptions.PullPolicy can
// take, signalling that a fresh, possibly updated, copy of the image
// should be pulled from a registry before the build proceeds.
- PullAlways
+ PullAlways = define.PullAlways
// PullIfNewer is one of the values that BuilderOptions.PullPolicy
// can take, signalling that the source image should only be pulled
// from a registry if a local copy is not already present or if a
// newer version of the image is present on the repository.
- PullIfNewer
+ PullIfNewer = define.PullIfNewer
// PullNever is one of the values that BuilderOptions.PullPolicy can
// take, signalling that the source image should not be pulled from a
// registry if a local copy of it is not already present.
- PullNever
+ PullNever = define.PullNever
)
-// String converts a PullPolicy into a string.
-func (p PullPolicy) String() string {
- switch p {
- case PullIfMissing:
- return "PullIfMissing"
- case PullAlways:
- return "PullAlways"
- case PullIfNewer:
- return "PullIfNewer"
- case PullNever:
- return "PullNever"
- }
- return fmt.Sprintf("unrecognized policy %d", p)
-}
-
// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled,
// or NetworkEnabled.
type NetworkConfigurationPolicy int
@@ -202,7 +187,7 @@ type Builder struct {
// ContentDigester counts the digest of all Add()ed content
ContentDigester CompositeDigester
// Devices are the additional devices to add to the containers
- Devices []configs.Device
+ Devices ContainerDevices
}
// BuilderInfo are used as objects to display container information
@@ -231,7 +216,7 @@ type BuilderInfo struct {
CNIConfigDir string
IDMappingOptions IDMappingOptions
History []v1.History
- Devices []configs.Device
+ Devices ContainerDevices
}
// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
@@ -239,15 +224,6 @@ type BuilderInfo struct {
func GetBuildInfo(b *Builder) BuilderInfo {
history := copyHistory(b.OCIv1.History)
history = append(history, copyHistory(b.PrependedEmptyLayers)...)
- now := time.Now().UTC()
- created := &now
- history = append(history, v1.History{
- Created: created,
- CreatedBy: b.ImageCreatedBy,
- Author: b.Maintainer(),
- Comment: b.ImageHistoryComment,
- EmptyLayer: false,
- })
history = append(history, copyHistory(b.AppendedEmptyLayers)...)
sort.Strings(b.Capabilities)
return BuilderInfo{
@@ -409,7 +385,7 @@ type BuilderOptions struct {
// Format for the container image
Format string
// Devices are the additional devices to add to the containers
- Devices []configs.Device
+ Devices ContainerDevices
//DefaultEnv for containers
DefaultEnv []string
// MaxPullRetries is the maximum number of attempts we'll make to pull
@@ -477,7 +453,7 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
- return nil, errors.Wrapf(err, "error reading %q", filepath.Join(cdir, stateFile))
+ return nil, err
}
b := &Builder{}
if err = json.Unmarshal(buildstate, &b); err != nil {
@@ -500,7 +476,7 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
}
abs, err := filepath.Abs(path)
if err != nil {
- return nil, errors.Wrapf(err, "error turning %q into an absolute path", path)
+ return nil, err
}
builderMatchesPath := func(b *Builder, path string) bool {
return (b.MountPoint == path)
@@ -516,7 +492,7 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
continue
}
- return nil, errors.Wrapf(err, "error reading %q", filepath.Join(cdir, stateFile))
+ return nil, err
}
b := &Builder{}
err = json.Unmarshal(buildstate, &b)
@@ -552,7 +528,7 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
continue
}
- return nil, errors.Wrapf(err, "error reading %q", filepath.Join(cdir, stateFile))
+ return nil, err
}
b := &Builder{}
err = json.Unmarshal(buildstate, &b)
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 50648cb7f..f59d426ae 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,29 +1,124 @@
-- Changelog for v1.16.5 (2020-10-21)
+- Changelog for v1.18.0 (2020-11-16)
+ * Fix testing error caused by simultaneous merge
+ * Vendor in containers/storage v1.24.0
+ * short-names aliasing
+ * Add --policy flag to buildah pull
+ * Stop overwrapping and stuttering
+ * copier.Get(): ignore ENOTSUP/ENOSYS when listing xattrs
+ * Run: don't forcibly disable UTS namespaces in rootless mode
+ * test: ensure non-directory in a Dockerfile path is handled correctly
+ * Add a few tests for `pull` command
+ * Fix buildah config --cmd to handle array
+ * build(deps): bump github.com/containers/storage from 1.23.8 to 1.23.9
+ * Fix NPE when Dockerfile path contains non-directory entries
+ * Update buildah bud man page from podman build man page
+ * Move declaration of decryption-keys to common cli
+ * Run: correctly call copier.Mkdir
+ * util: digging UID/GID out of os.FileInfo should work on Unix
+ * imagebuildah.getImageTypeAndHistoryAndDiffIDs: cache results
+ * Verify userns-uid-map and userns-gid-map input
+ * Use CPP, CC and flags in dep check scripts
+ * Avoid overriding LDFLAGS in Makefile
+ * ADD: handle --chown on URLs
+ * Update nix pin with `make nixpkgs`
+ * (*Builder).Run: MkdirAll: handle EEXIST error
+ * copier: try to force loading of nsswitch modules before chroot()
+ * fix MkdirAll usage
+ * build(deps): bump github.com/containers/common from 0.26.2 to 0.26.3
+ * build(deps): bump github.com/containers/storage from 1.23.7 to 1.23.8
+ * Use osusergo build tag for static build
+ * imagebuildah: cache should take image format into account
+ * Bump to v1.18.0-dev
+
+- Changelog for v1.17.0 (2020-10-29)
+ * Handle cases where other tools mount/unmount containers
+ * overlay.MountReadOnly: support RO overlay mounts
+ * overlay: use fusermount for rootless umounts
+ * overlay: fix umount
+ * Switch default log level of Buildah to Warn. Users need to see these messages
+ * Drop error messages about OCI/Docker format to Warning level
+ * build(deps): bump github.com/containers/common from 0.26.0 to 0.26.2
+ * tests/testreport: adjust for API break in storage v1.23.6
+ * build(deps): bump github.com/containers/storage from 1.23.5 to 1.23.7
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.6.5 to 1.6.6
+ * copier: put: ignore Typeflag="g"
+ * Use curl to get repo file (fix #2714)
+ * build(deps): bump github.com/containers/common from 0.25.0 to 0.26.0
+ * build(deps): bump github.com/spf13/cobra from 1.0.0 to 1.1.1
+ * Remove docs that refer to bors, since we're not using it
+ * Buildah bud should not use stdin by default
+ * bump containerd, docker, and golang.org/x/sys
+ * Makefile: cross: remove windows.386 target
* copier.copierHandlerPut: don't check length when there are errors
- * CI: run gating tasks with a lot more memory
- * Run(): ignore containers.conf's environment configuration
+ * Stop excessive wrapping
+ * CI: require that conformance tests pass
* bump(github.com/openshift/imagebuilder) to v1.1.8
+ * Skip tlsVerify insecure BUILD_REGISTRY_SOURCES
+ * Fix build path wrong https://github.com/containers/podman/issues/7993
+ * refactor pullpolicy to avoid deps
+ * build(deps): bump github.com/containers/common from 0.24.0 to 0.25.0
+ * CI: run gating tasks with a lot more memory
* ADD and COPY: descend into excluded directories, sometimes
* copier: add more context to a couple of error messages
* copier: check an error earlier
+ * copier: log stderr output as debug on success
+ * Update nix pin with `make nixpkgs`
* Set directory ownership when copied with ID mapping
-
-- Changelog for v1.16.4 (2020-10-01)
+ * build(deps): bump github.com/sirupsen/logrus from 1.6.0 to 1.7.0
+ * build(deps): bump github.com/containers/common from 0.23.0 to 0.24.0
+ * Cirrus: Remove bors artifacts
+ * Sort build flag definitions alphabetically
* ADD: only expand archives at the right time
-
-- Changelog for v1.16.3 (2020-09-30)
- * Lint: Use same linters as podman
+ * Remove configuration for bors
+ * Shell Completion for podman build flags
+ * Bump c/common to v0.24.0
+ * New CI check: xref --help vs man pages
+ * CI: re-enable several linters
+ * Move --userns-uid-map/--userns-gid-map description into buildah man page
* add: preserve ownerships and permissions on ADDed archives
- * chroot: fix handling of errno seccomp rules
- * git-validation.sh: set the base for comparison to v1.16.0
+ * Makefile: tweak the cross-compile target
+ * Bump containers/common to v0.23.0
* chroot: create bind mount targets 0755 instead of 0700
-
-- Changelog for v1.16.2 (2020-09-21)
+ * Change call to Split() to safer SplitN()
+ * chroot: fix handling of errno seccomp rules
+ * build(deps): bump github.com/containers/image/v5 from 5.5.2 to 5.6.0
+ * Add In Progress section to contributing
+ * integration tests: make sure tests run in ${topdir}/tests
+ * Run(): ignore containers.conf's environment configuration
+ * Warn when setting healthcheck in OCI format
+ * Cirrus: Skip git-validate on branches
+ * tools: update git-validation to the latest commit
+ * tools: update golangci-lint to v1.18.0
+ * Add a few tests of push command
* Add(): fix handling of relative paths with no ContextDir
-
-- Changelog for v1.16.1 (2020-09-10)
- * CI: use release-1.16 as the basis for validation tests
+ * build(deps): bump github.com/containers/common from 0.21.0 to 0.22.0
+ * Lint: Use same linters as podman
+ * Validate: reference HEAD
+ * Fix buildah mount to display container names not ids
+ * Update nix pin with `make nixpkgs`
+ * Add missing --format option in buildah from man page
+ * Fix up code based on codespell
+ * build(deps): bump github.com/openshift/imagebuilder from 1.1.6 to 1.1.7
+ * build(deps): bump github.com/containers/storage from 1.23.4 to 1.23.5
+ * Improve buildah completions
+ * Cirrus: Fix validate commit epoch
+ * Fix bash completion of manifest flags
+ * Uniform some man pages
+ * Update Buildah Tutorial to address BZ1867426
+ * Update bash completion of `manifest add` sub command
* copier.Get(): hard link targets shouldn't be relative paths
+ * build(deps): bump github.com/onsi/gomega from 1.10.1 to 1.10.2
+ * Pass timestamp down to history lines
+ * Timestamp gets updated everytime you inspect an image
+ * bud.bats: use absolute paths in newly-added tests
+ * contrib/cirrus/lib.sh: don't use CN for the hostname
+ * tests: Add some tests
+ * Update `manifest add` man page
+ * Extend flags of `manifest add`
+ * build(deps): bump github.com/containers/storage from 1.23.3 to 1.23.4
+ * build(deps): bump github.com/onsi/ginkgo from 1.14.0 to 1.14.1
+ * Bump to v1.17.0-dev
+ * CI: expand cross-compile checks
- Changelog for v1.16.0 (2020-09-03)
* fix build on 32bit arches
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 38601fbad..7a57021aa 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -167,17 +167,17 @@ var (
// variable, if it's set. The contents are expected to be a JSON-encoded
// github.com/openshift/api/config/v1.Image, set by an OpenShift build
// controller that arranged for us to be run in a container.
-func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error {
+func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (insecure bool, err error) {
transport := dest.Transport()
if transport == nil {
- return nil
+ return false, nil
}
if transport.Name() != docker.Transport.Name() {
- return nil
+ return false, nil
}
dref := dest.DockerReference()
if dref == nil || reference.Domain(dref) == "" {
- return nil
+ return false, nil
}
if registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES"); ok && len(registrySources) > 0 {
@@ -188,7 +188,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error
AllowedRegistries []string `json:"allowedRegistries,omitempty"`
}
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
- return errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
+ return false, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
}
blocked := false
if len(sources.BlockedRegistries) > 0 {
@@ -199,7 +199,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error
}
}
if blocked {
- return errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
+ return false, errors.Errorf("%s registry at %q denied by policy: it is in the blocked registries list", forWhat, reference.Domain(dref))
}
allowed := true
if len(sources.AllowedRegistries) > 0 {
@@ -211,10 +211,13 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) error
}
}
if !allowed {
- return errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
+ return false, errors.Errorf("%s registry at %q denied by policy: not in allowed registries list", forWhat, reference.Domain(dref))
+ }
+ if len(sources.InsecureRegistries) > 0 {
+ return true, nil
}
}
- return nil
+ return false, nil
}
// Commit writes the contents of the container, along with its updated
@@ -278,9 +281,18 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}()
// Check if the commit is blocked by $BUILDER_REGISTRY_SOURCES.
- if err := checkRegistrySourcesAllows("commit to", dest); err != nil {
+ insecure, err := checkRegistrySourcesAllows("commit to", dest)
+ if err != nil {
return imgID, nil, "", err
}
+ if insecure {
+ if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ return imgID, nil, "", errors.Errorf("can't require tls verification on an insecured registry")
+ }
+ systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ systemContext.OCIInsecureSkipTLSVerify = true
+ systemContext.DockerDaemonInsecureSkipTLSVerify = true
+ }
if len(options.AdditionalTags) > 0 {
names, err := util.ExpandNames(options.AdditionalTags, "", systemContext, b.store)
if err != nil {
@@ -291,9 +303,18 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error parsing image name %q as an image reference", name)
}
- if err := checkRegistrySourcesAllows("commit to", additionalDest); err != nil {
+ insecure, err := checkRegistrySourcesAllows("commit to", additionalDest)
+ if err != nil {
return imgID, nil, "", err
}
+ if insecure {
+ if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ return imgID, nil, "", errors.Errorf("can't require tls verification on an insecured registry")
+ }
+ systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ systemContext.OCIInsecureSkipTLSVerify = true
+ systemContext.DockerDaemonInsecureSkipTLSVerify = true
+ }
}
}
logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest))
@@ -398,7 +419,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
if options.IIDFile != "" {
if err = ioutil.WriteFile(options.IIDFile, []byte(img.ID), 0644); err != nil {
- return imgID, nil, "", errors.Wrapf(err, "failed to write image ID to file %q", options.IIDFile)
+ return imgID, nil, "", err
}
}
}
@@ -471,9 +492,18 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
}
// Check if the push is blocked by $BUILDER_REGISTRY_SOURCES.
- if err := checkRegistrySourcesAllows("push to", dest); err != nil {
+ insecure, err := checkRegistrySourcesAllows("push to", dest)
+ if err != nil {
return nil, "", err
}
+ if insecure {
+ if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ return nil, "", errors.Errorf("can't require tls verification on an insecured registry")
+ }
+ systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ systemContext.OCIInsecureSkipTLSVerify = true
+ systemContext.DockerDaemonInsecureSkipTLSVerify = true
+ }
logrus.Debugf("pushing image to reference %q is allowed by policy", transports.ImageName(dest))
// Copy everything.
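
The commit.go hunks above make checkRegistrySourcesAllows() also report whether the destination registry is listed as insecure, so Commit() and Push() can relax TLS verification for it. A rough, self-contained sketch of the $BUILD_REGISTRY_SOURCES parsing involved (field names assumed from the struct shown in the hunk; the sample JSON value is made up):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"
    )

    type registrySources struct {
    	InsecureRegistries []string `json:"insecureRegistries,omitempty"`
    	BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
    	AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
    }

    func main() {
    	// Real builds get this JSON from the BUILD_REGISTRY_SOURCES environment
    	// variable, set by an OpenShift build controller; this fallback value is made up.
    	raw := os.Getenv("BUILD_REGISTRY_SOURCES")
    	if raw == "" {
    		raw = `{"insecureRegistries":["registry.example.local:5000"]}`
    	}
    	var sources registrySources
    	if err := json.Unmarshal([]byte(raw), &sources); err != nil {
    		fmt.Fprintf(os.Stderr, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %v\n", raw, err)
    		os.Exit(1)
    	}
    	// When any insecure registries are listed, the commit/push paths above flip
    	// the SystemContext's TLS-verification settings to "skip" unless the user
    	// explicitly demanded verification.
    	fmt.Println("insecure registries listed:", len(sources.InsecureRegistries) > 0)
    }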
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index 32f2171eb..08beeb407 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -220,7 +220,7 @@ func (b *Builder) ClearOnBuild() {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetOnBuild(onBuild string) {
if onBuild != "" && b.Format != Dockerv2ImageManifest {
- logrus.Errorf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild)
+ logrus.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild)
}
b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild)
}
@@ -252,7 +252,7 @@ func (b *Builder) Shell() []string {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetShell(shell []string) {
if len(shell) > 0 && b.Format != Dockerv2ImageManifest {
- logrus.Errorf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell)
+ logrus.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell)
}
b.Docker.Config.Shell = copyStringSlice(shell)
@@ -489,7 +489,7 @@ func (b *Builder) Domainname() string {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetDomainname(name string) {
if name != "" && b.Format != Dockerv2ImageManifest {
- logrus.Errorf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name)
+ logrus.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name)
}
b.Docker.Config.Domainname = name
}
@@ -511,7 +511,7 @@ func (b *Builder) Comment() string {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetComment(comment string) {
if comment != "" && b.Format != Dockerv2ImageManifest {
- logrus.Errorf("COMMENT is not supported for OCI image format, comment %s will be ignored. Must use `docker` format", comment)
+ logrus.Warnf("COMMENT is not supported for OCI image format, comment %s will be ignored. Must use `docker` format", comment)
}
b.Docker.Comment = comment
}
@@ -565,6 +565,9 @@ func (b *Builder) Healthcheck() *docker.HealthConfig {
func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
b.Docker.Config.Healthcheck = nil
if config != nil {
+ if b.Format != Dockerv2ImageManifest {
+ logrus.Warnf("Healthcheck is not supported for OCI image format and will be ignored. Must use `docker` format")
+ }
b.Docker.Config.Healthcheck = &docker.HealthConfig{
Test: copyStringSlice(config.Test),
Interval: config.Interval,
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
index 42ddd452f..84b636202 100644
--- a/vendor/github.com/containers/buildah/copier/copier.go
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -7,7 +7,9 @@ import (
"fmt"
"io"
"io/ioutil"
+ "net"
"os"
+ "os/user"
"path/filepath"
"strconv"
"strings"
@@ -35,6 +37,14 @@ const (
func init() {
reexec.Register(copierCommand, copierMain)
+ // Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic
+ // modules to handle looking up user and host information) to load modules that match the libc
+ // our binary is currently using. Hopefully they're loaded on first use, so that they won't
+ // need to be loaded after we've chrooted into the rootfs, which could include modules that
+ // don't match our libc and which can't be loaded, or modules which we don't want to execute
+ // because we don't trust their code.
+ _, _ = user.Lookup("buildah")
+ _, _ = net.LookupHost("localhost")
}
// isArchivePath returns true if the specified path can be read like a (possibly
@@ -580,6 +590,12 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
}
return nil, err
}
+ loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n")
+ if len(loggedOutput) > 0 {
+ for _, output := range strings.Split(loggedOutput, "\n") {
+ logrus.Debug(output)
+ }
+ }
if readError != nil {
return nil, errors.Wrapf(readError, "error passing bulk input to subprocess")
}
@@ -1362,7 +1378,8 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
hdr.Uid, hdr.Gid = *fileUID, *fileGID
}
}
- // make sure the parent directory exists
+ // make sure the parent directory exists, including for tar.TypeXGlobalHeader entries
+ // that we otherwise ignore, because that's what docker build does with them
path := filepath.Join(targetDirectory, cleanerReldirectory(filepath.FromSlash(hdr.Name)))
if err := ensureDirectoryUnderRoot(filepath.Dir(path)); err != nil {
return err
@@ -1380,6 +1397,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
// create the new item
devMajor := uint32(hdr.Devmajor)
devMinor := uint32(hdr.Devminor)
+ mode := os.FileMode(hdr.Mode) & os.ModePerm
switch hdr.Typeflag {
// no type flag for sockets
default:
@@ -1439,6 +1457,13 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
err = mkfifo(path, 0600)
}
}
+ case tar.TypeXGlobalHeader:
+ // Per archive/tar, PAX uses these to specify key=value information that
+ // applies to all subsequent entries. The one reported in #2717,
+ // https://www.openssl.org/source/openssl-1.1.1g.tar.gz, includes a
+ // comment=(40 byte hex string) at the start, possibly a digest.
+ // Don't try to create whatever path was used for the header.
+ goto nextHeader
}
// check for errors
if err != nil {
@@ -1457,7 +1482,6 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid)
}
// set permissions, except for symlinks, since we don't have lchmod
- mode := os.FileMode(hdr.Mode) & os.ModePerm
if hdr.Typeflag != tar.TypeSymlink {
if err = os.Chmod(path, mode); err != nil {
return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode)
@@ -1485,6 +1509,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime)
}
+ nextHeader:
hdr, err = tr.Next()
}
if err != io.EOF {
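
A minimal sketch of the extraction-side behavior added above for PAX global headers: archive/tar surfaces Typeflag 'g' entries as tar.TypeXGlobalHeader, and they carry key=value metadata rather than a path to create, so the put handler now jumps straight to the next header. This standalone filter (reading an uncompressed tar on stdin) demonstrates the same skip:

    package main

    import (
    	"archive/tar"
    	"fmt"
    	"io"
    	"os"
    )

    func main() {
    	// e.g. gunzip -c openssl-1.1.1g.tar.gz | go run skipglobal.go
    	tr := tar.NewReader(os.Stdin)
    	for {
    		hdr, err := tr.Next()
    		if err == io.EOF {
    			return
    		}
    		if err != nil {
    			fmt.Fprintln(os.Stderr, err)
    			os.Exit(1)
    		}
    		if hdr.Typeflag == tar.TypeXGlobalHeader {
    			// Nothing to create on disk for this entry; move on.
    			continue
    		}
    		fmt.Println(hdr.Name)
    	}
    }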
diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go
index 71769989c..c757adcc8 100644
--- a/vendor/github.com/containers/buildah/copier/xattrs.go
+++ b/vendor/github.com/containers/buildah/copier/xattrs.go
@@ -45,6 +45,11 @@ func Lgetxattrs(path string) (map[string]string, error) {
listSize *= 2
continue
}
+ if (unwrapError(err) == syscall.ENOTSUP) || (unwrapError(err) == syscall.ENOSYS) {
+ // treat these errors listing xattrs as equivalent to "no xattrs"
+ list = list[:0]
+ break
+ }
return nil, errors.Wrapf(err, "error listing extended attributes of %q", path)
}
list = list[:size]
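
A small Linux-only sketch of the xattr change above, calling golang.org/x/sys/unix directly rather than the copier's wrappers: ENOTSUP and ENOSYS from llistxattr(2) are treated as "this file has no extended attributes" instead of a fatal error (the ERANGE retry loop used by the real code is omitted for brevity).

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func listXattrs(path string) ([]byte, error) {
    	buf := make([]byte, 4096)
    	size, err := unix.Llistxattr(path, buf)
    	if err == unix.ENOTSUP || err == unix.ENOSYS {
    		return nil, nil // filesystem has no xattr support; report "no xattrs"
    	}
    	if err != nil {
    		return nil, fmt.Errorf("listing extended attributes of %q: %w", path, err)
    	}
    	return buf[:size], nil
    }

    func main() {
    	list, err := listXattrs("/proc/self/exe")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	fmt.Printf("got %d bytes of xattr names\n", len(list))
    }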
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
new file mode 100644
index 000000000..62c47d6bc
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -0,0 +1,50 @@
+package define
+
+import (
+ "fmt"
+)
+
+// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+type PullPolicy int
+
+const (
+ // PullIfMissing is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should be pulled from a
+ // registry if a local copy of it is not already present.
+ PullIfMissing PullPolicy = iota
+ // PullAlways is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that a fresh, possibly updated, copy of the image
+ // should be pulled from a registry before the build proceeds.
+ PullAlways
+ // PullIfNewer is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should only be pulled
+ // from a registry if a local copy is not already present or if a
+ // newer version of the image is present on the repository.
+ PullIfNewer
+ // PullNever is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that the source image should not be pulled from a
+ // registry if a local copy of it is not already present.
+ PullNever
+)
+
+// String converts a PullPolicy into a string.
+func (p PullPolicy) String() string {
+ switch p {
+ case PullIfMissing:
+ return "PullIfMissing"
+ case PullAlways:
+ return "PullAlways"
+ case PullIfNewer:
+ return "PullIfNewer"
+ case PullNever:
+ return "PullNever"
+ }
+ return fmt.Sprintf("unrecognized policy %d", p)
+}
+
+var PolicyMap = map[string]PullPolicy{
+ "missing": PullIfMissing,
+ "always": PullAlways,
+ "never": PullNever,
+ "ifnewer": PullIfNewer,
+}
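
With PullPolicy relocated into the new define package, front-end code can translate user-supplied policy names through define.PolicyMap and rely on the String() method for logging. A short usage sketch (it assumes the github.com/containers/buildah module is available as a dependency; the parsePullPolicy helper is illustrative, not buildah API):

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/containers/buildah/define"
    )

    // parsePullPolicy maps a --pull/--policy style argument onto a PullPolicy,
    // defaulting to PullIfMissing when the name is not recognized.
    func parsePullPolicy(name string) define.PullPolicy {
    	if policy, ok := define.PolicyMap[strings.ToLower(name)]; ok {
    		return policy
    	}
    	return define.PullIfMissing
    }

    func main() {
    	for _, arg := range []string{"always", "ifnewer", "bogus"} {
    		fmt.Printf("%-8s -> %s\n", arg, parsePullPolicy(arg))
    	}
    }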
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 61663cea2..b1f3ad67a 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -3,21 +3,26 @@ module github.com/containers/buildah
go 1.12
require (
+ github.com/containerd/containerd v1.4.1 // indirect
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
- github.com/containers/common v0.21.0
- github.com/containers/image/v5 v5.5.2
+ github.com/containers/common v0.26.3
+ github.com/containers/image/v5 v5.8.0
github.com/containers/ocicrypt v1.0.3
- github.com/containers/storage v1.23.3
+ github.com/containers/storage v1.24.0
github.com/docker/distribution v2.7.1+incompatible
+ github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible // indirect
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
- github.com/fsouza/go-dockerclient v1.6.5
+ github.com/fsouza/go-dockerclient v1.6.6
github.com/ghodss/yaml v1.0.0
github.com/hashicorp/go-multierror v1.1.0
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/mattn/go-shellwords v1.0.10
- github.com/onsi/ginkgo v1.14.0
- github.com/onsi/gomega v1.10.1
+ github.com/moby/sys/mount v0.1.1 // indirect
+ github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
+ github.com/onsi/ginkgo v1.14.2
+ github.com/onsi/gomega v1.10.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/runc v1.0.0-rc91
@@ -27,16 +32,16 @@ require (
github.com/openshift/imagebuilder v1.1.8
github.com/pkg/errors v0.9.1
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
- github.com/sirupsen/logrus v1.6.0
- github.com/spf13/cobra v0.0.7
+ github.com/sirupsen/logrus v1.7.0
+ github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.6.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
go.etcd.io/bbolt v1.3.5
- golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
- golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
- golang.org/x/text v0.3.3 // indirect
+ golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13
+ gotest.tools/v3 v3.0.3 // indirect
k8s.io/klog v1.0.0 // indirect
)
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 723cf9c40..069328c38 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -1,15 +1,29 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
+github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -19,65 +33,72 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M=
-github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=
+github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.21.0 h1:v2U9MrGw0vMgefQf0/uJYBsSnengxLbSORYqhCVEBs0=
-github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
-github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
-github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
+github.com/containers/common v0.26.3 h1:5Kb5fMmJ7/xMiJ+iEbPA+5pQpl/FGxCgJex4nml4Slo=
+github.com/containers/common v0.26.3/go.mod h1:hJWZIlrl5MsE2ELNRa+MPp6I1kPbXHauuj0Ym4BsLG4=
+github.com/containers/image/v5 v5.7.0 h1:fiTC8/Xbr+zEP6njGTZtPW/3UD7MC93nC9DbUoWdxkA=
+github.com/containers/image/v5 v5.7.0/go.mod h1:8aOy+YaItukxghRORkvhq5ibWttHErzDLy6egrKfKos=
+github.com/containers/image/v5 v5.8.0 h1:B3FGHi0bdGXgg698kBIGOlHCXN5n+scJr6/5354GOPU=
+github.com/containers/image/v5 v5.8.0/go.mod h1:jKxdRtyIDumVa56hdsZvV+gwx4zB50hRou6pIuCWLkg=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
-github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
-github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
-github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
-github.com/containers/storage v1.23.3 h1:6ZeQi+xKBXrbUXSSZvSs8HuKoNCPfRkXR4f+8TkiMsI=
-github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
+github.com/containers/storage v1.23.6/go.mod h1:haFs0HRowKwyzvWEx9EgI3WsL8XCSnBDb5f8P5CAxJY=
+github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
+github.com/containers/storage v1.24.0 h1:Fo2LkF7tkMLmo38sTZ/G8wHjcn8JfUFPfyTxM4WwMfk=
+github.com/containers/storage v1.24.0/go.mod h1:A4d3BzuZK9b3oLVEsiSRhZLPIx3z7utgiPyXLK/YMhY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
+github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -86,9 +107,10 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f h1:Sm8iD2lifO31DwXfkGzq8VgA7rwxPjRsYmeo0K/dF9Y=
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible h1:+0LETFJcCLdIqdtEbVWF1JIxATqM15Y4sLiMcWOYq2U=
+github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -102,18 +124,18 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw=
-github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fsouza/go-dockerclient v1.6.6 h1:9e3xkBrVkPb81gzYq23i7iDUEd6sx2ooeJA/gnYU6R4=
+github.com/fsouza/go-dockerclient v1.6.6/go.mod h1:3/oRIWoe7uT6bwtAayj/EmJmepBjeL4pYvt7ZxC7Rnk=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -127,13 +149,15 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -142,34 +166,56 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
@@ -177,46 +223,70 @@ github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwD
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
+github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
-github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
-github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
-github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ=
+github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
+github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/manifoldco/promptui v0.8.0 h1:R95mMF+McvXZQ7j1g8ucVZE1gLP3Sv6j9vlF9kyRqQo=
+github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
+github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
+github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
+github.com/moby/sys/mount v0.1.1 h1:mdhBytJ1SMmMat0gtzWWjFX/87K5j6E/7Q5z7rR0cZY=
+github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
+github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
+github.com/moby/sys/mountinfo v0.3.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/term v0.0.0-20200429084858-129dac9f73f6/go.mod h1:or9wGItza1sRcM4Wd3dIv8DsFHYQuFsMHEdxUIlUxms=
+github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 h1:SPoLlS9qUUnXcIY4pvA4CTwYjk0Is5f4UPEkeESr53k=
+github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
@@ -229,14 +299,14 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -246,28 +316,22 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=
-github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91 h1:Tp8LWs5G8rFpzTsbRjAtQkPVexhCu0bnANE5IfIhJ6g=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445 h1:y8cfsJRmn8g3VkM4IDpusKSgMUZEXhudm/BuYANLozE=
github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
-github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/openshift/imagebuilder v1.1.8 h1:gjiIl8pbNj0eC4XWvFJHATdDvYm64p9/pLDLQWoLZPA=
github.com/openshift/imagebuilder v1.1.8/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -276,6 +340,7 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 h1:kyf9snWXHvQc+yxE9imhdI8YAm4oKeZISlaAR+x73zs=
github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
@@ -297,60 +362,56 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU=
-github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
+github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
+github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw=
-github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
+github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
+github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
@@ -362,49 +423,72 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
-go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -412,36 +496,48 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13 h1:5jaG59Zhd+8ZXe8C+lgiAGqkOaZBruqrWclLkgAww34=
+golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -451,24 +547,50 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -482,8 +604,10 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -500,8 +624,14 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index b2c95fecd..154bc503f 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -620,7 +620,7 @@ func (i *containerImageRef) Transport() types.ImageTransport {
func (i *containerImageSource) Close() error {
err := os.RemoveAll(i.path)
if err != nil {
- return errors.Wrapf(err, "error removing layer blob directory %q", i.path)
+ return errors.Wrapf(err, "error removing layer blob directory")
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index a8ada90d1..a97a403b3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -208,7 +208,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
logrus.Debugf("reading remote Dockerfile %q", dfile)
resp, err := http.Get(dfile)
if err != nil {
- return "", nil, errors.Wrapf(err, "error getting %q", dfile)
+ return "", nil, err
}
if resp.ContentLength == 0 {
resp.Body.Close()
@@ -216,16 +216,19 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
}
data = resp.Body
} else {
- // If the Dockerfile isn't found try prepending the
- // context directory to it.
dinfo, err := os.Stat(dfile)
- if os.IsNotExist(err) {
- dfile = filepath.Join(options.ContextDirectory, dfile)
- dinfo, err = os.Stat(dfile)
- if err != nil {
- return "", nil, errors.Wrapf(err, "error reading info about %q", dfile)
+ if err != nil {
+ // If the Dockerfile isn't available, try again with
+ // context directory prepended (if not prepended yet).
+ if !strings.HasPrefix(dfile, options.ContextDirectory) {
+ dfile = filepath.Join(options.ContextDirectory, dfile)
+ dinfo, err = os.Stat(dfile)
}
}
+ if err != nil {
+ return "", nil, err
+ }
+
// If given a directory, add '/Dockerfile' to it.
if dinfo.Mode().IsDir() {
dfile = filepath.Join(dfile, "Dockerfile")
@@ -233,7 +236,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
logrus.Debugf("reading local Dockerfile %q", dfile)
contents, err := os.Open(dfile)
if err != nil {
- return "", nil, errors.Wrapf(err, "error reading %q", dfile)
+ return "", nil, err
}
dinfo, err = contents.Stat()
if err != nil {
@@ -242,7 +245,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
}
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
contents.Close()
- return "", nil, errors.Wrapf(err, "no contents in %q", dfile)
+ return "", nil, errors.Errorf("no contents in %q", dfile)
}
data = contents
}
@@ -261,7 +264,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
mainNode, err := imagebuilder.ParseDockerfile(dockerfiles[0])
if err != nil {
- return "", nil, errors.Wrapf(err, "error parsing main Dockerfile")
+ return "", nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfiles[0])
}
warnOnUnsetBuildArgs(mainNode, options.Args)
@@ -269,7 +272,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
for _, d := range dockerfiles[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(d)
if err != nil {
- return "", nil, errors.Wrapf(err, "error parsing additional Dockerfile")
+ return "", nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", d)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
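
The build.go hunk above reworks Dockerfile resolution: a failed stat falls back to the build context directory (only when it has not been prepended yet), and a directory resolves to the Dockerfile inside it. A minimal, self-contained sketch of that lookup order follows; resolveDockerfile is a hypothetical helper for illustration, not part of buildah's API.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// resolveDockerfile mirrors the fallback in the hunk: stat the path as
// given, retry with the context directory prepended, and append
// "Dockerfile" when the result is a directory.
func resolveDockerfile(contextDir, dfile string) (string, error) {
	info, err := os.Stat(dfile)
	if err != nil && !strings.HasPrefix(dfile, contextDir) {
		// Not found as given: retry relative to the build context.
		dfile = filepath.Join(contextDir, dfile)
		info, err = os.Stat(dfile)
	}
	if err != nil {
		return "", err
	}
	if info.Mode().IsDir() {
		// A directory means "use the Dockerfile inside it".
		dfile = filepath.Join(dfile, "Dockerfile")
	}
	return dfile, nil
}

func main() {
	path, err := resolveDockerfile(".", "Dockerfile")
	fmt.Println(path, err)
}
```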
diff --git a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go
index 2fb10ab83..4dd49130d 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_linux.go
@@ -90,7 +90,7 @@ func getSymbolicLink(path string) (string, error) {
symPath = filepath.Join(symPath, p)
isSymlink, resolvedPath, err := hasSymlink(symPath)
if err != nil {
- return "", errors.Wrapf(err, "error checking symlink for %q", symPath)
+ return "", err
}
// if isSymlink is true, check if resolvedPath is potentially another symlink
// keep doing this till resolvedPath is not a symlink and isSymlink is false
@@ -102,7 +102,7 @@ func getSymbolicLink(path string) (string, error) {
}
isSymlink, resolvedPath, err = hasSymlink(resolvedPath)
if err != nil {
- return "", errors.Wrapf(err, "error checking symlink for %q", resolvedPath)
+ return "", err
}
symLinksResolved++
}
@@ -121,14 +121,14 @@ func hasSymlink(path string) (bool, string, error) {
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(path, 0755); err != nil {
- return false, "", errors.Wrapf(err, "error ensuring volume path %q exists", path)
+ return false, "", err
}
info, err = os.Lstat(path)
if err != nil {
- return false, "", errors.Wrapf(err, "error running lstat on %q", path)
+ return false, "", err
}
} else {
- return false, path, errors.Wrapf(err, "error get stat of path %q", path)
+ return false, path, err
}
}
@@ -140,7 +140,7 @@ func hasSymlink(path string) (bool, string, error) {
// Read the symlink to get what it points to
targetDir, err := os.Readlink(path)
if err != nil {
- return false, "", errors.Wrapf(err, "error reading link %q", path)
+ return false, "", err
}
// if the symlink points to a relative path, prepend the path till now to the resolved path
if !filepath.IsAbs(targetDir) {
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index b0ec1cda0..8c96b4e67 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -17,6 +17,7 @@ import (
"github.com/containers/buildah/util"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
@@ -26,7 +27,6 @@ import (
"github.com/containers/storage/pkg/archive"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/opencontainers/runc/libcontainer/configs"
"github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
@@ -98,7 +98,7 @@ type Executor struct {
excludes []string
unusedArgs map[string]struct{}
capabilities []string
- devices []configs.Device
+ devices buildah.ContainerDevices
signBy string
architecture string
timestamp *time.Time
@@ -112,6 +112,15 @@ type Executor struct {
stagesSemaphore *semaphore.Weighted
jobs int
logRusage bool
+ imageInfoLock sync.Mutex
+ imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
+}
+
+type imageTypeAndHistoryAndDiffIDs struct {
+ manifestType string
+ history []v1.History
+ diffIDs []digest.Digest
+ err error
}
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -130,7 +139,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
return nil, err
}
- devices := []configs.Device{}
+ devices := buildah.ContainerDevices{}
for _, device := range append(defaultContainerConfig.Containers.Devices, options.Devices...) {
dev, err := parse.DeviceFromPath(device)
if err != nil {
@@ -216,6 +225,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
terminatedStage: make(map[string]struct{}),
jobs: jobs,
logRusage: options.LogRusage,
+ imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs),
}
if exec.err == nil {
exec.err = os.Stderr
@@ -336,22 +346,43 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
}
}
-// getImageHistoryAndDiffIDs returns the history and diff IDs list of imageID.
-func (b *Executor) getImageHistoryAndDiffIDs(ctx context.Context, imageID string) ([]v1.History, []digest.Digest, error) {
+// getImageTypeAndHistoryAndDiffIDs returns the manifest type, history, and diff IDs list of imageID.
+func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID string) (string, []v1.History, []digest.Digest, error) {
+ b.imageInfoLock.Lock()
+ imageInfo, ok := b.imageInfoCache[imageID]
+ b.imageInfoLock.Unlock()
+ if ok {
+ return imageInfo.manifestType, imageInfo.history, imageInfo.diffIDs, imageInfo.err
+ }
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
+ return "", nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
}
ref, err := imageRef.NewImage(ctx, nil)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
+ return "", nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
}
defer ref.Close()
oci, err := ref.OCIConfig(ctx)
if err != nil {
- return nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
+ return "", nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
+ }
+ manifestBytes, manifestFormat, err := ref.Manifest(ctx)
+ if err != nil {
+ return "", nil, nil, errors.Wrapf(err, "error getting manifest of image %q", imageID)
+ }
+ if manifestFormat == "" && len(manifestBytes) > 0 {
+ manifestFormat = manifest.GuessMIMEType(manifestBytes)
+ }
+ b.imageInfoLock.Lock()
+ b.imageInfoCache[imageID] = imageTypeAndHistoryAndDiffIDs{
+ manifestType: manifestFormat,
+ history: oci.History,
+ diffIDs: oci.RootFS.DiffIDs,
+ err: nil,
}
- return oci.History, oci.RootFS.DiffIDs, nil
+ b.imageInfoLock.Unlock()
+ return manifestFormat, oci.History, oci.RootFS.DiffIDs, nil
}
func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
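
The executor.go hunk introduces imageInfoCache, a map guarded by imageInfoLock, so the manifest type, history, and diff IDs of an image are read from the store only once per build. A sketch of the same locking pattern with placeholder types:

```go
package main

import (
	"fmt"
	"sync"
)

// imageInfo stands in for the manifest-type/history/diffIDs bundle that
// the hunk caches; the field is illustrative only.
type imageInfo struct {
	manifestType string
}

// infoCache shows the same pattern as imageInfoCache: take the lock only
// around map access and do the expensive lookup outside of it.
type infoCache struct {
	mu    sync.Mutex
	cache map[string]imageInfo
}

func (c *infoCache) get(imageID string, lookup func(string) imageInfo) imageInfo {
	c.mu.Lock()
	info, ok := c.cache[imageID]
	c.mu.Unlock()
	if ok {
		return info
	}
	info = lookup(imageID) // potentially slow: parses the stored image
	c.mu.Lock()
	c.cache[imageID] = info
	c.mu.Unlock()
	return info
}

func main() {
	c := &infoCache{cache: map[string]imageInfo{}}
	fmt.Println(c.get("sha256:abc", func(string) imageInfo {
		return imageInfo{manifestType: "application/vnd.oci.image.manifest.v1+json"}
	}))
}
```

As in the hunk, two goroutines that miss the cache at the same time may both perform the lookup; only the map accesses are serialized, which is acceptable because the computed value is identical.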
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 0b1db01a3..6c058e226 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -81,7 +81,7 @@ func (s *StageExecutor) Preserve(path string) error {
// except ensure that it exists.
archivedPath := filepath.Join(s.mountPoint, path)
if err := os.MkdirAll(archivedPath, 0755); err != nil {
- return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+ return errors.Wrapf(err, "error ensuring volume path exists")
}
if err := s.volumeCacheInvalidate(path); err != nil {
return errors.Wrapf(err, "error ensuring volume path %q is preserved", archivedPath)
@@ -110,13 +110,13 @@ func (s *StageExecutor) Preserve(path string) error {
st, err := os.Stat(archivedPath)
if os.IsNotExist(err) {
if err = os.MkdirAll(archivedPath, 0755); err != nil {
- return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+ return errors.Wrapf(err, "error ensuring volume path exists")
}
st, err = os.Stat(archivedPath)
}
if err != nil {
logrus.Debugf("error reading info about %q: %v", archivedPath, err)
- return errors.Wrapf(err, "error reading info about volume path %q", archivedPath)
+ return err
}
s.volumeCacheInfo[path] = st
if !s.volumes.Add(path) {
@@ -152,7 +152,7 @@ func (s *StageExecutor) Preserve(path string) error {
if os.IsNotExist(err) {
continue
}
- return errors.Wrapf(err, "error removing %q", s.volumeCache[cachedPath])
+ return err
}
delete(s.volumeCache, cachedPath)
}
@@ -173,7 +173,7 @@ func (s *StageExecutor) volumeCacheInvalidate(path string) error {
if os.IsNotExist(err) {
continue
}
- return errors.Wrapf(err, "error removing volume cache %q", s.volumeCache[cachedPath])
+ return err
}
archivedPath := filepath.Join(s.mountPoint, cachedPath)
logrus.Debugf("invalidated volume cache for %q from %q", archivedPath, s.volumeCache[cachedPath])
@@ -193,15 +193,15 @@ func (s *StageExecutor) volumeCacheSave() error {
continue
}
if !os.IsNotExist(err) {
- return errors.Wrapf(err, "error checking for cache of %q in %q", archivedPath, cacheFile)
+ return err
}
if err := os.MkdirAll(archivedPath, 0755); err != nil {
- return errors.Wrapf(err, "error ensuring volume path %q exists", archivedPath)
+ return errors.Wrapf(err, "error ensuring volume path exists")
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
if err != nil {
- return errors.Wrapf(err, "error creating archive at %q", cacheFile)
+ return err
}
defer cache.Close()
rc, err := archive.Tar(archivedPath, archive.Uncompressed)
@@ -224,14 +224,14 @@ func (s *StageExecutor) volumeCacheRestore() error {
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
if err != nil {
- return errors.Wrapf(err, "error opening archive at %q", cacheFile)
+ return err
}
defer cache.Close()
if err := os.RemoveAll(archivedPath); err != nil {
- return errors.Wrapf(err, "error clearing volume path %q", archivedPath)
+ return err
}
if err := os.MkdirAll(archivedPath, 0755); err != nil {
- return errors.Wrapf(err, "error recreating volume path %q", archivedPath)
+ return err
}
err = archive.Untar(cache, archivedPath, nil)
if err != nil {
@@ -239,7 +239,7 @@ func (s *StageExecutor) volumeCacheRestore() error {
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
- return errors.Wrapf(err, "error restoring permissions on %q", archivedPath)
+ return err
}
uid := 0
gid := 0
@@ -248,10 +248,10 @@ func (s *StageExecutor) volumeCacheRestore() error {
gid = util.GID(st)
}
if err := os.Chown(archivedPath, uid, gid); err != nil {
- return errors.Wrapf(err, "error setting ownership on %q", archivedPath)
+ return err
}
if err := os.Chtimes(archivedPath, st.ModTime(), st.ModTime()); err != nil {
- return errors.Wrapf(err, "error restoring datestamps on %q", archivedPath)
+ return err
}
}
}
@@ -789,8 +789,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// for this stage. Make a note of the
// instruction in the history that we'll write
// for the image when we eventually commit it.
- now := time.Now()
- s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentSummary), "", "")
+ timestamp := time.Now().UTC()
+ if s.executor.timestamp != nil {
+ timestamp = *s.executor.timestamp
+ }
+ s.builder.AddPrependedEmptyLayer(&timestamp, s.getCreatedBy(node, addedContentSummary), "", "")
continue
} else {
// This is the last instruction for this stage,
@@ -1107,7 +1110,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
- baseHistory, baseDiffIDs, err = s.executor.getImageHistoryAndDiffIDs(ctx, s.builder.FromImageID)
+ _, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
@@ -1139,10 +1142,15 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
}
// Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
- history, diffIDs, err := s.executor.getImageHistoryAndDiffIDs(ctx, image.ID)
+ manifestType, history, diffIDs, err := s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
}
+ // If this candidate isn't of the type that we're building, then it may have lost
+ // some format-specific information that a building-without-cache run wouldn't lose.
+ if manifestType != s.executor.outputFormat {
+ continue
+ }
// children + currNode is the point of the Dockerfile we are currently at.
if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
return image.ID, nil
@@ -1273,5 +1281,5 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
- return copier.Mkdir(s.mountPoint, path, copier.MkdirOptions{})
+ return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{})
}
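
The stage_executor.go changes make prepended empty layers honor a caller-supplied timestamp (for deterministic builds) instead of always using time.Now(), and skip cache candidates whose manifest type differs from the requested output format. A small sketch of the timestamp selection, assuming a hypothetical createdTimestamp helper:

```go
package main

import (
	"fmt"
	"time"
)

// createdTimestamp prefers a caller-supplied timestamp (e.g. from a
// --timestamp flag, for reproducible builds) and falls back to the
// current time in UTC. Illustrative helper, not the buildah API.
func createdTimestamp(override *time.Time) time.Time {
	if override != nil {
		return *override
	}
	return time.Now().UTC()
}

func main() {
	epoch := time.Unix(0, 0).UTC()
	fmt.Println(createdTimestamp(&epoch)) // 1970-01-01 00:00:00 +0000 UTC
	fmt.Println(createdTimestamp(nil))    // current time
}
```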
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 29cdf44d0..50da74b37 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -40,7 +40,7 @@ func downloadToDirectory(url, dir string) error {
logrus.Debugf("extracting %q to %q", url, dir)
resp, err := http.Get(url)
if err != nil {
- return errors.Wrapf(err, "error getting %q", url)
+ return err
}
defer resp.Body.Close()
if resp.ContentLength == 0 {
@@ -49,12 +49,12 @@ func downloadToDirectory(url, dir string) error {
if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
resp1, err := http.Get(url)
if err != nil {
- return errors.Wrapf(err, "error getting %q", url)
+ return err
}
defer resp1.Body.Close()
body, err := ioutil.ReadAll(resp1.Body)
if err != nil {
- return errors.Wrapf(err, "Failed to read %q", url)
+ return err
}
dockerfile := filepath.Join(dir, "Dockerfile")
// Assume this is a Dockerfile
diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go
index 329633b44..0aa1d236d 100644
--- a/vendor/github.com/containers/buildah/import.go
+++ b/vendor/github.com/containers/buildah/import.go
@@ -154,7 +154,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im
_, img, err := util.FindImage(store, "", systemContext, options.Image)
if err != nil {
- return nil, errors.Wrapf(err, "error locating image %q for importing settings", options.Image)
+ return nil, errors.Wrapf(err, "importing settings")
}
builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index 03c9b2720..00381e16a 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -38,24 +38,21 @@ provides updated packages for CentOS 7, 8 and Stream.
```bash
# CentOS 7
-cd /etc/yum.repos.d/
-sudo wget https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo
+sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo
sudo yum -y install buildah
# CentOS 8
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
-cd /etc/yum.repos.d
-sudo wget https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo
+sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo
sudo dnf -y install buildah
# CentOS Stream
sudo dnf -y module disable container-tools
sudo dnf -y install 'dnf-command(copr)'
sudo dnf -y copr enable rhcontainerbot/container-selinux
-cd /etc/yum.repos.d
-sudo wget https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
+sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo
sudo dnf -y install buildah
```
@@ -472,7 +469,7 @@ Buildah uses Go Modules for vendoring purposes. If you need to update or add a
* `export GO111MODULE=on`
* `go get` the needed version:
* Assuming you want to 'bump' the `github.com/containers/storage` package to version 1.12.13, use this command: `go get github.com/containers/storage@v1.12.13`
- * Assumming that you want to 'bump' the `github.com/containers/storage` package to a particular commit, use this command: `go get github.com/containers/storage@e307568568533c4afccdf7b56df7b4493e4e9a7b`
+ * Assuming that you want to 'bump' the `github.com/containers/storage` package to a particular commit, use this command: `go get github.com/containers/storage@e307568568533c4afccdf7b56df7b4493e4e9a7b`
* `make vendor-in-container`
* `make`
* `make install`
diff --git a/vendor/github.com/containers/buildah/libdm_tag.sh b/vendor/github.com/containers/buildah/libdm_tag.sh
index d3668aab1..815b5d914 100644
--- a/vendor/github.com/containers/buildah/libdm_tag.sh
+++ b/vendor/github.com/containers/buildah/libdm_tag.sh
@@ -2,7 +2,7 @@
tmpdir="$PWD/tmp.$RANDOM"
mkdir -p "$tmpdir"
trap 'rm -fr "$tmpdir"' EXIT
-cc -o "$tmpdir"/libdm_tag -ldevmapper -x c - > /dev/null 2> /dev/null << EOF
+${CC:-cc} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS} -o "$tmpdir"/libdm_tag -x c - -ldevmapper > /dev/null 2> /dev/null << EOF
#include <libdevmapper.h>
int main() {
struct dm_task *task;
diff --git a/vendor/github.com/containers/buildah/mount.go b/vendor/github.com/containers/buildah/mount.go
index 0eb9cbcea..8c7a23f8c 100644
--- a/vendor/github.com/containers/buildah/mount.go
+++ b/vendor/github.com/containers/buildah/mount.go
@@ -19,3 +19,35 @@ func (b *Builder) Mount(label string) (string, error) {
}
return mountpoint, nil
}
+
+func (b *Builder) setMountPoint(mountPoint string) error {
+ b.MountPoint = mountPoint
+ if err := b.Save(); err != nil {
+ return errors.Wrapf(err, "error saving updated state for build container %q", b.ContainerID)
+ }
+ return nil
+}
+
+// Mounted returns whether the container is mounted or not
+func (b *Builder) Mounted() (bool, error) {
+ mountCnt, err := b.store.Mounted(b.ContainerID)
+ if err != nil {
+ return false, errors.Wrapf(err, "error determining if build container %q is mounted", b.ContainerID)
+ }
+ mounted := mountCnt > 0
+ if mounted && b.MountPoint == "" {
+ ctr, err := b.store.Container(b.ContainerID)
+ if err != nil {
+ return mountCnt > 0, errors.Wrapf(err, "error determining if build container %q is mounted", b.ContainerID)
+ }
+ layer, err := b.store.Layer(ctr.LayerID)
+ if err != nil {
+ return mountCnt > 0, errors.Wrapf(err, "error determining if build container %q is mounted", b.ContainerID)
+ }
+ return mounted, b.setMountPoint(layer.MountPoint)
+ }
+ if !mounted && b.MountPoint != "" {
+ return mounted, b.setMountPoint("")
+ }
+ return mounted, nil
+}
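
The new Mounted() method treats a positive mount count from the storage library as authoritative and reconciles the builder's cached MountPoint with it. A rough sketch of that reconciliation, using stand-in types rather than the real Builder:

```go
package main

import "fmt"

// builderState is a stand-in for the cached mount state on the Builder.
type builderState struct {
	mountPoint string
}

// reconcileMount mirrors the logic above: a positive mount count means
// "mounted", and the cached mount point is refreshed when it disagrees
// with that state.
func reconcileMount(state *builderState, mountCount int, layerMountPoint string) bool {
	mounted := mountCount > 0
	if mounted && state.mountPoint == "" {
		state.mountPoint = layerMountPoint // re-learn the path from the layer
	}
	if !mounted && state.mountPoint != "" {
		state.mountPoint = "" // stale: the container was unmounted elsewhere
	}
	return mounted
}

func main() {
	s := &builderState{}
	fmt.Println(reconcileMount(s, 1, "/tmp/build-root"), s.mountPoint)
	fmt.Println(reconcileMount(s, 0, ""), s.mountPoint)
}
```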
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 4f4b1564b..c1abb1cdb 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -4,13 +4,14 @@ import (
"context"
"fmt"
"math/rand"
+ "os"
"strings"
- "time"
"github.com/containers/buildah/util"
+ "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
- "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/pkg/shortnames"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
@@ -103,145 +104,168 @@ func newContainerIDMappingOptions(idmapOptions *IDMappingOptions) storage.IDMapp
return options
}
-func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, *storage.Image, error) {
- type failure struct {
- resolvedImageName string
- err error
- }
- candidates, transport, searchRegistriesWereUsedButEmpty, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store)
+func resolveLocalImage(systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, *storage.Image, error) {
+ candidates, _, _, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store)
if err != nil {
- return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", options.FromImage)
+ return nil, "", nil, errors.Wrapf(err, "error resolving local image %q", options.FromImage)
}
-
- failures := []failure{}
for _, image := range candidates {
- if transport == "" {
- img, err := store.Image(image)
- if err != nil {
- logrus.Debugf("error looking up known-local image %q: %v", image, err)
- failures = append(failures, failure{resolvedImageName: image, err: err})
+ img, err := store.Image(image)
+ if err != nil {
+ if errors.Cause(err) == storage.ErrImageUnknown {
continue
}
- ref, err := is.Transport.ParseStoreReference(store, img.ID)
- if err != nil {
- return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
- }
- return ref, transport, img, nil
+ return nil, "", nil, err
}
-
- trans := transport
- if transport != util.DefaultTransport {
- trans = trans + ":"
- }
- srcRef, err := alltransports.ParseImageName(trans + image)
+ ref, err := is.Transport.ParseStoreReference(store, img.ID)
if err != nil {
- logrus.Debugf("error parsing image name %q: %v", trans+image, err)
- failures = append(failures, failure{
- resolvedImageName: image,
- err: errors.Wrapf(err, "error parsing attempted image name %q", trans+image),
- })
- continue
+ return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
}
+ return ref, ref.Transport().Name(), img, nil
+ }
+
+ return nil, "", nil, nil
+}
+
+// getShortNameMode looks up the `CONTAINERS_SHORT_NAME_ALIASING` environment
+// variable. If it's "on", return `nil` to use the defaults from
+// containers/image and the registries.conf files on the system. If it's
+// "off", empty or unset, return types.ShortNameModeDisabled to turn off
+// short-name aliasing by default.
+//
+// TODO: remove this function once we want to default to short-name aliasing.
+func getShortNameMode() *types.ShortNameMode {
+ env := os.Getenv("CONTAINERS_SHORT_NAME_ALIASING")
+ if strings.ToLower(env) == "on" {
+ return nil // default to whatever registries.conf and c/image decide
+ }
+ mode := types.ShortNameModeDisabled
+ return &mode
+}
- if options.PullPolicy == PullAlways {
+func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, *storage.Image, error) {
+ if systemContext == nil {
+ systemContext = &types.SystemContext{}
+ }
+ systemContext.ShortNameMode = getShortNameMode()
+
+ fromImage := options.FromImage
+ // If the image name includes a transport we can use it as is. Special
+ // treatment for docker references which are subject to pull policies
+ // that we're handling below.
+ srcRef, err := alltransports.ParseImageName(options.FromImage)
+ if err == nil {
+ if srcRef.Transport().Name() == docker.Transport.Name() {
+ fromImage = srcRef.DockerReference().String()
+ } else {
pulledImg, pulledReference, err := pullAndFindImage(ctx, store, srcRef, options, systemContext)
- if err != nil {
- logrus.Debugf("unable to pull and read image %q: %v", image, err)
- failures = append(failures, failure{resolvedImageName: image, err: err})
- continue
- }
- return pulledReference, transport, pulledImg, nil
+ return pulledReference, srcRef.Transport().Name(), pulledImg, err
}
+ }
- destImage, err := localImageNameForReference(ctx, store, srcRef)
- if err != nil {
- return nil, "", nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
+ localImageRef, _, localImage, err := resolveLocalImage(systemContext, store, options)
+ if err != nil {
+ return nil, "", nil, err
+ }
+
+ // If we could resolve the image locally, check if it was referenced by
+ // ID. In that case, we don't need to bother any further and can
+ // prevent prompting the user.
+ if localImage != nil && strings.HasPrefix(localImage.ID, options.FromImage) {
+ return localImageRef, localImageRef.Transport().Name(), localImage, nil
+ }
+
+ if options.PullPolicy == PullNever || options.PullPolicy == PullIfMissing {
+ if localImage != nil {
+ return localImageRef, localImageRef.Transport().Name(), localImage, nil
}
- if destImage == "" {
- return nil, "", nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
+ if options.PullPolicy == PullNever {
+ return nil, "", nil, errors.Errorf("pull policy is %q but %q could not be found locally", "never", options.FromImage)
}
- ref, err := is.Transport.ParseStoreReference(store, destImage)
+ }
+
+ resolved, err := shortnames.Resolve(systemContext, fromImage)
+ if err != nil {
+ return nil, "", nil, err
+ }
+
+ // Print the image-resolution description unless we're looking for a
+ // new image and already found a local image. In many cases, the
+ // description will be more confusing than helpful (e.g., `buildah from
+ // localImage`).
+ if desc := resolved.Description(); len(desc) > 0 {
+ logrus.Debug(desc)
+ if !(options.PullPolicy == PullIfNewer && localImage != nil) {
+ if options.ReportWriter != nil {
+ if _, err := options.ReportWriter.Write([]byte(desc + "\n")); err != nil {
+ return nil, "", nil, err
+ }
+ }
+ }
+ }
+
+ var pullErrors []error
+ for _, pullCandidate := range resolved.PullCandidates {
+ ref, err := docker.NewReference(pullCandidate.Value)
if err != nil {
- return nil, "", nil, errors.Wrapf(err, "error parsing reference to image %q", destImage)
+ return nil, "", nil, err
}
- if options.PullPolicy == PullIfNewer {
- img, err := is.Transport.GetStoreImage(store, ref)
- if err == nil {
- // Let's see if this image is on the repository and if it's there
- // then note it's Created date.
- var repoImageCreated time.Time
- repoImageFound := false
- repoImage, err := srcRef.NewImage(ctx, systemContext)
- if err == nil {
- inspect, err := repoImage.Inspect(ctx)
- if err == nil {
- repoImageFound = true
- repoImageCreated = *inspect.Created
- }
- repoImage.Close()
- }
- if !repoImageFound || repoImageCreated == img.Created {
- // The image is only local or the same date is on the
- // local and repo versions of the image, no need to pull.
- return ref, transport, img, nil
- }
+ // We're tasked to pull a "newer" image. If there's no local
+ // image, we have no base for comparison, so we'll pull the
+ // first available image.
+ //
+ // If there's a local image, the `pullCandidate` is considered
+ // to be newer if its time stamp differs from the local one.
+ // Otherwise, we don't pull and skip it.
+ if options.PullPolicy == PullIfNewer && localImage != nil {
+ remoteImage, err := ref.NewImage(ctx, systemContext)
+ if err != nil {
+ logrus.Debugf("unable to remote-inspect image %q: %v", pullCandidate.Value.String(), err)
+ pullErrors = append(pullErrors, err)
+ continue
}
- } else {
- // Get the image from the store if present for PullNever and PullIfMissing
- img, err := is.Transport.GetStoreImage(store, ref)
- if err == nil {
- return ref, transport, img, nil
+ defer remoteImage.Close()
+
+ remoteData, err := remoteImage.Inspect(ctx)
+ if err != nil {
+ logrus.Debugf("unable to remote-inspect image %q: %v", pullCandidate.Value.String(), err)
+ pullErrors = append(pullErrors, err)
+ continue
}
- if errors.Cause(err) == storage.ErrImageUnknown && options.PullPolicy == PullNever {
- logrus.Debugf("no such image %q: %v", transports.ImageName(ref), err)
- failures = append(failures, failure{
- resolvedImageName: image,
- err: errors.Errorf("no such image %q", transports.ImageName(ref)),
- })
+
+ // FIXME: we should compare image digests not time stamps.
+ // Comparing time stamps is flawed. Be aware that fixing
+ // it may entail non-trivial changes to the tests. Please
+ // refer to https://github.com/containers/buildah/issues/2779
+ // for more.
+ if localImage.Created.Equal(*remoteData.Created) {
continue
}
}
- pulledImg, pulledReference, err := pullAndFindImage(ctx, store, srcRef, options, systemContext)
+ pulledImg, pulledReference, err := pullAndFindImage(ctx, store, ref, options, systemContext)
if err != nil {
- logrus.Debugf("unable to pull and read image %q: %v", image, err)
- failures = append(failures, failure{resolvedImageName: image, err: err})
+ logrus.Debugf("unable to pull and read image %q: %v", pullCandidate.Value.String(), err)
+ pullErrors = append(pullErrors, err)
continue
}
- return pulledReference, transport, pulledImg, nil
- }
-
- if len(failures) != len(candidates) {
- return nil, "", nil, errors.Errorf("internal error: %d candidates (%#v) vs. %d failures (%#v)", len(candidates), candidates, len(failures), failures)
- }
- registriesConfPath := sysregistriesv2.ConfigPath(systemContext)
- switch len(failures) {
- case 0:
- if searchRegistriesWereUsedButEmpty {
- return nil, "", nil, errors.Errorf("image name %q is a short name and no search registries are defined in %s.", options.FromImage, registriesConfPath)
+ // Make sure to record the short-name alias if necessary.
+ if err = pullCandidate.Record(); err != nil {
+ return nil, "", nil, err
}
- return nil, "", nil, errors.Errorf("internal error: no pull candidates were available for %q for an unknown reason", options.FromImage)
- case 1:
- err := failures[0].err
- if failures[0].resolvedImageName != options.FromImage {
- err = errors.Wrapf(err, "while pulling %q as %q", options.FromImage, failures[0].resolvedImageName)
- }
- if searchRegistriesWereUsedButEmpty {
- err = errors.Wrapf(err, "(image name %q is a short name and no search registries are defined in %s)", options.FromImage, registriesConfPath)
- }
- return nil, "", nil, err
+ return pulledReference, "", pulledImg, nil
+ }
- default:
- // NOTE: a multi-line error string:
- e := fmt.Sprintf("The following failures happened while trying to pull image specified by %q based on search registries in %s:", options.FromImage, registriesConfPath)
- for _, f := range failures {
- e = e + fmt.Sprintf("\n* %q: %s", f.resolvedImageName, f.err.Error())
- }
- return nil, "", nil, errors.New(e)
+ // If we were looking for a newer image but could not find one, return
+ // the local image if present.
+ if options.PullPolicy == PullIfNewer && localImage != nil {
+ return localImageRef, localImageRef.Transport().Name(), localImage, nil
}
+
+ return nil, "", nil, resolved.FormatPullErrors(pullErrors)
}
func containerNameExist(name string, containers []storage.Container) bool {
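
The rewritten resolveImage defers short-name resolution to containers/image's shortnames package and keeps aliasing off unless CONTAINERS_SHORT_NAME_ALIASING=on. A minimal sketch of that environment toggle, with plain strings standing in for types.ShortNameMode:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// shortNameAliasing mimics getShortNameMode with plain types: only
// CONTAINERS_SHORT_NAME_ALIASING=on opts in to the registries.conf
// defaults; anything else disables aliasing. Illustrative only.
func shortNameAliasing() string {
	if strings.ToLower(os.Getenv("CONTAINERS_SHORT_NAME_ALIASING")) == "on" {
		return "default" // defer to containers/image and registries.conf
	}
	return "disabled"
}

func main() {
	fmt.Println(shortNameAliasing())
}
```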
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 41d545bd8..62a328de0 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -11,9 +11,11 @@ import (
"strings"
"github.com/containers/buildah"
+ "github.com/containers/buildah/pkg/completion"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/pkg/auth"
+ commonComp "github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -57,7 +59,6 @@ type BudResults struct {
Creds string
DisableCompression bool
DisableContentTrust bool
- DecryptionKeys []string
File []string
Format string
Iidfile string
@@ -88,43 +89,55 @@ type BudResults struct {
// FromAndBugResults represents the results for common flags
// in bud and from
type FromAndBudResults struct {
- AddHost []string
- BlobCache string
- CapAdd []string
- CapDrop []string
- CgroupParent string
- CPUPeriod uint64
- CPUQuota int64
- CPUSetCPUs string
- CPUSetMems string
- CPUShares uint64
- Devices []string
- DNSSearch []string
- DNSServers []string
- DNSOptions []string
- HTTPProxy bool
- Isolation string
- Memory string
- MemorySwap string
- OverrideArch string
- OverrideOS string
- SecurityOpt []string
- ShmSize string
- Ulimit []string
- Volumes []string
+ AddHost []string
+ BlobCache string
+ CapAdd []string
+ CapDrop []string
+ CgroupParent string
+ CPUPeriod uint64
+ CPUQuota int64
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares uint64
+ DecryptionKeys []string
+ Devices []string
+ DNSSearch []string
+ DNSServers []string
+ DNSOptions []string
+ HTTPProxy bool
+ Isolation string
+ Memory string
+ MemorySwap string
+ OverrideArch string
+ OverrideOS string
+ SecurityOpt []string
+ ShmSize string
+ Ulimit []string
+ Volumes []string
}
// GetUserNSFlags returns the common flags for usernamespace
func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
usernsFlags := pflag.FlagSet{}
usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
- usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerID:hostID:length` UID mapping to use in user namespace")
- usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerID:hostID:length` GID mapping to use in user namespace")
+ usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace")
+ usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace")
usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
return usernsFlags
}
+// GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags
+func GetUserNSFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["userns"] = completion.AutocompleteNamespaceFlag
+ flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone
+ flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone
+ flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName
+ flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName
+ return flagCompletion
+}
+
// GetNameSpaceFlags returns the common flags for a namespace menu
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
@@ -137,6 +150,18 @@ func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
return fs
}
+// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags
+func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion["cni-config-dir"] = commonComp.AutocompleteDefault
+ flagCompletion["cni-plugin-path"] = commonComp.AutocompleteDefault
+ flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag
+ flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag
+ return flagCompletion
+}
+
// GetLayerFlags returns the common flags for layers
func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
fs := pflag.FlagSet{}
@@ -145,6 +170,8 @@ func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
return fs
}
+// Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags
+
// GetBudFlags returns common bud flags
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
@@ -161,11 +188,15 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
+ fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])")
- fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
- fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
+ fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
+ if err := fs.MarkHidden("log-rusage"); err != nil {
+ panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
+ }
+ fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
@@ -177,18 +208,46 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
+ if err := fs.MarkHidden("signature-policy"); err != nil {
+ panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err))
+ }
fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer")
fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
+ fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
- fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
- fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
- if err := fs.MarkHidden("log-rusage"); err != nil {
- panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
- }
return fs
}
+// GetBudFlagsCompletions returns the FlagCompletions for the common bud flags
+func GetBudFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["arch"] = commonComp.AutocompleteNone
+ flagCompletion["annotation"] = commonComp.AutocompleteNone
+ flagCompletion["authfile"] = commonComp.AutocompleteDefault
+ flagCompletion["build-arg"] = commonComp.AutocompleteNone
+ flagCompletion["cache-from"] = commonComp.AutocompleteNone
+ flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
+ flagCompletion["creds"] = commonComp.AutocompleteNone
+ flagCompletion["file"] = commonComp.AutocompleteDefault
+ flagCompletion["format"] = commonComp.AutocompleteNone
+ flagCompletion["iidfile"] = commonComp.AutocompleteDefault
+ flagCompletion["jobs"] = commonComp.AutocompleteNone
+ flagCompletion["label"] = commonComp.AutocompleteNone
+ flagCompletion["logfile"] = commonComp.AutocompleteDefault
+ flagCompletion["loglevel"] = commonComp.AutocompleteDefault
+ flagCompletion["os"] = commonComp.AutocompleteNone
+ flagCompletion["platform"] = commonComp.AutocompleteNone
+ flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
+ flagCompletion["sign-by"] = commonComp.AutocompleteNone
+ flagCompletion["signature-policy"] = commonComp.AutocompleteNone
+ flagCompletion["tag"] = commonComp.AutocompleteNone
+ flagCompletion["target"] = commonComp.AutocompleteNone
+ flagCompletion["timestamp"] = commonComp.AutocompleteNone
+ return flagCompletion
+}
+
+// GetFromAndBudFlags returns from and bud flags
func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) {
fs := pflag.FlagSet{}
defaultContainerConfig, err := config.Default()
@@ -209,6 +268,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
+ fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "Additional devices to be used within containers (default [])")
fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
@@ -239,6 +299,45 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
return fs, nil
}
+// GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and bud flags
+func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions {
+ flagCompletion := commonComp.FlagCompletions{}
+ flagCompletion["add-host"] = commonComp.AutocompleteNone
+ flagCompletion["blob-cache"] = commonComp.AutocompleteNone
+ flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities
+ flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities
+ flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?!
+ flagCompletion["cpu-period"] = commonComp.AutocompleteNone
+ flagCompletion["cpu-quota"] = commonComp.AutocompleteNone
+ flagCompletion["cpu-shares"] = commonComp.AutocompleteNone
+ flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone
+ flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone
+ flagCompletion["decryption-key"] = commonComp.AutocompleteNone
+ flagCompletion["device"] = commonComp.AutocompleteDefault
+ flagCompletion["dns-search"] = commonComp.AutocompleteNone
+ flagCompletion["dns"] = commonComp.AutocompleteNone
+ flagCompletion["dns-option"] = commonComp.AutocompleteNone
+ flagCompletion["isolation"] = commonComp.AutocompleteNone
+ flagCompletion["memory"] = commonComp.AutocompleteNone
+ flagCompletion["memory-swap"] = commonComp.AutocompleteNone
+ flagCompletion["security-opt"] = commonComp.AutocompleteNone
+ flagCompletion["shm-size"] = commonComp.AutocompleteNone
+ flagCompletion["ulimit"] = commonComp.AutocompleteNone
+ flagCompletion["volume"] = commonComp.AutocompleteDefault
+
+ // Add in the usernamespace and namespace flag completions
+ userNsComp := GetUserNSFlagsCompletions()
+ for name, comp := range userNsComp {
+ flagCompletion[name] = comp
+ }
+ namespaceComp := GetNameSpaceFlagsCompletions()
+ for name, comp := range namespaceComp {
+ flagCompletion[name] = comp
+ }
+
+ return flagCompletion
+}
+
// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
// otherwise it returns false
func UseLayers() bool {
diff --git a/vendor/github.com/containers/buildah/pkg/completion/completion.go b/vendor/github.com/containers/buildah/pkg/completion/completion.go
new file mode 100644
index 000000000..a7812d296
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/completion/completion.go
@@ -0,0 +1,23 @@
+package completion
+
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+/* Autocomplete Functions for cobra ValidArgsFunction */
+
+// AutocompleteNamespaceFlag - Autocomplete the userns flag.
+// -> host, private, container, ns:[path], [path]
+func AutocompleteNamespaceFlag(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ var completions []string
+ // If we don't filter on "toComplete", zsh and fish will not do file completion
+ // even if the prefix typed by the user does not match the returned completions
+ for _, comp := range []string{"host", "private", "container", "ns:"} {
+ if strings.HasPrefix(comp, toComplete) {
+ completions = append(completions, comp)
+ }
+ }
+ return completions, cobra.ShellCompDirectiveDefault
+}
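
The new completion package returns candidates filtered by the prefix the user has typed. One hedged example of how such a completer is commonly attached to a cobra flag via RegisterFlagCompletionFunc (the command and flag here are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// completeNamespace is a local copy of the pattern above so the example
// stays self-contained.
func completeNamespace(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	var out []string
	for _, c := range []string{"host", "private", "container", "ns:"} {
		if strings.HasPrefix(c, toComplete) {
			out = append(out, c)
		}
	}
	return out, cobra.ShellCompDirectiveDefault
}

func main() {
	cmd := &cobra.Command{Use: "demo", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("userns", "", "user namespace to use")
	// This is how such a completer is typically wired up in cobra.
	if err := cmd.RegisterFlagCompletionFunc("userns", completeNamespace); err != nil {
		fmt.Println(err)
	}
	_ = cmd.Execute()
}
```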
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index c3d9302bb..07aa91e20 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -48,10 +49,36 @@ func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
- upperDir := filepath.Join(contentDir, "upper")
- workDir := filepath.Join(contentDir, "work")
+ return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, false)
+}
+
+// MountReadOnly creates a subdir of the contentDir based on the source directory
+// from the source system. It then mounts up the source directory on to the
+// generated mount point and returns the mount point to the caller. Note that no
+// upper layer will be created, rendering it a read-only mount.
+func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
+ return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, true)
+}
+
+// NOTE: rootUID and rootGID are not yet used.
+func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []string, readOnly bool) (mount specs.Mount, Err error) {
mergeDir := filepath.Join(contentDir, "merge")
- overlayOptions := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir)
+
+ // Create overlay mount options for rw/ro.
+ var overlayOptions string
+ if readOnly {
+ // Read-only overlay mounts require two lower layers.
+ lowerTwo := filepath.Join(contentDir, "lower")
+ if err := os.Mkdir(lowerTwo, 0755); err != nil {
+ return mount, err
+ }
+ overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", source, lowerTwo)
+ } else {
+ // Read-write overlay mounts want a lower, upper and a work layer.
+ workDir := filepath.Join(contentDir, "work")
+ upperDir := filepath.Join(contentDir, "upper")
+ overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir)
+ }
if unshare.IsRootless() {
mountProgram := ""
@@ -90,7 +117,7 @@ func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions [
/* If a mount_program is not specified, fallback to try mount native overlay. */
}
- mount.Source = "overlay"
+ mount.Source = mergeDir
mount.Destination = dest
mount.Type = "overlay"
mount.Options = strings.Split(overlayOptions, ",")
@@ -101,18 +128,34 @@ func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions [
// RemoveTemp removes temporary mountpoint and all content from its parent
// directory
func RemoveTemp(contentDir string) error {
- if unshare.IsRootless() {
- if err := Unmount(contentDir); err != nil {
- return err
- }
+ if err := Unmount(contentDir); err != nil {
+ return err
}
+
return os.RemoveAll(contentDir)
}
// Unmount the overlay mountpoint
-func Unmount(contentDir string) (Err error) {
+func Unmount(contentDir string) error {
mergeDir := filepath.Join(contentDir, "merge")
- if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) {
+
+ if unshare.IsRootless() {
+ // Attempt to unmount the FUSE mount using either fusermount or fusermount3.
+ // If they fail, fall back to unix.Unmount.
+ for _, v := range []string{"fusermount3", "fusermount"} {
+ err := exec.Command(v, "-u", mergeDir).Run()
+ if err != nil && errors.Cause(err) != exec.ErrNotFound {
+ logrus.Debugf("Error unmounting %s with %s - %v", mergeDir, v, err)
+ }
+ if err == nil {
+ return nil
+ }
+ }
+ // If fusermount|fusermount3 failed to unmount the FUSE file system, attempt unmount
+ }
+
+ // Ignore EINVAL, which is returned when the specified merge dir is not a mount point
+ if err := unix.Unmount(mergeDir, 0); err != nil && !os.IsNotExist(err) && err != unix.EINVAL {
return errors.Wrapf(err, "unmount overlay %s", mergeDir)
}
return nil
@@ -153,14 +196,20 @@ func CleanupMount(contentDir string) (Err error) {
func CleanupContent(containerDir string) (Err error) {
contentDir := filepath.Join(containerDir, "overlay")
- if unshare.IsRootless() {
- mergeDir := filepath.Join(contentDir, "merge")
- if err := unix.Unmount(mergeDir, 0); err != nil {
- if !os.IsNotExist(err) {
- return errors.Wrapf(err, "unmount overlay %s", mergeDir)
- }
+ files, err := ioutil.ReadDir(contentDir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
}
+ return errors.Wrapf(err, "read directory")
}
+ for _, f := range files {
+ dir := filepath.Join(contentDir, f.Name())
+ if err := Unmount(dir); err != nil {
+ return err
+ }
+ }
+
if err := os.RemoveAll(contentDir); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to cleanup overlay %s directory", contentDir)
}
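
The overlay.go hunk builds different mount options for read-only and read-write overlays: with no upperdir an overlay is read-only, and the code's own comment notes that such mounts need two lower layers, hence the extra empty "lower" directory. A sketch of the two option strings with made-up paths:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// overlayOptions sketches the option strings built in mountHelper: a
// read-only mount uses two lowerdirs (no upperdir/workdir), a read-write
// mount uses lowerdir+upperdir+workdir. Paths are illustrative.
func overlayOptions(contentDir, source string, readOnly bool) string {
	if readOnly {
		lowerTwo := filepath.Join(contentDir, "lower")
		return fmt.Sprintf("lowerdir=%s:%s,private", source, lowerTwo)
	}
	upper := filepath.Join(contentDir, "upper")
	work := filepath.Join(contentDir, "work")
	return fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upper, work)
}

func main() {
	fmt.Println(overlayOptions("/tmp/overlay-123", "/srv/data", true))
	fmt.Println(overlayOptions("/tmp/overlay-123", "/srv/data", false))
}
```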
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index f5f91d22d..fb348b252 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -335,7 +335,7 @@ func GetBindMount(args []string) (specs.Mount, error) {
setDest := false
for _, val := range args {
- kv := strings.Split(val, "=")
+ kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
@@ -407,7 +407,7 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
setDest := false
for _, val := range args {
- kv := strings.Split(val, "=")
+ kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
@@ -789,9 +789,7 @@ func IDMappingOptions(c *cobra.Command, isolation buildah.Isolation) (usernsOpti
case "host":
usernsOption.Host = true
default:
- if strings.HasPrefix(how, "ns:") {
- how = how[3:]
- }
+ how = strings.TrimPrefix(how, "ns:")
if _, err := os.Stat(how); err != nil {
return nil, nil, errors.Wrapf(err, "error checking for %s namespace at %q", string(specs.UserNamespace), how)
}
@@ -890,9 +888,7 @@ func NamespaceOptions(c *cobra.Command) (namespaceOptions buildah.NamespaceOptio
break
}
}
- if strings.HasPrefix(how, "ns:") {
- how = how[3:]
- }
+ how = strings.TrimPrefix(how, "ns:")
if _, err := os.Stat(how); err != nil {
return nil, buildah.NetworkDefault, errors.Wrapf(err, "error checking for %s namespace at %q", what, how)
}
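
The parse.go change replaces strings.Split with strings.SplitN so that a mount option value containing '=' is not broken apart, and strings.TrimPrefix replaces the manual "ns:" prefix handling. A quick demonstration of why SplitN matters here:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	val := "src=/containers/a=b" // a value that itself contains '='
	fmt.Println(strings.Split(val, "="))     // [src /containers/a b]  -- value is broken apart
	fmt.Println(strings.SplitN(val, "=", 2)) // [src /containers/a=b]  -- key plus intact value
}
```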
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
index fcf62b59e..5e56c1a48 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
@@ -3,7 +3,7 @@
package parse
import (
- "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/containers/buildah"
"github.com/pkg/errors"
)
@@ -11,6 +11,6 @@ func getDefaultProcessLimits() []string {
return []string{}
}
-func DeviceFromPath(device string) ([]configs.Device, error) {
- return []configs.Device{}, errors.Errorf("devices not supported")
+func DeviceFromPath(device string) (buildah.ContainerDevices, error) {
+ return buildah.ContainerDevices{}, errors.Errorf("devices not supported")
}
diff --git a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
index ee2e9a7c8..32f888fa8 100644
--- a/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
+++ b/vendor/github.com/containers/buildah/pkg/secrets/secrets.go
@@ -38,7 +38,7 @@ type secretData struct {
// saveTo saves secret data to given directory
func (s secretData) saveTo(dir string) error {
path := filepath.Join(dir, s.name)
- if err := os.MkdirAll(filepath.Dir(path), s.dirMode); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(filepath.Dir(path), s.dirMode); err != nil {
return err
}
return ioutil.WriteFile(path, s.data, s.mode)
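
The secrets.go change drops the os.IsExist check because os.MkdirAll already reports success when the directory exists, so the extra check could never fire for that case. A tiny demonstration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	dir := os.TempDir() // certainly exists already
	// os.MkdirAll returns nil for an existing directory, which is why
	// the os.IsExist(err) check removed above was redundant.
	err := os.MkdirAll(dir, 0755)
	fmt.Println(err) // <nil>
}
```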
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index be9a521d1..d1fec145e 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -7,7 +7,6 @@ import (
"time"
"github.com/containers/buildah/pkg/blobcache"
- "github.com/containers/buildah/util"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
dockerarchive "github.com/containers/image/v5/docker/archive"
@@ -18,6 +17,7 @@ import (
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
@@ -60,6 +60,8 @@ type PullOptions struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
+ // PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+ PullPolicy PullPolicy
}
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference) (string, error) {
@@ -169,65 +171,66 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s
MaxPullRetries: options.MaxRetries,
PullRetryDelay: options.RetryDelay,
OciDecryptConfig: options.OciDecryptConfig,
+ PullPolicy: options.PullPolicy,
}
- storageRef, transport, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
+ if !options.AllTags {
+ _, _, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
+ if err != nil {
+ return "", err
+ }
+ return img.ID, nil
+ }
+
+ srcRef, err := alltransports.ParseImageName(imageName)
+ if err == nil && srcRef.Transport().Name() != docker.Transport.Name() {
+ return "", errors.New("Non-docker transport is not supported, for --all-tags pulling")
+ }
+
+ storageRef, _, _, err := resolveImage(ctx, systemContext, options.Store, boptions)
if err != nil {
return "", err
}
var errs *multierror.Error
- if options.AllTags {
- if transport != util.DefaultTransport {
- return "", errors.New("Non-docker transport is not supported, for --all-tags pulling")
- }
-
- repo := reference.TrimNamed(storageRef.DockerReference())
- dockerRef, err := docker.NewReference(reference.TagNameOnly(storageRef.DockerReference()))
+ repo := reference.TrimNamed(storageRef.DockerReference())
+ dockerRef, err := docker.NewReference(reference.TagNameOnly(storageRef.DockerReference()))
+ if err != nil {
+ return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", storageRef.DockerReference().String())
+ }
+ tags, err := docker.GetRepositoryTags(ctx, systemContext, dockerRef)
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting repository tags")
+ }
+ for _, tag := range tags {
+ tagged, err := reference.WithTag(repo, tag)
if err != nil {
- return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", storageRef.DockerReference().String())
+ errs = multierror.Append(errs, err)
+ continue
}
- tags, err := docker.GetRepositoryTags(ctx, systemContext, dockerRef)
+ taggedRef, err := docker.NewReference(tagged)
if err != nil {
- return "", errors.Wrapf(err, "error getting repository tags")
+ return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", tagged.String())
}
- for _, tag := range tags {
- tagged, err := reference.WithTag(repo, tag)
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- taggedRef, err := docker.NewReference(tagged)
- if err != nil {
- return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", tagged.String())
+ if options.ReportWriter != nil {
+ if _, err := options.ReportWriter.Write([]byte("Pulling " + tagged.String() + "\n")); err != nil {
+ return "", errors.Wrapf(err, "error writing pull report")
}
- if options.ReportWriter != nil {
- if _, err := options.ReportWriter.Write([]byte("Pulling " + tagged.String() + "\n")); err != nil {
- return "", errors.Wrapf(err, "error writing pull report")
- }
- }
- ref, err := pullImage(ctx, options.Store, taggedRef, options, systemContext)
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- taggedImg, err := is.Transport.GetStoreImage(options.Store, ref)
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- imageID = taggedImg.ID
}
- } else {
- imageID = img.ID
- }
- if errs == nil {
- err = nil
- } else {
- err = errs.ErrorOrNil()
+ ref, err := pullImage(ctx, options.Store, taggedRef, options, systemContext)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ taggedImg, err := is.Transport.GetStoreImage(options.Store, ref)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ continue
+ }
+ imageID = taggedImg.ID
}
- return imageID, err
+ return imageID, errs.ErrorOrNil()
}
func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
@@ -238,9 +241,18 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
if blocked {
return nil, errors.Errorf("pull access to registry for %q is blocked by configuration", transports.ImageName(srcRef))
}
- if err := checkRegistrySourcesAllows("pull from", srcRef); err != nil {
+ insecure, err := checkRegistrySourcesAllows("pull from", srcRef)
+ if err != nil {
return nil, err
}
+ if insecure {
+ if sc.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
+ return nil, errors.Errorf("can't require tls verification on an insecured registry")
+ }
+ sc.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
+ sc.OCIInsecureSkipTLSVerify = true
+ sc.DockerDaemonInsecureSkipTLSVerify = true
+ }
destName, err := localImageNameForReference(ctx, store, srcRef)
if err != nil {
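
The new PullPolicy field and the reworked --all-tags path change how callers drive Pull. A rough sketch of a caller, assuming the policy constants named in the PullOptions comment (PullIfMissing and friends) are exported from the buildah package; the Store, ReportWriter, AllTags, and PullPolicy fields are the ones used in the hunks above:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

// pullIfMissing sketches how a caller might drive the new PullPolicy field.
// buildah.PullIfMissing is assumed from the PullOptions comment above.
func pullIfMissing(ctx context.Context, store storage.Store, name string) (string, error) {
	opts := buildah.PullOptions{
		Store:        store,                 // a storage.Store the caller has already opened
		ReportWriter: os.Stderr,             // progress lines such as "Pulling <ref>"
		AllTags:      false,                 // true would pull every tag of a docker-transport repo
		PullPolicy:   buildah.PullIfMissing, // assumed constant name
	}
	id, err := buildah.Pull(ctx, name, opts)
	if err != nil {
		return "", err
	}
	fmt.Fprintln(os.Stderr, "pulled image", id)
	return id, nil
}
```
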
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index bec1738e6..15417bba6 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -4,7 +4,6 @@ import (
"fmt"
"io"
- "github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runtime-spec/specs-go"
)
@@ -169,7 +168,7 @@ type RunOptions struct {
// lists, it will be dropped.
DropCapabilities []string
// Devices are the additional devices to add to the containers
- Devices []configs.Device
+ Devices ContainerDevices
}
// Find the configuration for the namespace of the given type. If there are
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 66a3ba997..d907941ed 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -23,6 +23,7 @@ import (
"github.com/containernetworking/cni/libcni"
"github.com/containers/buildah/bind"
"github.com/containers/buildah/chroot"
+ "github.com/containers/buildah/copier"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/secrets"
"github.com/containers/buildah/util"
@@ -37,6 +38,7 @@ import (
"github.com/docker/libnetwork/resolvconf"
"github.com/docker/libnetwork/types"
"github.com/opencontainers/go-digest"
+ "github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runtime-spec/specs-go"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
@@ -47,6 +49,9 @@ import (
"golang.org/x/sys/unix"
)
+// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
+type ContainerDevices = []configs.Device
+
func setChildProcess() error {
if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil {
fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err)
@@ -59,18 +64,18 @@ func setChildProcess() error {
func (b *Builder) Run(command []string, options RunOptions) error {
p, err := ioutil.TempDir("", Package)
if err != nil {
- return errors.Wrapf(err, "run: error creating temporary directory under %q", os.TempDir())
+ return err
}
// On some hosts like AH, /tmp is a symlink and we need an
// absolute path.
path, err := filepath.EvalSymlinks(p)
if err != nil {
- return errors.Wrapf(err, "run: error evaluating %q for symbolic links", p)
+ return err
}
logrus.Debugf("using %q to hold bundle data", path)
defer func() {
if err2 := os.RemoveAll(path); err2 != nil {
- logrus.Errorf("error removing %q: %v", path, err2)
+ logrus.Error(err2)
}
}()
@@ -161,11 +166,6 @@ func (b *Builder) Run(command []string, options RunOptions) error {
spec := g.Config
g = nil
- logrus.Debugf("ensuring working directory %q exists", filepath.Join(mountPoint, spec.Process.Cwd))
- if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil && !os.IsExist(err) {
- return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
- }
-
// Set the seccomp configuration using the specified profile name. Some syscalls are
// allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
// so we sorted out the capabilities lists first.
@@ -180,6 +180,15 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
+ mode := os.FileMode(0755)
+ coptions := copier.MkdirOptions{
+ ChownNew: rootIDPair,
+ ChmodNew: &mode,
+ }
+ if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
+ return err
+ }
+
bindFiles := make(map[string]string)
namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
volumes := b.Volumes()
@@ -209,7 +218,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
if _, ok := bindFiles["/run/.containerenv"]; !ok {
// Empty string for now, but we may consider populating this later
containerenvPath := filepath.Join(path, "/run/.containerenv")
- if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil && !os.IsExist(err) {
+ if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil {
return err
}
emptyFile, err := os.Create(containerenvPath)
@@ -218,7 +227,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
emptyFile.Close()
if err := label.Relabel(containerenvPath, b.MountLabel, false); err != nil {
- return errors.Wrapf(err, "error relabeling %q in container %q", containerenvPath, b.ContainerID)
+ return err
}
bindFiles["/run/.containerenv"] = containerenvPath
@@ -326,35 +335,35 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
// If we need to, initialize the volume path's initial contents.
if _, err := os.Stat(volumePath); err != nil {
if !os.IsNotExist(err) {
- return nil, errors.Wrapf(err, "failed to stat %q for volume %q", volumePath, volume)
+ return nil, err
}
logrus.Debugf("setting up built-in volume at %q", volumePath)
if err = os.MkdirAll(volumePath, 0755); err != nil {
- return nil, errors.Wrapf(err, "error creating directory %q for volume %q", volumePath, volume)
+ return nil, err
}
if err = label.Relabel(volumePath, mountLabel, false); err != nil {
- return nil, errors.Wrapf(err, "error relabeling directory %q for volume %q", volumePath, volume)
+ return nil, err
}
initializeVolume = true
}
stat, err := os.Stat(srcPath)
if err != nil {
if !os.IsNotExist(err) {
- return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
+ return nil, err
}
if err = idtools.MkdirAllAndChownNew(srcPath, 0755, hostOwner); err != nil {
- return nil, errors.Wrapf(err, "error creating directory %q for volume %q", srcPath, volume)
+ return nil, err
}
if stat, err = os.Stat(srcPath); err != nil {
- return nil, errors.Wrapf(err, "failed to stat %q for volume %q", srcPath, volume)
+ return nil, err
}
}
if initializeVolume {
if err = os.Chmod(volumePath, stat.Mode().Perm()); err != nil {
- return nil, errors.Wrapf(err, "failed to chmod %q for volume %q", volumePath, volume)
+ return nil, err
}
if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
- return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume)
+ return nil, err
}
if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
@@ -510,11 +519,11 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions NamespaceOptions) (string, error) {
stat, err := os.Stat(hostPath)
if err != nil {
- return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID)
+ return "", err
}
contents, err := ioutil.ReadFile(hostPath)
if err != nil {
- return "", errors.Wrapf(err, "unable to read %s", hostPath)
+ return "", err
}
search := resolvconf.GetSearchDomains(contents)
@@ -568,13 +577,12 @@ func (b *Builder) addNetworkConfig(rdir, hostPath string, chownOpts *idtools.IDP
gid = chownOpts.GID
}
if err = os.Chown(cfile, uid, gid); err != nil {
- return "", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID)
+ return "", err
}
if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
- return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID)
+ return "", err
}
-
return cfile, nil
}
@@ -583,13 +591,13 @@ func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownO
hostPath := "/etc/hosts"
stat, err := os.Stat(hostPath)
if err != nil {
- return "", errors.Wrapf(err, "error statting %q for container %q", hostPath, b.ContainerID)
+ return "", err
}
hosts := bytes.NewBufferString("# Generated by Buildah\n")
orig, err := ioutil.ReadFile(hostPath)
if err != nil {
- return "", errors.Wrapf(err, "unable to read %s", hostPath)
+ return "", err
}
hosts.Write(orig)
for _, host := range addHosts {
@@ -622,10 +630,10 @@ func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownO
gid = chownOpts.GID
}
if err = os.Chown(cfile, uid, gid); err != nil {
- return "", errors.Wrapf(err, "error chowning file %q for container %q", cfile, b.ContainerID)
+ return "", err
}
if err := label.Relabel(cfile, b.MountLabel, false); err != nil {
- return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID)
+ return "", err
}
return cfile, nil
@@ -674,7 +682,7 @@ func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork b
return 1, errors.Wrapf(err, "error encoding configuration %#v as json", spec)
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
- return 1, errors.Wrapf(err, "error storing runtime configuration in %q", filepath.Join(bundlePath, "config.json"))
+ return 1, errors.Wrapf(err, "error storing runtime configuration")
}
logrus.Debugf("config = %v", string(specbytes))
@@ -796,7 +804,7 @@ func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork b
// Make sure we read the container's exit status when it exits.
pidValue, err := ioutil.ReadFile(pidFile)
if err != nil {
- return 1, errors.Wrapf(err, "error reading pid from %q", pidFile)
+ return 1, err
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
if err != nil {
@@ -947,7 +955,7 @@ func runCollectOutput(fds, closeBeforeReadingFds []int) string {
func setupRootlessNetwork(pid int) (teardown func(), err error) {
slirp4netns, err := exec.LookPath("slirp4netns")
if err != nil {
- return nil, errors.Wrapf(err, "cannot find slirp4netns")
+ return nil, err
}
rootlessSlirpSyncR, rootlessSlirpSyncW, err := os.Pipe()
@@ -1564,7 +1572,7 @@ func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, i
p := filepath.Join("/proc/sys", strings.Replace(name, ".", "/", -1))
_, err := os.Stat(p)
if err != nil && !os.IsNotExist(err) {
- return false, nil, false, errors.Wrapf(err, "cannot stat %s", p)
+ return false, nil, false, err
}
if err == nil {
g.AddLinuxSysctl(name, val)
@@ -1702,12 +1710,12 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
}
if foundz {
if err := label.Relabel(host, mountLabel, true); err != nil {
- return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host)
+ return specs.Mount{}, err
}
}
if foundZ {
if err := label.Relabel(host, mountLabel, false); err != nil {
- return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host)
+ return specs.Mount{}, err
}
}
if foundO {
@@ -1978,13 +1986,12 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions
}
func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string) error {
- spec.Hostname = ""
spec.Process.User.AdditionalGids = nil
spec.Linux.Resources = nil
emptyDir := filepath.Join(bundleDir, "empty")
if err := os.Mkdir(emptyDir, 0); err != nil {
- return errors.Wrapf(err, "error creating %q", emptyDir)
+ return err
}
// Replace /sys with a read-only bind mount.
@@ -2134,10 +2141,6 @@ func checkAndOverrideIsolationOptions(isolation Isolation, options *RunOptions)
logrus.Debugf("Forcing use of a user namespace.")
}
options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UserNamespace)})
- if ns := options.NamespaceOptions.Find(string(specs.UTSNamespace)); ns != nil && !ns.Host {
- logrus.Debugf("Disabling UTS namespace.")
- }
- options.NamespaceOptions.AddOrReplace(NamespaceOption{Name: string(specs.UTSNamespace), Host: true})
case IsolationOCI:
pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
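
Instead of a plain os.MkdirAll, the working directory is now created through copier.Mkdir with explicit ownership and mode. A hedged sketch of that pattern in isolation, using the same MkdirOptions fields (ChownNew, ChmodNew) as the hunk; the helper name ensureWorkdir is illustrative, not buildah's:

```go
package main

import (
	"os"
	"path/filepath"

	"github.com/containers/buildah/copier"
	"github.com/containers/storage/pkg/idtools"
)

// ensureWorkdir creates workdir under the container's mount point, owned by
// the container's root user and with mode 0755, mirroring the call above.
func ensureWorkdir(mountPoint, workdir string, rootUID, rootGID int) error {
	mode := os.FileMode(0755)
	opts := copier.MkdirOptions{
		ChownNew: &idtools.IDPair{UID: rootUID, GID: rootGID},
		ChmodNew: &mode,
	}
	// The first argument is the root that copier operates under
	// (here, the container's mount point).
	return copier.Mkdir(mountPoint, filepath.Join(mountPoint, workdir), opts)
}
```
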
diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go
new file mode 100644
index 000000000..54ca122a4
--- /dev/null
+++ b/vendor/github.com/containers/buildah/run_unix.go
@@ -0,0 +1,24 @@
+// +build darwin
+
+package buildah
+
+import (
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/pkg/errors"
+)
+
+// ContainerDevices is an alias for a slice of github.com/opencontainers/runc/libcontainer/configs.Device structures.
+type ContainerDevices = []configs.Device
+
+func setChildProcess() error {
+ return errors.New("function not supported on non-linux systems")
+}
+
+func runUsingRuntimeMain() {}
+
+func (b *Builder) Run(command []string, options RunOptions) error {
+ return errors.New("function not supported on non-linux systems")
+}
+func DefaultNamespaceOptions() (NamespaceOptions, error) {
+ return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
+}
diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go
index d8ff46cf6..04cdc46bb 100644
--- a/vendor/github.com/containers/buildah/run_unsupported.go
+++ b/vendor/github.com/containers/buildah/run_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!darwin
package buildah
@@ -6,6 +6,9 @@ import (
"github.com/pkg/errors"
)
+// ContainerDevices is currently not implemented.
+type ContainerDevices = []struct{}
+
func setChildProcess() error {
return errors.New("function not supported on non-linux systems")
}
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 4b5a00e44..47c9ac5cd 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -189,7 +189,7 @@ func IsContainer(id string, store storage.Store) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
- return false, errors.Wrapf(err, "error stating %q", filepath.Join(cdir, stateFile))
+ return false, err
}
return true, nil
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 00efc8d21..99f68d9e1 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -12,10 +12,10 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/shortnames"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
- "github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
@@ -69,42 +69,10 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
}
- // If the image includes a transport's name as a prefix, use it as-is.
- if strings.HasPrefix(name, DefaultTransport) {
- return []string{strings.TrimPrefix(name, DefaultTransport)}, DefaultTransport, false, nil
- }
- split := strings.SplitN(name, ":", 2)
- if StartsWithValidTransport(name) && len(split) == 2 {
- if trans := transports.Get(split[0]); trans != nil {
- return []string{split[1]}, trans.Name(), false, nil
- }
- }
- // If the image name already included a domain component, we're done.
- named, err := reference.ParseNormalizedNamed(name)
- if err != nil {
- return nil, "", false, errors.Wrapf(err, "error parsing image name %q", name)
- }
- if named.String() == name {
- // Parsing produced the same result, so there was a domain name in there to begin with.
- return []string{name}, DefaultTransport, false, nil
- }
- if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" {
- // If this domain can cause us to insert something in the middle, check if that happened.
- repoPath := reference.Path(named)
- domain := reference.Domain(named)
- tag := ""
- if tagged, ok := named.(reference.Tagged); ok {
- tag = ":" + tagged.Tag()
- }
- digest := ""
- if digested, ok := named.(reference.Digested); ok {
- digest = "@" + digested.Digest().String()
- }
- defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/"
- if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name {
- // Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with.
- return []string{name}, DefaultTransport, false, nil
- }
+ // Transports are not supported for local image lookups.
+ srcRef, err := alltransports.ParseImageName(name)
+ if err == nil {
+ return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), false, nil
}
// Figure out the list of registries.
@@ -126,25 +94,26 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
searchRegistriesAreEmpty := len(registries) == 0
- // Create all of the combinations. Some registries need an additional component added, so
- // use our lookaside map to keep track of them. If there are no configured registries, we'll
- // return a name using "localhost" as the registry name.
- candidates := []string{}
- initRegistries := []string{"localhost"}
+ var candidates []string
+ // Set the first registry if requested.
if firstRegistry != "" && firstRegistry != "localhost" {
- initRegistries = append([]string{firstRegistry}, initRegistries...)
- }
- for _, registry := range append(initRegistries, registries...) {
- if registry == "" {
- continue
- }
middle := ""
- if prefix, ok := RegistryDefaultPathPrefix[registry]; ok && !strings.ContainsRune(name, '/') {
+ if prefix, ok := RegistryDefaultPathPrefix[firstRegistry]; ok && !strings.ContainsRune(name, '/') {
middle = prefix
}
- candidate := path.Join(registry, middle, name)
+ candidate := path.Join(firstRegistry, middle, name)
candidates = append(candidates, candidate)
}
+
+ // Local short-name resolution.
+ namedCandidates, err := shortnames.ResolveLocally(sc, name)
+ if err != nil {
+ return nil, "", false, err
+ }
+ for _, named := range namedCandidates {
+ candidates = append(candidates, named.String())
+ }
+
return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
}
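
ResolveName now lets alltransports.ParseImageName detect transport-prefixed names and uses shortnames.ResolveLocally for everything else. A small sketch of the first half, showing what the two methods used above return for a transport-prefixed input (the printed values are indicative):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	// A transport-prefixed name, as ResolveName now detects it.
	ref, err := alltransports.ParseImageName("docker://quay.io/example/image:latest")
	if err != nil {
		panic(err)
	}
	// ResolveName returns these two pieces for transport-prefixed input.
	fmt.Println(ref.Transport().Name())      // docker
	fmt.Println(ref.StringWithinTransport()) // //quay.io/example/image:latest
}
```
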
diff --git a/vendor/github.com/containers/buildah/util/util_linux.go b/vendor/github.com/containers/buildah/util/util_linux.go
index 1a13699df..cca1f9e7e 100644
--- a/vendor/github.com/containers/buildah/util/util_linux.go
+++ b/vendor/github.com/containers/buildah/util/util_linux.go
@@ -1,7 +1,6 @@
package util
import (
- "os"
"syscall"
"golang.org/x/sys/unix"
@@ -19,11 +18,3 @@ func IsCgroup2UnifiedMode() (bool, error) {
})
return isUnified, isUnifiedErr
}
-
-func UID(st os.FileInfo) int {
- return int(st.Sys().(*syscall.Stat_t).Uid)
-}
-
-func GID(st os.FileInfo) int {
- return int(st.Sys().(*syscall.Stat_t).Gid)
-}
diff --git a/vendor/github.com/containers/buildah/util/util_not_uint64.go b/vendor/github.com/containers/buildah/util/util_not_uint64.go
index 92ff23b41..a318fb308 100644
--- a/vendor/github.com/containers/buildah/util/util_not_uint64.go
+++ b/vendor/github.com/containers/buildah/util/util_not_uint64.go
@@ -1,4 +1,4 @@
-// +build darwin
+// +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le
package util
diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/util/util_uint64.go
index b271e3d11..b0b922531 100644
--- a/vendor/github.com/containers/buildah/util/util_uint64.go
+++ b/vendor/github.com/containers/buildah/util/util_uint64.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,!mips,!mipsle,!mips64,!mips64le
package util
diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go
index 04d9a01cc..29983e40f 100644
--- a/vendor/github.com/containers/buildah/util/util_unix.go
+++ b/vendor/github.com/containers/buildah/util/util_unix.go
@@ -29,3 +29,11 @@ func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name)
}
}
+
+func UID(st os.FileInfo) int {
+ return int(st.Sys().(*syscall.Stat_t).Uid)
+}
+
+func GID(st os.FileInfo) int {
+ return int(st.Sys().(*syscall.Stat_t).Gid)
+}
diff --git a/vendor/github.com/containers/buildah/util/util_unsupported.go b/vendor/github.com/containers/buildah/util/util_unsupported.go
index 8810536a6..05a68f60b 100644
--- a/vendor/github.com/containers/buildah/util/util_unsupported.go
+++ b/vendor/github.com/containers/buildah/util/util_unsupported.go
@@ -2,19 +2,7 @@
package util
-import (
- "os"
-)
-
// IsCgroup2UnifiedMode returns whether we are running in cgroup2 (unified) mode.
func IsCgroup2UnifiedMode() (bool, error) {
return false, nil
}
-
-func UID(st os.FileInfo) int {
- return 0
-}
-
-func GID(st os.FileInfo) int {
- return 0
-}
diff --git a/vendor/github.com/containers/buildah/util/util_windows.go b/vendor/github.com/containers/buildah/util/util_windows.go
index 0e7f92325..18965ab17 100644
--- a/vendor/github.com/containers/buildah/util/util_windows.go
+++ b/vendor/github.com/containers/buildah/util/util_windows.go
@@ -14,3 +14,11 @@ func (h *HardlinkChecker) Check(fi os.FileInfo) string {
}
func (h *HardlinkChecker) Add(fi os.FileInfo, name string) {
}
+
+func UID(st os.FileInfo) int {
+ return 0
+}
+
+func GID(st os.FileInfo) int {
+ return 0
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 18fab5485..2769781f2 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -113,6 +113,10 @@ type ContainersConfig struct {
// DNSSearches set default DNS search domains.
DNSSearches []string `toml:"dns_searches,omitempty"`
+ // EnableKeyring tells the container engines whether to create
+ // a kernel keyring for use within the container
+ EnableKeyring bool `toml:"keyring,omitempty"`
+
// EnableLabeling tells the container engines whether to use MAC
// Labeling to separate containers (SELinux)
EnableLabeling bool `toml:"label,omitempty"`
@@ -282,7 +286,7 @@ type EngineConfig struct {
PullPolicy string `toml:"pull_policy,omitempty"`
// Indicates whether the application should be running in Remote mode
- Remote bool `toml:"-"`
+ Remote bool `toml:"remote,omitempty"`
// RemoteURI is deprecated, see ActiveService
// RemoteURI containers connection information used to connect to remote system.
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 261cfd1cb..ed7c91931 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -76,10 +76,10 @@ default_capabilities = [
# A list of sysctls to be set in containers by default,
# specified as "name=value",
-# for example:"net.ipv4.ping_group_range = 0 1".
+# for example:"net.ipv4.ping_group_range = 0 0".
#
default_sysctls = [
- "net.ipv4.ping_group_range=0 1",
+ "net.ipv4.ping_group_range=0 0",
]
# A list of ulimits to be set in containers by default, specified as
@@ -146,9 +146,13 @@ default_sysctls = [
#
# ipcns = "private"
-# Flag tells container engine to whether to use container separation using
-# MAC(SELinux)labeling or not.
-# Flag is ignored on label disabled systems.
+# keyring tells the container engine whether to create
+# a kernel keyring for use within the container.
+# keyring = true
+
+# label tells the container engine whether to use container separation using
+# MAC(SELinux) labeling or not.
+# The label flag is ignored on label disabled systems.
#
# label = true
@@ -357,6 +361,11 @@ default_sysctls = [
# Whether to pull new image before running a container
# pull_policy = "missing"
+# Indicates whether the application should be running in remote mode. This flag modifies the
+# --remote option on container engines. Setting the flag to true will default to
+# `podman --remote=true`, giving access to the remote Podman service.
+# remote = false
+
# Directory for persistent engine files (database, etc)
# By default, this will be configured relative to where the containers/storage
# stores containers
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 5f8f4999f..4f1460e3b 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -46,8 +46,6 @@ var (
DefaultInitPath = "/usr/libexec/podman/catatonit"
// DefaultInfraImage to use for infra container
DefaultInfraImage = "k8s.gcr.io/pause:3.2"
- // DefaultInfraCommand to be run in an infra container
- DefaultInfraCommand = "/pause"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
// DefaultDetachKeys is the default keys sequence for detaching a
@@ -179,6 +177,7 @@ func DefaultConfig() (*Config, error) {
DNSServers: []string{},
DNSOptions: []string{},
DNSSearches: []string{},
+ EnableKeyring: true,
EnableLabeling: selinuxEnabled(),
Env: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
@@ -308,7 +307,6 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
c.InitPath = DefaultInitPath
c.NoPivotRoot = false
- c.InfraCommand = DefaultInfraCommand
c.InfraImage = DefaultInfraImage
c.EnablePortReservation = true
c.NumLocks = 2048
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go
index d0ac19fb6..f6ecab0c0 100644
--- a/vendor/github.com/containers/common/pkg/retry/retry.go
+++ b/vendor/github.com/containers/common/pkg/retry/retry.go
@@ -30,7 +30,7 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions
if retryOptions.Delay != 0 {
delay = retryOptions.Delay
}
- logrus.Infof("Warning: failed, retrying in %s ... (%d/%d)", delay, attempt+1, retryOptions.MaxRetry)
+ logrus.Infof("Warning: failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err)
select {
case <-time.After(delay):
break
diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
index ddc25ac67..09629724d 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
@@ -174,6 +174,7 @@ func DefaultProfile() *Seccomp {
"ioprio_get",
"ioprio_set",
"ipc",
+ "keyctl",
"kill",
"lchown",
"lchown32",
@@ -327,6 +328,7 @@ func DefaultProfile() *Seccomp {
"signalfd",
"signalfd4",
"sigreturn",
+ "socket",
"socketcall",
"socketpair",
"splice",
diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go
index ab2a94a73..1177ef630 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/supported.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go
@@ -1,3 +1,5 @@
+// +build !windows
+
package seccomp
import (
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index 8e497e7fa..72f4e00f7 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.26.3"
+const Version = "0.29.0"
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index d8e3fa106..4d5b07689 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -1166,6 +1166,21 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
return digest.Canonical.FromReader(stream)
}
+// errorAnnotationReader wraps the io.Reader passed to PutBlob so that errors which occur during reads are annotated.
+// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
+type errorAnnotationReader struct {
+ reader io.Reader
+}
+
+// Read annotates any error that occurs during the read
+func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
+ n, err = r.reader.Read(b)
+ if err != io.EOF {
+ return n, errors.Wrapf(err, "error happened during read")
+ }
+ return n, err
+}
+
// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps compressing it if canCompress,
@@ -1335,7 +1350,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// === Finally, send the layer stream to dest.
- uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
+ uploadedInfo, err := c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
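
The errorAnnotationReader added above exists so that a failure on the source stream is not misreported as a destination PutBlob failure. A self-contained sketch of the same wrapping idea (stdlib error wrapping instead of pkg/errors, and hypothetical reader names):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// flakySource stands in for a source blob stream that fails mid-transfer.
type flakySource struct{ r io.Reader }

func (f flakySource) Read(b []byte) (int, error) {
	n, err := f.r.Read(b)
	if err == io.EOF {
		return n, fmt.Errorf("connection reset by source registry")
	}
	return n, err
}

// annotatingReader mirrors errorAnnotationReader: non-EOF read errors get a
// prefix identifying the read side, so they are not blamed on the destination.
type annotatingReader struct{ r io.Reader }

func (a annotatingReader) Read(b []byte) (int, error) {
	n, err := a.r.Read(b)
	if err != nil && err != io.EOF {
		return n, fmt.Errorf("error happened during read: %w", err)
	}
	return n, err
}

func main() {
	src := annotatingReader{flakySource{strings.NewReader("layer bytes")}}
	_, err := io.Copy(io.Discard, src)
	fmt.Println(err) // error happened during read: connection reset by source registry
}
```
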
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go
index 479effa59..0c1cee0d3 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image.go
@@ -4,12 +4,15 @@ import (
"context"
"encoding/json"
"fmt"
+ "net/http"
"net/url"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -103,3 +106,47 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
}
return tags, nil
}
+
+// GetDigest returns the image's digest
+// Use this to optimize and avoid use of an ImageSource based on the returned digest;
+// if you are going to use an ImageSource anyway, it’s more efficient to create it first
+// and compute the digest from the value returned by GetManifest.
+// NOTE: Implemented to avoid Docker Hub API limits; mirror configuration is currently
+// ignored (but may be taken into account in the future).
+func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) {
+ dr, ok := ref.(dockerReference)
+ if !ok {
+ return "", errors.Errorf("ref must be a dockerReference")
+ }
+
+ tagOrDigest, err := dr.tagOrDigest()
+ if err != nil {
+ return "", err
+ }
+
+ client, err := newDockerClientFromRef(sys, dr, false, "pull")
+ if err != nil {
+ return "", errors.Wrap(err, "failed to create client")
+ }
+
+ path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
+ headers := map[string][]string{
+ "Accept": manifest.DefaultRequestedManifestMIMETypes,
+ }
+
+ res, err := client.makeRequest(ctx, "HEAD", path, headers, nil, v2Auth, nil)
+ if err != nil {
+ return "", err
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusOK {
+ return "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading digest %s in %s", tagOrDigest, dr.ref.Name())
+ }
+
+ dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
+ if err != nil {
+ return "", err
+ }
+
+ return dig, nil
+}
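
The new GetDigest helper answers "what digest does this tag point to?" with a single HEAD request instead of building a full ImageSource. A hedged sketch of calling it (requires network access to the registry; the reference string is just an example):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	sys := &types.SystemContext{}

	// docker.ParseReference takes the reference without the "docker:" prefix.
	ref, err := docker.ParseReference("//quay.io/example/image:latest")
	if err != nil {
		panic(err)
	}

	// A single HEAD request; the digest comes from the Docker-Content-Digest header.
	dig, err := docker.GetDigest(ctx, sys, ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(dig)
}
```
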
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
new file mode 100644
index 000000000..e02703d77
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
@@ -0,0 +1,458 @@
+package shortnames
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/sysregistriesv2"
+ "github.com/containers/image/v5/types"
+ "github.com/manifoldco/promptui"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+// IsShortName returns true if the specified input is a "short name". A "short
+// name" refers to a container image without a fully-qualified reference, and
+// is hence missing a registry (or domain). Names including a digest are not
+// short names.
+//
+// Examples:
+// * short names: "image:tag", "library/fedora"
+// * not short names: "quay.io/image", "localhost/image:tag",
+// "server.org:5000/lib/image", "image@sha256:..."
+func IsShortName(input string) bool {
+ isShort, _, _ := parseUnnormalizedShortName(input)
+ return isShort
+}
+
+// parseUnnormalizedShortName parses the input and returns whether it is a short name,
+// the unnormalized reference.Named, and a parsing error.
+func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
+ ref, err := reference.Parse(input)
+ if err != nil {
+ return false, nil, errors.Wrapf(err, "cannot parse input: %q", input)
+ }
+
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return true, nil, errors.Errorf("%q is not a named reference", input)
+ }
+
+ registry := reference.Domain(named)
+ if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+ // A final parse to make sure that docker.io references are correctly
+ // normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+ named, err = reference.ParseNormalizedNamed(input)
+ if err != nil {
+ return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input)
+ }
+ return false, named, nil
+ }
+
+ return true, named, nil
+}
+
+// splitUserInput parses the user-specified reference. Namely, it strips off
+// the tag or digest and stores it in the return values so that both can be
+// re-added to a possibly resolved alias or to the unqualified-search registries later on.
+func splitUserInput(named reference.Named) (isTagged bool, isDigested bool, normalized reference.Named, tag string, digest digest.Digest) {
+ normalized = named
+
+ tagged, isT := named.(reference.NamedTagged)
+ if isT {
+ isTagged = true
+ tag = tagged.Tag()
+ }
+
+ digested, isD := named.(reference.Digested)
+ if isD {
+ isDigested = true
+ digest = digested.Digest()
+ }
+
+ // Strip off tag/digest if present.
+ normalized = reference.TrimNamed(named)
+
+ return
+}
+
+// Add records the specified name-value pair as a new short-name alias to the
+// user-specific aliases.conf. It may override an existing alias for `name`.
+func Add(ctx *types.SystemContext, name string, value reference.Named) error {
+ isShort, _, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return err
+ }
+ if !isShort {
+ return errors.Errorf("%q is not a short name", name)
+ }
+ return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
+}
+
+// Remove clears the short-name alias for the specified name. It throws an
+// error in case name does not exist in the machine-generated
+// short-name-alias.conf. In such case, the alias must be specified in one of
+// the registries.conf files, which is the users' responsibility.
+func Remove(ctx *types.SystemContext, name string) error {
+ isShort, _, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return err
+ }
+ if !isShort {
+ return errors.Errorf("%q is not a short name", name)
+ }
+ return sysregistriesv2.RemoveShortNameAlias(ctx, name)
+}
+
+// Resolved encapsulates all data for a resolved image name.
+type Resolved struct {
+ PullCandidates []PullCandidate
+
+ userInput reference.Named
+ systemContext *types.SystemContext
+ rationale rationale
+ originDescription string
+}
+
+func (r *Resolved) addCandidate(named reference.Named) {
+ r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r})
+}
+
+func (r *Resolved) addCandidateToRecord(named reference.Named) {
+ r.PullCandidates = append(r.PullCandidates, PullCandidate{named, true, r})
+}
+
+// rationale allows reasoning about pull errors and adding some context information.
+// Used in (*Resolved).Description.
+type rationale int
+
+const (
+ // No additional context.
+ rationaleNone rationale = iota
+ // Resolved value is a short-name alias.
+ rationaleAlias
+ // Resolved value has been completed with an Unqualified Search Registry.
+ rationaleUSR
+ // Resolved value has been selected by the user (via the prompt).
+ rationaleUserSelection
+)
+
+// Description returns a human-readable description about the resolution
+// process (e.g., short-name alias, unqualified-search registries, etc.).
+// It is meant to be printed before attempting to pull the pull candidates
+// to make the short-name resolution more transparent to the user.
+//
+// If the returned string is empty, it is not meant to be printed.
+func (r *Resolved) Description() string {
+ switch r.rationale {
+ case rationaleAlias:
+ return fmt.Sprintf("Resolved short name %q to a recorded short-name alias (origin: %s)", r.userInput, r.originDescription)
+ case rationaleUSR:
+ return fmt.Sprintf("Completed short name %q with unqualified-search registries (origin: %s)", r.userInput, r.originDescription)
+ case rationaleUserSelection, rationaleNone:
+ fallthrough
+ default:
+ return ""
+ }
+}
+
+// FormatPullErrors is a convenience function to format errors that occurred
+// while trying to pull all of the resolved pull candidates.
+//
+// Note that nil is returned if len(pullErrors) == 0. Otherwise, the number of
+// pull errors must equal the number of pull candidates.
+func (r *Resolved) FormatPullErrors(pullErrors []error) error {
+ if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) {
+ pullErrors = append(pullErrors,
+ errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+ len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
+ }
+
+ switch len(pullErrors) {
+ case 0:
+ return nil
+ case 1:
+ return pullErrors[0]
+ default:
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("%d errors occurred while pulling:", len(pullErrors)))
+ for _, e := range pullErrors {
+ sb.WriteString("\n * ")
+ sb.WriteString(e.Error())
+ }
+ return errors.New(sb.String())
+ }
+}
+
+// PullCandidate is a resolved name. Once the Value has been used
+// successfully, users MUST call `(*PullCandidate).Record(..)` to possibly
+// record it as a new short-name alias.
+type PullCandidate struct {
+ // Fully-qualified reference with tag or digest.
+ Value reference.Named
+ // Control whether to record it permanently as an alias.
+ record bool
+
+ // Backwards pointer to the Resolved "parent".
+ resolved *Resolved
+}
+
+// Record may store a short-name alias for the PullCandidate.
+func (c *PullCandidate) Record() error {
+ if !c.record {
+ return nil
+ }
+
+ // Strip off tags/digests from name/value.
+ name := reference.TrimNamed(c.resolved.userInput)
+ value := reference.TrimNamed(c.Value)
+
+ if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
+ return errors.Wrapf(err, "error recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+ }
+ return nil
+}
+
+// Resolve resolves the specified name to either one or more fully-qualified
+// image references that the short name may be *pulled* from. If the specified
+// name is already a fully-qualified reference (i.e., not a short name), it is
+// returned as is. If it's a short name, it's resolved according to the
+// ShortNameMode in the SystemContext (if specified) or in the registries.conf.
+//
+// Note that tags and digests are stripped from the specified name before
+// looking up an alias. Stripped off tags and digests are later on appended to
+// all candidates. If neither tag nor digest is specified, candidates are
+// normalized with the "latest" tag. PullCandidates in the returned value may
+// be empty if there is no matching alias and no unqualified-search registries
+// are configured.
+//
+// Note that callers *must* call `(PullCandidate).Record` after a returned
+// item has been pulled successfully; this callback will record a new
+// short-name alias (depending on the specified short-name mode).
+//
+// Furthermore, before attempting to pull callers *should* call
+// `(Resolved).Description` and afterwards use
+// `(Resolved).FormatPullErrors` in case of pull errors.
+func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
+ resolved := &Resolved{}
+
+ // Create a copy of the system context to make it usable beyond this
+ // function call.
+ var sys *types.SystemContext
+ if ctx != nil {
+ sysCopy := *ctx
+ sys = &sysCopy
+ }
+ resolved.systemContext = ctx
+
+ // Detect which mode we're running in.
+ mode, err := sysregistriesv2.GetShortNameMode(sys)
+ if err != nil {
+ return nil, err
+ }
+
+ // Sanity check the short-name mode.
+ switch mode {
+ case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
+ // We're good.
+ default:
+ return nil, errors.Errorf("unsupported short-name mode (%v)", mode)
+ }
+
+ isShort, shortRef, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return nil, err
+ }
+ if !isShort { // no short name
+ named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed
+ resolved.addCandidate(named)
+ return resolved, nil
+ }
+
+ // Strip off the tag to normalize the short name for looking it up in
+ // the config files.
+ isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
+ resolved.userInput = shortNameRepo
+
+ // If there's already an alias, use it.
+ namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(sys, shortNameRepo.String())
+ if err != nil {
+ return nil, err
+ }
+
+ // Always use an alias if present.
+ if namedAlias != nil {
+ if isTagged {
+ namedAlias, err = reference.WithTag(namedAlias, tag)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if isDigested {
+ namedAlias, err = reference.WithDigest(namedAlias, digest)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Make sure to add ":latest" if needed
+ namedAlias = reference.TagNameOnly(namedAlias)
+
+ resolved.addCandidate(namedAlias)
+ resolved.rationale = rationaleAlias
+ resolved.originDescription = aliasOriginDescription
+ return resolved, nil
+ }
+
+ resolved.rationale = rationaleUSR
+
+ // Query the registry for unqualified-search registries.
+ unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(sys)
+ if err != nil {
+ return nil, err
+ }
+ resolved.originDescription = usrConfig
+
+ for _, reg := range unqualifiedSearchRegistries {
+ named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating reference with unqualified-search registry %q", reg)
+ }
+ // Make sure to add ":latest" if needed
+ named = reference.TagNameOnly(named)
+
+ resolved.addCandidate(named)
+ }
+
+ // If we're running in disabled, return the candidates without
+ // prompting (and without recording).
+ if mode == types.ShortNameModeDisabled {
+ return resolved, nil
+ }
+
+ // If we have only one candidate, there's no ambiguity. In case of an
+ // empty candidate slices, callers can implement custom logic or raise
+ // an error.
+ if len(resolved.PullCandidates) <= 1 {
+ return resolved, nil
+ }
+
+ // If we don't have a TTY, act according to the mode.
+ if !terminal.IsTerminal(int(os.Stdout.Fd())) || !terminal.IsTerminal(int(os.Stdin.Fd())) {
+ switch mode {
+ case types.ShortNameModePermissive:
+ // Permissive falls back to using all candidates.
+ return resolved, nil
+ case types.ShortNameModeEnforcing:
+ // Enforcing errors out without a prompt.
+ return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY")
+ default:
+ // We should not end up here.
+ return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode)
+ }
+ }
+
+ // We have a TTY, and can prompt the user with a selection of all
+ // possible candidates.
+ strCandidates := []string{}
+ for _, candidate := range resolved.PullCandidates {
+ strCandidates = append(strCandidates, candidate.Value.String())
+ }
+ prompt := promptui.Select{
+ Label: "Please select an image",
+ Items: strCandidates,
+ HideHelp: true, // do not show navigation help
+ }
+
+ _, selection, err := prompt.Run()
+ if err != nil {
+ return nil, err
+ }
+
+ named, err := reference.ParseNormalizedNamed(selection)
+ if err != nil {
+ return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection)
+ }
+
+ resolved.PullCandidates = nil
+ resolved.addCandidateToRecord(named)
+ resolved.rationale = rationaleUserSelection
+
+ return resolved, nil
+}
+
+// ResolveLocally resolves the specified name to either one or more local
+// images. If the specified name is already a fully-qualified reference (i.e.,
+// not a short name), it is returned as is. If it's a short name, the
+// returned slice of named references looks as follows:
+//
+// 1) If present, the short-name alias
+// 2) "localhost/" as used by many container engines such as Podman and Buildah
+// 3) Unqualified-search registries from the registries.conf files
+//
+// Note that tags and digests are stripped from the specified name before
+// looking up an alias. Stripped off tags and digests are later on appended to
+// all candidates. If neither tag nor digest is specified, candidates are
+// normalized with the "latest" tag. The returned slice contains at least one
+// item.
+func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, error) {
+ isShort, shortRef, err := parseUnnormalizedShortName(name)
+ if err != nil {
+ return nil, err
+ }
+ if !isShort { // no short name
+ named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed
+ return []reference.Named{named}, nil
+ }
+
+ var candidates []reference.Named
+
+ // Strip off the tag to normalize the short name for looking it up in
+ // the config files.
+ isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
+
+ // If there's already an alias, use it.
+ namedAlias, _, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
+ if err != nil {
+ return nil, err
+ }
+ if namedAlias != nil {
+ if isTagged {
+ namedAlias, err = reference.WithTag(namedAlias, tag)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if isDigested {
+ namedAlias, err = reference.WithDigest(namedAlias, digest)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Make sure to add ":latest" if needed
+ namedAlias = reference.TagNameOnly(namedAlias)
+
+ candidates = append(candidates, namedAlias)
+ }
+
+ // Query the registries configuration for unqualified-search registries.
+ unqualifiedSearchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Note that "localhost" has precedence over the unqualified-search registries.
+ for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) {
+ named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name))
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating reference with unqualified-search registry %q", reg)
+ }
+ // Make sure to add ":latest" if needed
+ named = reference.TagNameOnly(named)
+
+ candidates = append(candidates, named)
+ }
+
+ return candidates, nil
+}
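
Taken together, the exported pieces above suggest the following calling pattern: Resolve, print Description, try each PullCandidate, Record the one that worked, and otherwise FormatPullErrors. A sketch under those assumptions; pullFunc is a hypothetical stand-in for whatever actually pulls a candidate:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/shortnames"
	"github.com/containers/image/v5/types"
)

// pullShortName follows the contract documented above; pullFunc is hypothetical.
func pullShortName(sys *types.SystemContext, name string, pullFunc func(reference.Named) error) error {
	resolved, err := shortnames.Resolve(sys, name)
	if err != nil {
		return err
	}
	if desc := resolved.Description(); desc != "" {
		fmt.Println(desc)
	}

	var pullErrors []error
	for _, candidate := range resolved.PullCandidates {
		if err := pullFunc(candidate.Value); err != nil {
			pullErrors = append(pullErrors, err)
			continue
		}
		// Record the candidate that was actually used so it can be stored
		// as a short-name alias (depending on the configured mode).
		return candidate.Record()
	}
	return resolved.FormatPullErrors(pullErrors)
}
```
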
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
new file mode 100644
index 000000000..4001b65b6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
@@ -0,0 +1,328 @@
+package sysregistriesv2
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/containers/storage/pkg/lockfile"
+ "github.com/pkg/errors"
+)
+
+// defaultShortNameMode is the default mode of registries.conf files if the
+// corresponding field is left empty.
+const defaultShortNameMode = types.ShortNameModePermissive
+
+// userShortNamesFile is the user-specific config file to store aliases.
+var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf")
+
+// shortNameAliasesConfPath returns the path to the machine-generated
+// short-name-aliases.conf file.
+func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
+ if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
+ return ctx.UserShortNameAliasConfPath, nil
+ }
+
+ configHome, err := homedir.GetConfigHome()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(configHome, userShortNamesFile), nil
+}
+
+// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
+// software-maintained `userShortNamesFile`.
+type shortNameAliasConf struct {
+ // A map for aliasing short names to their fully-qualified image
+ // reference counterparts.
+ // Note that Aliases is set to nil after being loaded from a file.
+ Aliases map[string]string `toml:"aliases"`
+}
+
+// alias combines the parsed value of an alias with the config file it has been
+// specified in. The config file is crucial for an improved user experience
+// such that users are able to resolve potential pull errors.
+type alias struct {
+ // The parsed value of an alias. May be nil if set to "" in a config.
+ value reference.Named
+ // The config file the alias originates from.
+ configOrigin string
+}
+
+// shortNameAliasCache is the result of parsing shortNameAliasConf,
+// pre-processed for faster usage.
+type shortNameAliasCache struct {
+ // Note that an alias value may be nil iff it's set as an empty string
+ // in the config.
+ namedAliases map[string]alias
+}
+
+// ResolveShortNameAlias performs an alias resolution of the specified name.
+// The user-specific short-name-aliases.conf has precedence over aliases in the
+// assembled registries.conf. It returns the possibly resolved alias or nil, a
+// human-readable description of the config where the alias is specified, and
+// an error. The origin of the config file is crucial for an improved user
+// experience such that users are able to resolve potential pull errors.
+// Almost all callers should use pkg/shortnames instead.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
+ if err := validateShortName(name); err != nil {
+ return nil, "", err
+ }
+ confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // Acquire the lock as a reader to allow for multiple routines in the
+ // same process space to read simultaneously.
+ lock.RLock()
+ defer lock.Unlock()
+
+ _, aliasCache, err := loadShortNameAliasConf(confPath)
+ if err != nil {
+ return nil, "", err
+ }
+
+ // First look up the short-name-aliases.conf. Note that a value may be
+ // nil iff it's set as an empty string in the config.
+ alias, resolved := aliasCache.namedAliases[name]
+ if resolved {
+ return alias.value, alias.configOrigin, nil
+ }
+
+ config, err := getConfig(ctx)
+ if err != nil {
+ return nil, "", err
+ }
+ alias, resolved = config.aliasCache.namedAliases[name]
+ if resolved {
+ return alias.value, alias.configOrigin, nil
+ }
+ return nil, "", nil
+}
+
+// editShortNameAlias loads the short-name-aliases.conf file and edits it. If
+// value is set, it adds the name-value pair as a new alias. Otherwise, it
+// removes name from the config.
+func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
+ if err := validateShortName(name); err != nil {
+ return err
+ }
+ if value != nil {
+ if _, err := parseShortNameValue(*value); err != nil {
+ return err
+ }
+ }
+
+ confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Acquire the lock as a writer to prevent data corruption.
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Load the short-name-aliases.conf, apply the change, and write it
+ // back to the file.
+ conf, _, err := loadShortNameAliasConf(confPath)
+ if err != nil {
+ return err
+ }
+
+ if conf.Aliases == nil { // Ensure we have a map to update.
+ conf.Aliases = make(map[string]string)
+ }
+ if value != nil {
+ conf.Aliases[name] = *value
+ } else {
+ // If the name does not exist, return an error.
+ if _, exists := conf.Aliases[name]; !exists {
+ return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
+ }
+
+ delete(conf.Aliases, name)
+ }
+
+ f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ encoder := toml.NewEncoder(f)
+ return encoder.Encode(conf)
+}
+
+// AddShortNameAlias adds the specified name-value pair as a new alias to the
+// user-specific aliases.conf. It may override an existing alias for `name`.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error {
+ return editShortNameAlias(ctx, name, &value)
+}
+
+// RemoveShortNameAlias clears the alias for the specified name. It returns an
+// error if name does not exist in the machine-generated
+// short-name-aliases.conf. In that case, the alias must have been specified in
+// one of the registries.conf files; removing it there is the user's responsibility.
+//
+// Note that it’s the caller’s responsibility to pass only a repository
+// (reference.IsNameOnly) as the short name.
+func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
+ return editShortNameAlias(ctx, name, nil)
+}
+
+// parseShortNameValue parses the specified alias into a reference.Named. The alias is
+// expected to not be tagged or carry a digest and *must* include a
+// domain/registry.
+//
+// Note that the returned reference is always normalized.
+func parseShortNameValue(alias string) (reference.Named, error) {
+ ref, err := reference.Parse(alias)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error parsing alias %q", alias)
+ }
+
+ if _, ok := ref.(reference.Digested); ok {
+ return nil, errors.Errorf("invalid alias %q: must not contain digest", alias)
+ }
+
+ if _, ok := ref.(reference.Tagged); ok {
+ return nil, errors.Errorf("invalid alias %q: must not contain tag", alias)
+ }
+
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ }
+
+ registry := reference.Domain(named)
+ if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
+ return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
+ }
+
+ // A final parse to make sure that docker.io references are correctly
+ // normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+ named, err = reference.ParseNormalizedNamed(alias)
+ return named, err
+}
+
+// validateShortName parses the specified `name` of an alias (i.e., the left-hand
+// side) and checks if it's a short name and does not include a tag or digest.
+func validateShortName(name string) error {
+ repo, err := reference.Parse(name)
+ if err != nil {
+ return errors.Wrapf(err, "cannot parse short name: %q", name)
+ }
+
+ if _, ok := repo.(reference.Digested); ok {
+ return errors.Errorf("invalid short name %q: must not contain digest", name)
+ }
+
+ if _, ok := repo.(reference.Tagged); ok {
+ return errors.Errorf("invalid short name %q: must not contain tag", name)
+ }
+
+ named, ok := repo.(reference.Named)
+ if !ok {
+ return errors.Errorf("invalid short name %q: no name", name)
+ }
+
+ registry := reference.Domain(named)
+ if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+ return errors.Errorf("invalid short name %q: must not contain registry", name)
+ }
+ return nil
+}
+
+// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
+// representation.
+func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
+ res := shortNameAliasCache{
+ namedAliases: make(map[string]alias),
+ }
+ errs := []error{}
+ for name, value := range conf.Aliases {
+ if err := validateShortName(name); err != nil {
+ errs = append(errs, err)
+ }
+
+ // An empty right-hand side value in a config file resets an alias
+ // set in a previously loaded config. This way, drop-in config files
+ // from registries.conf.d can reset potentially misconfigured aliases.
+ if value == "" {
+ res.namedAliases[name] = alias{nil, path}
+ continue
+ }
+
+ named, err := parseShortNameValue(value)
+ if err != nil {
+ // We want to report *all* malformed entries to avoid a
+ // whack-a-mole for the user.
+ errs = append(errs, err)
+ } else {
+ res.namedAliases[name] = alias{named, path}
+ }
+ }
+ if len(errs) > 0 {
+ err := errs[0]
+ for i := 1; i < len(errs); i++ {
+ err = errors.Wrapf(err, "%v\n", errs[i])
+ }
+ return nil, err
+ }
+ return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+// In case of conflict, updates is preferred.
+func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
+ for name, value := range updates.namedAliases {
+ c.namedAliases[name] = value
+ }
+}
+
+func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
+ conf := shortNameAliasConf{}
+
+ _, err := toml.DecodeFile(confPath, &conf)
+ if err != nil && !os.IsNotExist(err) {
+ // It's okay if the config doesn't exist. Other errors are not.
+ return nil, nil, errors.Wrapf(err, "error loading short-name aliases config file %q", confPath)
+ }
+
+ // Even if we don’t always need the cache, creating it validates the machine-generated
+ // config, which could still be corrupted by another process or user.
+ cache, err := newShortNameAliasCache(confPath, &conf)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "error loading short-name aliases config file %q", confPath)
+ }
+
+ return &conf, cache, nil
+}
+
+func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) {
+ shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
+ if err != nil {
+ return "", nil, err
+ }
+ // Make sure the directory containing the file exists.
+ if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil {
+ return "", nil, err
+ }
+
+ lockPath := shortNameAliasesConfPath + ".lock"
+ locker, err := lockfile.GetLockfile(lockPath)
+ return shortNameAliasesConfPath, locker, err
+}
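
The file above only ships the plumbing; the doc comments note that almost all callers should go through pkg/shortnames instead. For orientation, a minimal, hypothetical usage sketch of the exported entry points (ResolveShortNameAlias, AddShortNameAlias, RemoveShortNameAlias) follows. It is not part of the vendored code, and the alias name and the example.com value are made up.

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := &types.SystemContext{} // default config locations

	// Record an alias in the user-specific short-name-aliases.conf.
	if err := sysregistriesv2.AddShortNameAlias(ctx, "myimage", "example.com/team/myimage"); err != nil {
		fmt.Println("add:", err)
		return
	}

	// Resolve it; origin is the config file the alias came from.
	named, origin, err := sysregistriesv2.ResolveShortNameAlias(ctx, "myimage")
	if err != nil {
		fmt.Println("resolve:", err)
		return
	}
	if named != nil {
		fmt.Printf("myimage resolves to %s (from %s)\n", named.String(), origin)
	}

	// Remove it again; this errors if the alias only exists in registries.conf.
	if err := sysregistriesv2.RemoveShortNameAlias(ctx, "myimage"); err != nil {
		fmt.Println("remove:", err)
	}
}
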
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index ea2b21575..89ad7c533 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -154,6 +154,19 @@ type V2RegistriesConf struct {
Registries []Registry `toml:"registry"`
// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
+
+ // ShortNameMode defines how short-name resolution should be handled by
+ // _consumers_ of this package. Depending on the mode, the user should
+ // be prompted with a choice of using one of the unqualified-search
+ // registries when referring to a short name.
+ //
+ // Valid modes are: "permissive" (prompt if stdout is a TTY, otherwise
+ // use all unqualified-search registries), "enforcing" (always prompt
+ // and error if stdout is not a TTY), and "disabled" (do not prompt and
+ // potentially use all unqualified-search registries).
+ ShortNameMode string `toml:"short-name-mode"`
+
+ shortNameAliasConf
}
// Nonempty returns true if config contains at least one configuration entry.
@@ -162,10 +175,23 @@ func (config *V2RegistriesConf) Nonempty() bool {
len(config.UnqualifiedSearchRegistries) != 0)
}
-// tomlConfig is the data type used to unmarshal the toml config.
-type tomlConfig struct {
- V2RegistriesConf
- V1RegistriesConf // for backwards compatibility with sysregistries v1
+// parsedConfig is the result of parsing, and possibly merging, configuration files;
+// it is the boundary between the process of reading+ingesting the files, and
+// later interpreting the configuration based on the caller’s requests.
+type parsedConfig struct {
+ // NOTE: Update also parsedConfig.updateWithConfigurationFrom!
+
+ // partialV2 must continue to exist to maintain the return value of TryUpdatingCache
+ // for compatibility with existing callers.
+ // We store the authoritative Registries and UnqualifiedSearchRegistries values there as well.
+ partialV2 V2RegistriesConf
+ // Absolute path to the configuration file that set the UnqualifiedSearchRegistries.
+ unqualifiedSearchRegistriesOrigin string
+ // Result of parsing of partialV2.ShortNameMode.
+ // NOTE: May be ShortNameModeInvalid to represent ShortNameMode == "" in intermediate values;
+ // the full configuration in configCache / getConfig() always contains a valid value.
+ shortNameMode types.ShortNameMode
+ aliasCache *shortNameAliasCache
}
// InvalidRegistries represents an invalid registry configuration. An example
@@ -254,7 +280,7 @@ var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.Strin
// postProcess checks the consistency of all the configuration, looks for conflicts,
// and normalizes the configuration (e.g., sets the Prefix to Location if not set).
-func (config *V2RegistriesConf) postProcess() error {
+func (config *V2RegistriesConf) postProcessRegistries() error {
regMap := make(map[string][]*Registry)
for i := range config.Registries {
@@ -301,6 +327,7 @@ func (config *V2RegistriesConf) postProcess() error {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
return &InvalidRegistries{s: msg}
}
+
if reg.Blocked != other.Blocked {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
return &InvalidRegistries{s: msg}
@@ -323,16 +350,25 @@ func (config *V2RegistriesConf) postProcess() error {
// rendering later items with the same prefix non-existent. We cannot error
// out anymore as this might break existing users, so let's just ignore them
// to guarantee that the same prefix exists only once.
- knownPrefixes := make(map[string]bool)
- uniqueRegistries := []Registry{}
+ //
+ // As a side effect of parsedConfig.updateWithConfigurationFrom, the Registries slice
+ // is always sorted. To be consistent in situations where it is not called (no drop-ins),
+ // sort it here as well.
+ prefixes := []string{}
+ uniqueRegistries := make(map[string]Registry)
for i := range config.Registries {
// TODO: should we warn if we see the same prefix being used multiple times?
- if _, exists := knownPrefixes[config.Registries[i].Prefix]; !exists {
- knownPrefixes[config.Registries[i].Prefix] = true
- uniqueRegistries = append(uniqueRegistries, config.Registries[i])
+ prefix := config.Registries[i].Prefix
+ if _, exists := uniqueRegistries[prefix]; !exists {
+ uniqueRegistries[prefix] = config.Registries[i]
+ prefixes = append(prefixes, prefix)
}
}
- config.Registries = uniqueRegistries
+ sort.Strings(prefixes)
+ config.Registries = []Registry{}
+ for _, prefix := range prefixes {
+ config.Registries = append(config.Registries, uniqueRegistries[prefix])
+ }
return nil
}
@@ -385,6 +421,7 @@ func newConfigWrapper(ctx *types.SystemContext) configWrapper {
} else {
wrapper.userConfigDirPath = userRegistriesDirPath
}
+
return wrapper
} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
@@ -426,7 +463,7 @@ var configMutex = sync.Mutex{}
// configCache caches already loaded configs with config paths as keys and is
// used to avoid redundantly parsing configs. Concurrent accesses to the cache
// are synchronized via configMutex.
-var configCache = make(map[configWrapper]*V2RegistriesConf)
+var configCache = make(map[configWrapper]*parsedConfig)
// InvalidateCache invalidates the registry cache. This function is meant to be
// used for long-running processes that need to reload potential changes made to
@@ -434,11 +471,11 @@ var configCache = make(map[configWrapper]*V2RegistriesConf)
func InvalidateCache() {
configMutex.Lock()
defer configMutex.Unlock()
- configCache = make(map[configWrapper]*V2RegistriesConf)
+ configCache = make(map[configWrapper]*parsedConfig)
}
// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
-func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
+func getConfig(ctx *types.SystemContext) (*parsedConfig, error) {
wrapper := newConfigWrapper(ctx)
configMutex.Lock()
if config, inCache := configCache[wrapper]; inCache {
@@ -504,27 +541,37 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
// TryUpdatingCache loads the configuration from the provided `SystemContext`
// without using the internal cache. On success, the loaded configuration will
// be added into the internal registry cache.
+// It returns the resulting configuration; this is DEPRECATED and may not correctly
+// reflect any future data handled by this package.
func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
- return tryUpdatingCache(ctx, newConfigWrapper(ctx))
+ config, err := tryUpdatingCache(ctx, newConfigWrapper(ctx))
+ if err != nil {
+ return nil, err
+ }
+ return &config.partialV2, err
}
// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper
// argument to avoid redundantly calculating the config paths.
-func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*V2RegistriesConf, error) {
+func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) {
configMutex.Lock()
defer configMutex.Unlock()
// load the config
- config := &tomlConfig{}
- if err := config.loadConfig(wrapper.configPath, false); err != nil {
+ config, err := loadConfigFile(wrapper.configPath, false)
+ if err != nil {
// Continue with an empty []Registry if we use the default config, which
// implies that the config path of the SystemContext isn't set.
//
// Note: if ctx.SystemRegistriesConfPath points to the default config,
// we will still return an error.
if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
- config = &tomlConfig{}
- config.V2RegistriesConf = V2RegistriesConf{Registries: []Registry{}}
+ config = &parsedConfig{}
+ config.partialV2 = V2RegistriesConf{Registries: []Registry{}}
+ config.aliasCache, err = newShortNameAliasCache("", &shortNameAliasConf{})
+ if err != nil {
+ return nil, err // Should never happen
+ }
} else {
return nil, errors.Wrapf(err, "error loading registries configuration %q", wrapper.configPath)
}
@@ -537,16 +584,20 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*V2Regis
}
for _, path := range dinConfigs {
// Enforce v2 format for drop-in-configs.
- if err := config.loadConfig(path, true); err != nil {
+ dropIn, err := loadConfigFile(path, true)
+ if err != nil {
return nil, errors.Wrapf(err, "error loading drop-in registries configuration %q", path)
}
+ config.updateWithConfigurationFrom(dropIn)
}
- v2Config := &config.V2RegistriesConf
+ if config.shortNameMode == types.ShortNameModeInvalid {
+ config.shortNameMode = defaultShortNameMode
+ }
// populate the cache
- configCache[wrapper] = v2Config
- return v2Config, nil
+ configCache[wrapper] = config
+ return config, nil
}
// GetRegistries loads and returns the registries specified in the config.
@@ -557,17 +608,53 @@ func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
if err != nil {
return nil, err
}
- return config.Registries, nil
+ return config.partialV2.Registries, nil
}
// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
// for unqualified image search, in the returned order.
func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
+ registries, _, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
+ return registries, err
+}
+
+// UnqualifiedSearchRegistriesWithOrigin returns a list of host[:port] entries
+// to try for unqualified image search, in the returned order. It also returns
+// a human-readable description of where these entries are specified (e.g., a
+// registries.conf file).
+func UnqualifiedSearchRegistriesWithOrigin(ctx *types.SystemContext) ([]string, string, error) {
config, err := getConfig(ctx)
if err != nil {
- return nil, err
+ return nil, "", err
}
- return config.UnqualifiedSearchRegistries, nil
+ return config.partialV2.UnqualifiedSearchRegistries, config.unqualifiedSearchRegistriesOrigin, nil
+}
+
+// parseShortNameMode translates the string into a well-typed
+// types.ShortNameMode.
+func parseShortNameMode(mode string) (types.ShortNameMode, error) {
+ switch mode {
+ case "disabled":
+ return types.ShortNameModeDisabled, nil
+ case "enforcing":
+ return types.ShortNameModeEnforcing, nil
+ case "permissive":
+ return types.ShortNameModePermissive, nil
+ default:
+ return types.ShortNameModeInvalid, errors.Errorf("invalid short-name mode: %q", mode)
+ }
+}
+
+// GetShortNameMode returns the configured types.ShortNameMode.
+func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
+ if ctx != nil && ctx.ShortNameMode != nil {
+ return *ctx.ShortNameMode, nil
+ }
+ config, err := getConfig(ctx)
+ if err != nil {
+ return -1, err
+ }
+ return config.shortNameMode, err
}
// refMatchesPrefix returns true iff ref,
@@ -609,7 +696,7 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
reg := Registry{}
prefixLen := 0
- for _, r := range config.Registries {
+ for _, r := range config.partialV2.Registries {
if refMatchesPrefix(ref, r.Prefix) {
length := len(r.Prefix)
if length > prefixLen {
@@ -624,55 +711,87 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
return nil, nil
}
-// loadConfig loads and unmarshals the configuration at the specified path. Note
-// that v1 configs are translated into v2 and are cleared. Use forceV2 if the
-// config must in the v2 format.
-//
-// Note that specified fields in path will replace already set fields in the
-// tomlConfig. Only the [[registry]] tables are merged by prefix.
-func (c *tomlConfig) loadConfig(path string, forceV2 bool) error {
+// loadConfigFile loads and unmarshals a single config file.
+// Use forceV2 if the config must be in the v2 format.
+func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
logrus.Debugf("Loading registries configuration %q", path)
- // Save the registries before decoding the file where they could be lost.
- // We merge them later again.
- registryMap := make(map[string]Registry)
- for i := range c.Registries {
- registryMap[c.Registries[i].Prefix] = c.Registries[i]
+ // tomlConfig allows us to decode a file in either the V1 or the V2 format.
+ type tomlConfig struct {
+ V2RegistriesConf
+ V1RegistriesConf // for backwards compatibility with sysregistries v1
}
// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
- c.Registries = nil // important to clear the memory to prevent us from overlapping fields
- _, err := toml.DecodeFile(path, c)
+ var combinedTOML tomlConfig
+ _, err := toml.DecodeFile(path, &combinedTOML)
if err != nil {
- return err
+ return nil, err
}
- if c.V1RegistriesConf.Nonempty() {
+ if combinedTOML.V1RegistriesConf.Nonempty() {
// Enforce the v2 format if requested.
if forceV2 {
- return &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
+ return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
}
// Convert a v1 config into a v2 config.
- if c.V2RegistriesConf.Nonempty() {
- return &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
+ if combinedTOML.V2RegistriesConf.Nonempty() {
+ return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
}
- v2, err := c.V1RegistriesConf.ConvertToV2()
+ converted, err := combinedTOML.V1RegistriesConf.ConvertToV2()
if err != nil {
- return err
+ return nil, err
}
- c.V1RegistriesConf = V1RegistriesConf{}
- c.V2RegistriesConf = *v2
+ combinedTOML.V1RegistriesConf = V1RegistriesConf{}
+ combinedTOML.V2RegistriesConf = *converted
}
+ res := parsedConfig{partialV2: combinedTOML.V2RegistriesConf}
+
// Post process registries, set the correct prefixes, sanity checks, etc.
- if err := c.postProcess(); err != nil {
- return err
+ if err := res.partialV2.postProcessRegistries(); err != nil {
+ return nil, err
}
+ res.unqualifiedSearchRegistriesOrigin = path
+
+ if len(res.partialV2.ShortNameMode) > 0 {
+ mode, err := parseShortNameMode(res.partialV2.ShortNameMode)
+ if err != nil {
+ return nil, err
+ }
+ res.shortNameMode = mode
+ } else {
+ res.shortNameMode = types.ShortNameModeInvalid
+ }
+
+ // Parse and validate short-name aliases.
+ cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf)
+ if err != nil {
+ return nil, errors.Wrap(err, "error validating short-name aliases")
+ }
+ res.aliasCache = cache
+ // Clear res.partialV2.shortNameAliasConf to make it available for garbage collection and
+ // reduce memory consumption; lookups go through aliasCache.
+ res.partialV2.shortNameAliasConf = shortNameAliasConf{}
+
+ return &res, nil
+}
+
+// updateWithConfigurationFrom updates c with configuration from updates.
+//
+// Fields present in updates will typically replace already set fields in c.
+// The [[registry]] and alias tables are merged.
+func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
+ // == Merge Registries:
+ registryMap := make(map[string]Registry)
+ for i := range c.partialV2.Registries {
+ registryMap[c.partialV2.Registries[i].Prefix] = c.partialV2.Registries[i]
+ }
// Merge the freshly loaded registries.
- for i := range c.Registries {
- registryMap[c.Registries[i].Prefix] = c.Registries[i]
+ for i := range updates.partialV2.Registries {
+ registryMap[updates.partialV2.Registries[i].Prefix] = updates.partialV2.Registries[i]
}
// Go maps have a non-deterministic order when iterating the keys, so
@@ -686,10 +805,27 @@ func (c *tomlConfig) loadConfig(path string, forceV2 bool) error {
}
sort.Strings(prefixes)
- c.Registries = []Registry{}
+ c.partialV2.Registries = []Registry{}
for _, prefix := range prefixes {
- c.Registries = append(c.Registries, registryMap[prefix])
+ c.partialV2.Registries = append(c.partialV2.Registries, registryMap[prefix])
}
- return nil
+ // == Merge UnqualifiedSearchRegistries:
+ // This depends on a subtlety of the TOML decoder: a missing array field
+ // is not modified while unmarshaling (in our case it remains nil), while an
+ // empty array ([]) is unmarshaled as a non-nil []string{}.
+ if updates.partialV2.UnqualifiedSearchRegistries != nil {
+ c.partialV2.UnqualifiedSearchRegistries = updates.partialV2.UnqualifiedSearchRegistries
+ c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin
+ }
+
+ // == Merge shortNameMode:
+ // We don’t maintain c.partialV2.ShortNameMode.
+ if updates.shortNameMode != types.ShortNameModeInvalid {
+ c.shortNameMode = updates.shortNameMode
+ }
+
+ // == Merge aliasCache:
+ // We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
+ c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)
}
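
To illustrate the consumer-facing surface added in this file, here is a short hypothetical sketch; it is not part of the diff and only calls functions whose signatures appear above (GetShortNameMode, UnqualifiedSearchRegistriesWithOrigin).

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := &types.SystemContext{}

	// After the drop-in merge, an unset short-name-mode falls back to
	// defaultShortNameMode (permissive).
	mode, err := sysregistriesv2.GetShortNameMode(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("short-name mode:", mode)

	// The origin string names the registries.conf (or drop-in) file that set
	// the unqualified-search registries.
	regs, origin, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("unqualified-search registries %v (set in %s)\n", regs, origin)
}
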
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
index 5a91f0096..3c5126b4e 100644
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -486,6 +486,36 @@ func NewOptionalBool(b bool) OptionalBool {
return o
}
+// ShortNameMode defines the mode of short-name resolution.
+//
+// The use of unqualified-search registries entails an ambiguity as it's
+// unclear which registry a given image, referenced by a short name, may
+// be pulled from.
+//
+// The ShortNameMode type defines how short names should resolve.
+type ShortNameMode int
+
+const (
+ ShortNameModeInvalid ShortNameMode = iota
+ // Use all configured unqualified-search registries without prompting
+ // the user.
+ ShortNameModeDisabled
+ // If both stdout and stdin are TTYs, prompt the user to select a configured
+ // unqualified-search registry. Otherwise, use all configured
+ // unqualified-search registries.
+ //
+ // Note that if only one unqualified-search registry is set, it will be
+ // used without prompting.
+ ShortNameModePermissive
+ // Always prompt the user to select a configured unqualified-search
+ // registry. Return an error if stdout or stdin is not a TTY, as
+ // prompting isn't possible.
+ //
+ // Note that if only one unqualified-search registry is set, it will be
+ // used without prompting.
+ ShortNameModeEnforcing
+)
+
// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
@@ -509,6 +539,10 @@ type SystemContext struct {
SystemRegistriesConfPath string
// Path to the system-wide registries configuration directory
SystemRegistriesConfDirPath string
+ // Path to the user-specific short-names configuration file
+ UserShortNameAliasConfPath string
+ // If set, short-name resolution in pkg/shortnames must follow the specified mode
+ ShortNameMode *ShortNameMode
// If not "", overrides the default path for the authentication file, but only new format files
AuthFilePath string
// if not "", overrides the default path for the authentication file, but with the legacy format;
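
As a brief illustration of the two new SystemContext fields above (ShortNameMode and UserShortNameAliasConfPath), a hypothetical override could look like the sketch below; the path is made up and not part of the diff.

package main

import "github.com/containers/image/v5/types"

func enforcingContext() *types.SystemContext {
	mode := types.ShortNameModeEnforcing
	return &types.SystemContext{
		// Force a resolution mode regardless of what registries.conf says.
		ShortNameMode: &mode,
		// Use a private aliases file instead of the per-user default
		// under the XDG config home.
		UserShortNameAliasConfPath: "/tmp/short-name-aliases.conf",
	}
}

func main() {
	_ = enforcingContext()
}
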
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index b6b79f26c..14e553c9f 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 7
+ VersionMinor = 8
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 2d27ccba1..f9e8384bb 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.23.7
+1.24.1
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go
index 1cadb089f..7c2a73f6b 100644
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go
@@ -50,22 +50,22 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
if uid != int(st.Uid) || gid != int(st.Gid) {
cap, err := system.Lgetxattr(path, "security.capability")
if err != nil && err != system.ErrNotSupportedPlatform {
- return fmt.Errorf("%s: Lgetxattr(%q): %v", os.Args[0], path, err)
+ return fmt.Errorf("%s: %v", os.Args[0], err)
}
// Make the change.
- if err := os.Lchown(path, uid, gid); err != nil {
- return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
+ if err := system.Lchown(path, uid, gid); err != nil {
+ return fmt.Errorf("%s: %v", os.Args[0], err)
}
// Restore the SUID and SGID bits if they were originally set.
if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
- if err := os.Chmod(path, info.Mode()); err != nil {
- return fmt.Errorf("%s: chmod(%q): %v", os.Args[0], path, err)
+ if err := system.Chmod(path, info.Mode()); err != nil {
+ return fmt.Errorf("%s: %v", os.Args[0], err)
}
}
if cap != nil {
if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
- return fmt.Errorf("%s: Lsetxattr(%q): %v", os.Args[0], path, err)
+ return fmt.Errorf("%s: %v", os.Args[0], err)
}
}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
index 1e380a5ac..5147b01d6 100644
--- a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -12,6 +12,7 @@ package copy
import "C"
import (
"container/list"
+ "errors"
"fmt"
"io"
"os"
@@ -98,7 +99,7 @@ func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
- if err != nil && err != unix.EOPNOTSUPP {
+ if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
return err
}
if data != nil {
@@ -269,7 +270,7 @@ func doCopyXattrs(srcPath, dstPath string) error {
}
xattrs, err := system.Llistxattr(srcPath)
- if err != nil && err != unix.EOPNOTSUPP {
+ if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
return err
}
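
The two hunks above replace direct errno comparisons with errors.Is. A tiny standalone sketch (not from the diff) of why that matters once an errno gets wrapped:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Lgetxattr-style call sites may hand back a wrapped errno.
	wrapped := fmt.Errorf("lgetxattr: %w", unix.EOPNOTSUPP)

	fmt.Println(wrapped == unix.EOPNOTSUPP)          // false: the errno is wrapped
	fmt.Println(errors.Is(wrapped, unix.EOPNOTSUPP)) // true: errors.Is unwraps the chain
}
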
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index a5393c10f..2d6485e80 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -60,6 +60,7 @@ type ApplyDiffOpts struct {
Mappings *idtools.IDMappings
MountLabel string
IgnoreChownErrors bool
+ ForceMask *os.FileMode
}
// InitFunc initializes the storage driver.
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 398fe6531..c1895c364 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -42,6 +42,8 @@ var (
untar = chrootarchive.UntarUncompressed
)
+const defaultPerms = os.FileMode(0555)
+
// This backend uses the overlay union filesystem for containers
// with diff directories for each layer.
@@ -91,6 +93,7 @@ type overlayOptions struct {
skipMountHome bool
mountOptions string
ignoreChownErrors bool
+ forceMask *os.FileMode
}
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -141,6 +144,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
if opts.mountProgram == "" {
+ if opts.forceMask != nil {
+ return nil, errors.New("'force_mask' is supported only with 'mount_program'")
+ }
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
@@ -326,6 +332,22 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
+ case "force_mask":
+ logrus.Debugf("overlay: force_mask=%s", val)
+ var mask int64
+ switch val {
+ case "shared":
+ mask = 0755
+ case "private":
+ mask = 0700
+ default:
+ mask, err = strconv.ParseInt(val, 8, 32)
+ if err != nil {
+ return nil, err
+ }
+ }
+ m := os.FileMode(mask)
+ o.forceMask = &m
default:
return nil, fmt.Errorf("overlay: Unknown option %s", key)
}
@@ -604,7 +626,19 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
}
}
- if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
+ perms := defaultPerms
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ }
+ if parent != "" {
+ st, err := system.Stat(filepath.Join(d.dir(parent), "diff"))
+ if err != nil {
+ return err
+ }
+ perms = os.FileMode(st.Mode())
+ }
+
+ if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
@@ -847,12 +881,25 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", err
}
diffN := 1
- _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ perms := defaultPerms
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ }
+ permsKnown := false
+ st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ if err == nil {
+ perms = os.FileMode(st.Mode())
+ permsKnown = true
+ }
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
diffN++
- _, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
+ if err == nil && !permsKnown {
+ perms = os.FileMode(st.Mode())
+ permsKnown = true
+ }
}
// For each lower, resolve its path, and append it and any additional diffN
@@ -863,10 +910,14 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
lower := ""
newpath := path.Join(d.home, l)
- if _, err := os.Stat(newpath); err != nil {
+ if st, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
lower = path.Join(p, d.name, l)
- if _, err2 := os.Stat(lower); err2 == nil {
+ if st2, err2 := os.Stat(lower); err2 == nil {
+ if !permsKnown {
+ perms = os.FileMode(st2.Mode())
+ permsKnown = true
+ }
break
}
lower = ""
@@ -884,6 +935,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
}
} else {
+ if !permsKnown {
+ perms = os.FileMode(st.Mode())
+ permsKnown = true
+ }
lower = newpath
}
absLowers = append(absLowers, lower)
@@ -908,7 +963,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return "", err
}
diffDir := path.Join(dir, "diff")
- if err := idtools.MkdirAllAs(diffDir, 0755, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
return "", err
}
@@ -1114,6 +1169,9 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
if d.options.ignoreChownErrors {
options.IgnoreChownErrors = d.options.ignoreChownErrors
}
+ if d.options.forceMask != nil {
+ options.ForceMask = d.options.forceMask
+ }
return d.naiveDiff.ApplyDiff(id, parent, options)
}
@@ -1130,6 +1188,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
+ ForceMask: d.options.forceMask,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
@@ -1241,11 +1300,20 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
// Rotate the diff directories.
i := 0
- _, err = os.Stat(nameWithSuffix(diffDir, i))
+ perms := defaultPerms
+ st, err := os.Stat(nameWithSuffix(diffDir, i))
+ if d.options.forceMask != nil {
+ perms = *d.options.forceMask
+ } else {
+ if err == nil {
+ perms = os.FileMode(st.Mode())
+ }
+ }
for err == nil {
i++
_, err = os.Stat(nameWithSuffix(diffDir, i))
}
+
for i > 0 {
err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i))
if err != nil {
@@ -1258,13 +1326,13 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
// to the old upper layer in the index.
workDir := filepath.Join(dir, "work")
if err := os.RemoveAll(workDir); err == nil {
- if err := idtools.MkdirAs(workDir, 0755, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(workDir, defaultPerms, rootUID, rootGID); err != nil {
return err
}
}
// Re-create the directory that we're going to use as the upper layer.
- if err := idtools.MkdirAs(diffDir, 0755, rootUID, rootGID); err != nil {
+ if err := idtools.MkdirAs(diffDir, perms, rootUID, rootGID); err != nil {
return err
}
return nil
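
The parseOptions hunk above accepts force_mask either as a keyword or as an octal string. The standalone sketch below (a hypothetical helper, not part of the driver) restates that mapping so it can be tried in isolation.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseForceMask mirrors the force_mask handling added to parseOptions:
// "shared" and "private" are shortcuts, anything else is parsed as octal.
func parseForceMask(val string) (os.FileMode, error) {
	var mask int64
	switch val {
	case "shared":
		mask = 0755
	case "private":
		mask = 0700
	default:
		var err error
		mask, err = strconv.ParseInt(val, 8, 32)
		if err != nil {
			return 0, err
		}
	}
	return os.FileMode(mask), nil
}

func main() {
	for _, v := range []string{"shared", "private", "0644"} {
		m, err := parseForceMask(v)
		fmt.Printf("%s -> %04o (err: %v)\n", v, m, err)
	}
}
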
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index c1ab93e1d..1fd84e3b4 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -3,6 +3,7 @@
package windows
import (
+ "archive/tar"
"bufio"
"bytes"
"encoding/json"
@@ -21,7 +22,6 @@ import (
"unsafe"
"github.com/Microsoft/go-winio"
- "github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
"github.com/containers/storage/drivers"
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index f38266a16..86a5d8644 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -4,11 +4,11 @@ module github.com/containers/storage
require (
github.com/BurntSushi/toml v0.3.1
- github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
+ github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.1.0
- github.com/klauspost/compress v1.11.1
+ github.com/klauspost/compress v1.11.3
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index 2aad798d8..a5d3f3b82 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -3,6 +3,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
+github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA=
@@ -62,8 +64,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.1 h1:bPb7nMRdOZYDrpPMTA3EInUQrdgoBinqUuSwlGdKDdE=
-github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index a70806f40..72f00b8d6 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -1329,6 +1329,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
return -1, err
}
+ defer uncompressed.Close()
uncompressedDigest := digest.Canonical.Digester()
uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
uidLog := make(map[uint32]struct{})
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 4472511a2..2f917344a 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -65,13 +65,16 @@ type (
// from the traditional behavior/format to get features like subsecond
// precision in timestamps.
CopyPass bool
+ // ForceMask, if set, indicates the permission mask used for created files.
+ ForceMask *os.FileMode
}
)
const (
- tarExt = "tar"
- solaris = "solaris"
- windows = "windows"
+ tarExt = "tar"
+ solaris = "solaris"
+ windows = "windows"
+ containersOverrideXattr = "user.containers.override_stat"
)
// Archiver allows the reuse of most utility functions of this package with a
@@ -139,6 +142,7 @@ func IsArchivePath(path string) bool {
if err != nil {
return false
}
+ defer rdr.Close()
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
@@ -398,7 +402,7 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
- if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
}
if capability != nil {
@@ -411,17 +415,17 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
xattrs, err := system.Llistxattr(path)
- if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return err
}
for _, key := range xattrs {
if strings.HasPrefix(key, "user.") {
value, err := system.Lgetxattr(path, key)
- if err == system.E2BIG {
- logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key)
- continue
- }
if err != nil {
+ if errors.Is(err, system.E2BIG) {
+ logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key)
+ continue
+ }
return err
}
if hdr.Xattrs == nil {
@@ -602,18 +606,23 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, buffer []byte) error {
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
+ mask := hdrInfo.Mode()
+ if forceMask != nil {
+ mask = *forceMask
+ }
+
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
- if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ if err := os.Mkdir(path, mask); err != nil {
return err
}
}
@@ -622,7 +631,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// Source is regular file. We use system.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
// On Linux, this equates to a regular os.OpenFile
- file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask)
if err != nil {
return err
}
@@ -679,6 +688,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
}
+ if forceMask != nil && hdr.Typeflag != tar.TypeSymlink {
+ value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777)
+ if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+
// Lchown is not supported on Windows.
if Lchown && runtime.GOOS != windows {
if chownOpts == nil {
@@ -696,7 +712,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
- if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil {
return err
}
@@ -724,16 +740,16 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
- var errors []string
+ var errs []string
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
- if err == syscall.ENOTSUP || (err == syscall.EPERM && inUserns) {
+ if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
// ENOTSUP should be emitted in that case, otherwise we still
// bail. We also ignore EPERM errors if we are running in a
// user namespace.
- errors = append(errors, err.Error())
+ errs = append(errs, err.Error())
continue
}
return err
@@ -741,9 +757,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
- if len(errors) > 0 {
+ if len(errs) > 0 {
logrus.WithFields(logrus.Fields{
- "errors": errors,
+ "errors": errs,
}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
}
@@ -945,6 +961,16 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
+ if options.ForceMask != nil {
+ uid, gid, mode, err := getFileOwner(dest)
+ if err == nil {
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
+ if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
+ return err
+ }
+ }
+ }
+
// Iterate through the files in the archive.
loop:
for {
@@ -1040,7 +1066,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
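
When ForceMask is in effect, the hunks above persist the real ownership and mode in the user.containers.override_stat xattr as "uid:gid:0mode" while the file on disk gets the forced mask. A small sketch with hypothetical encode/decode helpers (not part of the package) shows that value format:

package main

import "fmt"

// encodeOverrideStat builds the same "%d:%d:0%o" value written by createTarFile.
func encodeOverrideStat(uid, gid int, mode uint32) string {
	return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&07777)
}

// decodeOverrideStat is a hypothetical inverse for illustration only.
func decodeOverrideStat(value string) (uid, gid int, mode uint32, err error) {
	_, err = fmt.Sscanf(value, "%d:%d:%o", &uid, &gid, &mode)
	return uid, gid, mode, err
}

func main() {
	v := encodeOverrideStat(1000, 1000, 0640)
	fmt.Println(v) // 1000:1000:0640
	uid, gid, mode, err := decodeOverrideStat(v)
	fmt.Printf("%d %d %04o %v\n", uid, gid, mode, err)
}
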
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
index 05aae4c13..6a5a867c7 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
@@ -10,6 +10,7 @@ import (
"fmt"
"github.com/containers/storage/pkg/idtools"
fflib "github.com/pquerna/ffjson/fflib/v1"
+ "os"
)
// MarshalJSON marshal bytes to json - template
@@ -501,6 +502,12 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
} else {
buf.WriteString(`,"CopyPass":false`)
}
+ if j.ForceMask != nil {
+ buf.WriteString(`,"ForceMask":`)
+ fflib.FormatBits2(buf, uint64(*j.ForceMask), 10, false)
+ } else {
+ buf.WriteString(`,"ForceMask":null`)
+ }
buf.WriteByte('}')
return nil
}
@@ -538,6 +545,8 @@ const (
ffjtTarOptionsInUserNS
ffjtTarOptionsCopyPass
+
+ ffjtTarOptionsForceMask
)
var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles")
@@ -570,6 +579,8 @@ var ffjKeyTarOptionsInUserNS = []byte("InUserNS")
var ffjKeyTarOptionsCopyPass = []byte("CopyPass")
+var ffjKeyTarOptionsForceMask = []byte("ForceMask")
+
// UnmarshalJSON umarshall json - template of ffjson
func (j *TarOptions) UnmarshalJSON(input []byte) error {
fs := fflib.NewFFLexer(input)
@@ -657,6 +668,14 @@ mainparse:
goto mainparse
}
+ case 'F':
+
+ if bytes.Equal(ffjKeyTarOptionsForceMask, kn) {
+ currentKey = ffjtTarOptionsForceMask
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
case 'G':
if bytes.Equal(ffjKeyTarOptionsGIDMaps, kn) {
@@ -732,6 +751,12 @@ mainparse:
}
+ if fflib.EqualFoldRight(ffjKeyTarOptionsForceMask, kn) {
+ currentKey = ffjtTarOptionsForceMask
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) {
currentKey = ffjtTarOptionsCopyPass
state = fflib.FFParse_want_colon
@@ -884,6 +909,9 @@ mainparse:
case ffjtTarOptionsCopyPass:
goto handle_CopyPass
+ case ffjtTarOptionsForceMask:
+ goto handle_ForceMask
+
case ffjtTarOptionsnosuchkey:
err = fs.SkipField(tok)
if err != nil {
@@ -1597,6 +1625,39 @@ handle_CopyPass:
state = fflib.FFParse_after_value
goto mainparse
+handle_ForceMask:
+
+ /* handler: j.ForceMask type=os.FileMode kind=uint32 quoted=false*/
+
+ {
+ if tok != fflib.FFTok_integer && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for FileMode", tok))
+ }
+ }
+
+ {
+
+ if tok == fflib.FFTok_null {
+
+ j.ForceMask = nil
+
+ } else {
+
+ tval, err := fflib.ParseUint(fs.Output.Bytes(), 10, 32)
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ ttypval := os.FileMode(tval)
+ j.ForceMask = &ttypval
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
wantedvalue:
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 3a47eceae..3faa23889 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -142,3 +142,15 @@ func isWhiteOut(stat os.FileInfo) bool {
s := stat.Sys().(*syscall.Stat_t)
return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
}
+
+func getFileOwner(path string) (uint32, uint32, uint32, error) {
+ f, err := os.Stat(path)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ s, ok := f.Sys().(*syscall.Stat_t)
+ if ok {
+ return s.Uid, s.Gid, s.Mode & 07777, nil
+ }
+ return 0, 0, uint32(f.Mode()), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
index 585faa824..08e3bc889 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go
@@ -5,3 +5,7 @@ package archive
func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter {
return nil
}
+
+func getFileOwner(path string) (uint32, uint32, uint32, error) {
+ return 0, 0, 0, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
index bdc1a3d79..ecb704b64 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go
@@ -106,15 +106,19 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
+ permissionsMask := hdrInfo.Mode()
+ if forceMask != nil {
+ permissionsMask = *forceMask
+ }
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ if err := os.Chmod(path, permissionsMask); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ if err := os.Chmod(path, permissionsMask); err != nil {
return err
}
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
index 0bcbb925d..a0872444f 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go
@@ -69,7 +69,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
return nil
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
index ecfa45d73..da86e0c05 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -2,6 +2,7 @@ package archive
import (
"bytes"
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -86,21 +87,21 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
}
info.stat = stat
info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
- if err != nil && err != system.EOPNOTSUPP {
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
return err
}
xattrs, err := system.Llistxattr(cpath)
- if err != nil && err != system.EOPNOTSUPP {
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) {
return err
}
for _, key := range xattrs {
if strings.HasPrefix(key, "user.") {
value, err := system.Lgetxattr(cpath, key)
- if err == system.E2BIG {
- logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key)
- continue
- }
if err != nil {
+ if errors.Is(err, system.E2BIG) {
+ logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key)
+ continue
+ }
return err
}
if info.xattrs == nil {
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index a12dd4202..14ffad5c0 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -106,7 +106,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
- if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}
}
@@ -197,7 +197,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
- if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return 0, err
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index 33ba6a128..aacfee76f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -5,7 +5,9 @@ import (
"fmt"
"io"
"io/ioutil"
+ "net"
"os"
+ "os/user"
"path/filepath"
"sync"
@@ -15,6 +17,13 @@ import (
"github.com/pkg/errors"
)
+func init() {
+	// initialize NSS libraries in glibc so that the dynamic libraries are loaded from the host
+	// environment, not from untrusted files inside the chroot.
+ _, _ = user.Lookup("storage")
+ _, _ = net.LookupHost("localhost")
+}
+
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
archiver := archive.NewArchiver(idMappings)
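The init hook above exploits a glibc detail: name-service lookups may lazily dlopen NSS plugins, so performing one user and one host lookup before any chroot guarantees that libnss_*.so comes from the host filesystem. A minimal Linux-only sketch of the same ordering concern; the function name and path are illustrative, not part of the package:

package main

import (
	"net"
	"os/user"
	"syscall"
)

// extractInChroot is a hypothetical unpacker: the lookups force glibc to load
// its NSS plugins while the host filesystem is still visible; after Chroot, a
// lazy dlopen would resolve against untrusted content inside the archive root.
func extractInChroot(root string) error {
	_, _ = user.Lookup("root")
	_, _ = net.LookupHost("localhost")

	if err := syscall.Chroot(root); err != nil {
		return err
	}
	return syscall.Chdir("/")
}

func main() {
	_ = extractInChroot("/var/tmp/untrusted-root")
}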
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
index c2fa2109e..7c9ac6ad6 100644
--- a/vendor/github.com/containers/storage/pkg/config/config.go
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -2,6 +2,7 @@ package config
import (
"fmt"
+ "os"
)
// ThinpoolOptionsConfig represents the "storage.options.thinpool"
@@ -94,6 +95,9 @@ type OverlayOptionsConfig struct {
Size string `toml:"size"`
// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`
+ // ForceMask indicates the permissions mask (e.g. "0755") to use for new
+ // files and directories
+ ForceMask string `toml:"force_mask"`
}
type VfsOptionsConfig struct {
@@ -129,6 +133,10 @@ type OptionsConfig struct {
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors"`
+ // ForceMask indicates the permissions mask (e.g. "0755") to use for new
+ // files and directories.
+ ForceMask os.FileMode `toml:"force_mask"`
+
// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
RemapUser string `toml:"remap-user"`
@@ -279,6 +287,11 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
} else if options.SkipMountHome != "" {
doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome))
}
+ if options.Overlay.ForceMask != "" {
+ doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask))
+ } else if options.ForceMask != 0 {
+ doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask))
+ }
case "vfs":
if options.Vfs.IgnoreChownErrors != "" {
doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
deleted file mode 100644
index d28ba9d69..000000000
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package homedir
-
-// Copyright 2013-2018 Docker, Inc.
-// NOTE: this package has originally been copied from github.com/docker/docker.
-
-import (
- "errors"
- "os"
- "path/filepath"
- "strings"
-)
-
-// GetRuntimeDir returns XDG_RUNTIME_DIR.
-// XDG_RUNTIME_DIR is typically configured via pam_systemd.
-// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetRuntimeDir() (string, error) {
- if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
- return xdgRuntimeDir, nil
- }
- return "", errors.New("could not get XDG_RUNTIME_DIR")
-}
-
-// StickRuntimeDirContents sets the sticky bit on files that are under
-// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
-//
-// StickyRuntimeDir returns slice of sticked files.
-// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func StickRuntimeDirContents(files []string) ([]string, error) {
- runtimeDir, err := GetRuntimeDir()
- if err != nil {
- // ignore error if runtimeDir is empty
- return nil, nil
- }
- runtimeDir, err = filepath.Abs(runtimeDir)
- if err != nil {
- return nil, err
- }
- var sticked []string
- for _, f := range files {
- f, err = filepath.Abs(f)
- if err != nil {
- return sticked, err
- }
- if strings.HasPrefix(f, runtimeDir+"/") {
- if err = stick(f); err != nil {
- return sticked, err
- }
- sticked = append(sticked, f)
- }
- }
- return sticked, nil
-}
-
-func stick(f string) error {
- st, err := os.Stat(f)
- if err != nil {
- return err
- }
- m := st.Mode()
- m |= os.ModeSticky
- return os.Chmod(f, m)
-}
-
-// GetDataHome returns XDG_DATA_HOME.
-// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetDataHome() (string, error) {
- if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
- return xdgDataHome, nil
- }
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("could not get either XDG_DATA_HOME or HOME")
- }
- return filepath.Join(home, ".local", "share"), nil
-}
-
-// GetConfigHome returns XDG_CONFIG_HOME.
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetConfigHome() (string, error) {
- if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
- return xdgConfigHome, nil
- }
- home := os.Getenv("HOME")
- if home == "" {
- return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
- }
- return filepath.Join(home, ".config"), nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
index f7bcfb878..4f778c858 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!darwin
package homedir
diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
index dcadb7e8d..0274d037f 100644
--- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
+++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
@@ -6,8 +6,12 @@ package homedir
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
+ "errors"
"os"
- "os/user"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage/pkg/unshare"
)
// Key returns the env var name for the user's home dir based on
@@ -25,13 +29,8 @@ func Key() string {
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
- home := os.Getenv(Key())
- if home == "" {
- if u, err := user.Current(); err == nil {
- return u.HomeDir
- }
- }
- return home
+ homedir, _ := unshare.HomeDir()
+ return homedir
}
// GetShortcutString returns the string that is shortcut to user's home directory
@@ -39,3 +38,88 @@ func Get() string {
func GetShortcutString() string {
return "~"
}
+
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+ if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+ return xdgRuntimeDir, nil
+ }
+ return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickRuntimeDirContents returns the slice of files it made sticky.
+// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+ runtimeDir, err := GetRuntimeDir()
+ if err != nil {
+ // ignore error if runtimeDir is empty
+ return nil, nil
+ }
+ runtimeDir, err = filepath.Abs(runtimeDir)
+ if err != nil {
+ return nil, err
+ }
+ var sticked []string
+ for _, f := range files {
+ f, err = filepath.Abs(f)
+ if err != nil {
+ return sticked, err
+ }
+ if strings.HasPrefix(f, runtimeDir+"/") {
+ if err = stick(f); err != nil {
+ return sticked, err
+ }
+ sticked = append(sticked, f)
+ }
+ }
+ return sticked, nil
+}
+
+func stick(f string) error {
+ st, err := os.Stat(f)
+ if err != nil {
+ return err
+ }
+ m := st.Mode()
+ m |= os.ModeSticky
+ return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+ if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+ return xdgDataHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+ }
+ return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+ if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+ return xdgConfigHome, nil
+ }
+ home := Get()
+ if home == "" {
+ return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
+ }
+ return filepath.Join(home, ".config"), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/chmod.go b/vendor/github.com/containers/storage/pkg/system/chmod.go
new file mode 100644
index 000000000..a01d8abfb
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/chmod.go
@@ -0,0 +1,17 @@
+package system
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+func Chmod(name string, mode os.FileMode) error {
+ err := os.Chmod(name, mode)
+
+ for err != nil && errors.Is(err, syscall.EINTR) {
+ err = os.Chmod(name, mode)
+ }
+
+ return err
+}
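Both new wrappers (Chmod here and Lchown in the next file) follow the same retry-on-EINTR pattern. A generic version of that loop, sketched for illustration only; the helper name and the path in main are not part of the package:

package main

import (
	"errors"
	"os"
	"syscall"
)

// retryOnEINTR re-invokes op until it stops failing with EINTR.
func retryOnEINTR(op func() error) error {
	err := op()
	for err != nil && errors.Is(err, syscall.EINTR) {
		err = op()
	}
	return err
}

func main() {
	// Example: chmod a hypothetical path, transparently retrying interrupted calls.
	_ = retryOnEINTR(func() error { return os.Chmod("/tmp/example", 0o644) })
}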
diff --git a/vendor/github.com/containers/storage/pkg/system/lchown.go b/vendor/github.com/containers/storage/pkg/system/lchown.go
new file mode 100644
index 000000000..eb2d8b464
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lchown.go
@@ -0,0 +1,20 @@
+package system
+
+import (
+ "os"
+ "syscall"
+)
+
+func Lchown(name string, uid, gid int) error {
+ err := syscall.Lchown(name, uid, gid)
+
+ for err == syscall.EINTR {
+ err = syscall.Lchown(name, uid, gid)
+ }
+
+ if err != nil {
+ return &os.PathError{Op: "lchown", Path: name, Err: err}
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
index e94bb5d5c..10355848b 100644
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -2,6 +2,7 @@ package system
import (
"bytes"
+ "os"
"golang.org/x/sys/unix"
)
@@ -26,7 +27,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
// Buffer too small, use zero-sized buffer to get the actual size
sz, errno = unix.Lgetxattr(path, attr, []byte{})
if errno != nil {
- return nil, errno
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
}
dest = make([]byte, sz)
sz, errno = unix.Lgetxattr(path, attr, dest)
@@ -36,7 +37,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
case errno == unix.ENODATA:
return nil, nil
case errno != nil:
- return nil, errno
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno}
}
return dest[:sz], nil
@@ -45,7 +46,11 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
// Lsetxattr sets the value of the extended attribute identified by attr
// and associated with the given path in the file system.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
- return unix.Lsetxattr(path, attr, data, flags)
+ if err := unix.Lsetxattr(path, attr, data, flags); err != nil {
+ return &os.PathError{Op: "lsetxattr", Path: path, Err: err}
+ }
+
+ return nil
}
// Llistxattr lists extended attributes associated with the given path
@@ -58,14 +63,14 @@ func Llistxattr(path string) ([]string, error) {
// Buffer too small, use zero-sized buffer to get the actual size
sz, errno = unix.Llistxattr(path, []byte{})
if errno != nil {
- return nil, errno
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
}
dest = make([]byte, sz)
sz, errno = unix.Llistxattr(path, dest)
}
if errno != nil {
- return nil, errno
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno}
}
var attrs []string
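Wrapping the raw errno in *os.PathError is also why the changes_linux.go hunk earlier switched from == comparisons to errors.Is: PathError implements Unwrap, so sentinel errno values still match through the wrapper. A small illustrative check, assuming golang.org/x/sys/unix is available:

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	var err error = &os.PathError{Op: "lgetxattr", Path: "/tmp/f", Err: unix.EOPNOTSUPP}

	fmt.Println(err == unix.EOPNOTSUPP)          // false: the wrapper breaks direct comparison
	fmt.Println(errors.Is(err, unix.EOPNOTSUPP)) // true: errors.Is unwraps to the errno
}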
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
index a08fb674d..a9210b0bf 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go
@@ -26,6 +26,7 @@ func HomeDir() (string, error) {
return
}
homeDir, homeDirErr = usr.HomeDir, nil
+ return
}
homeDir, homeDirErr = home, nil
})
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index 64e02f327..0577e84ca 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -82,6 +82,39 @@ mountopt = "nodev"
# Size is used to set a maximum size of the container image.
# size = ""
+# ForceMask specifies the permissions mask that is used for new files and
+# directories.
+#
+# The values "shared" and "private" are accepted.
+# Octal permission masks are also accepted.
+#
+# "": No value specified.
+# All files/directories get set with the permissions identified within the
+# image.
+# "private": it is equivalent to 0700.
+# All files/directories get set with 0700 permissions. The owner has rwx
+# access to the files. No other users on the system can access the files.
+# This setting could be used with network-based home directories.
+# "shared": it is equivalent to 0755.
+# The owner has rwx access to the files and everyone else can read, access
+# and execute them. This setting is useful for sharing container storage
+# with other users, for instance a store owned by root but shared with
+# rootless users as an additional store.
+# NOTE: All files within the image are made readable and executable by any
+# user on the system. Even /etc/shadow within your image is now readable by
+# any user.
+#
+# OCTAL: Users can experiment with other octal permission masks.
+#
+# Note: The force_mask flag is an experimental feature; it could change in the
+# future. When "force_mask" is set, the original permission mask is stored in
+# the "user.containers.override_stat" xattr and the "mount_program" option must
+# be specified. Mount programs such as "/usr/bin/fuse-overlayfs" present the
+# extended attribute permissions to processes within containers rather than the
+# "force_mask" permissions.
+#
+# force_mask = ""
+
[storage.options.thinpool]
# Storage Options for thinpool
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 6b51b405d..b9115f195 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -3551,6 +3551,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
if config.Storage.Options.IgnoreChownErrors != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors))
}
+ if config.Storage.Options.ForceMask != 0 {
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask))
+ }
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index 5ba8cc418..49ec544a3 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -221,94 +221,71 @@ outer:
return size, nil
}
+func minInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a < b {
+ return b
+ }
+ return a
+}
+
// subtractHostIDs return the subtraction of the range USED from AVAIL. The range is specified
// by [HostID, HostID+Size).
// ContainerID is ignored.
func subtractHostIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap {
- switch {
- case used.HostID <= avail.HostID && used.HostID+used.Size >= avail.HostID+avail.Size:
- return nil
- case used.HostID <= avail.HostID && used.HostID+used.Size > avail.HostID && used.HostID+used.Size < avail.HostID+avail.Size:
- newContainerID := avail.ContainerID + used.Size
- newHostID := used.HostID + used.Size
- r := idtools.IDMap{
- ContainerID: newContainerID,
- HostID: newHostID,
- Size: avail.Size + avail.HostID - newHostID,
- }
- return []idtools.IDMap{r}
- case used.HostID > avail.HostID && used.HostID < avail.HostID+avail.Size && used.HostID+used.Size >= avail.HostID+avail.Size:
- r := idtools.IDMap{
+ var out []idtools.IDMap
+ availEnd := avail.HostID + avail.Size
+ usedEnd := used.HostID + used.Size
+ // Intersection of [avail.HostID, availEnd) and (-inf, used.HostID) is [avail.HostID, newEnd).
+ if newEnd := minInt(availEnd, used.HostID); newEnd > avail.HostID {
+ out = append(out, idtools.IDMap{
ContainerID: avail.ContainerID,
HostID: avail.HostID,
- Size: used.HostID - avail.HostID,
- }
- return []idtools.IDMap{r}
- case used.HostID > avail.HostID && used.HostID < avail.HostID+avail.Size && used.HostID+used.Size < avail.HostID+avail.Size:
- r1 := idtools.IDMap{
- ContainerID: avail.ContainerID,
- HostID: avail.HostID,
- Size: used.HostID - avail.HostID,
- }
- r2 := idtools.IDMap{
- ContainerID: used.ContainerID + used.Size,
- HostID: avail.HostID + (used.HostID - avail.HostID),
- Size: avail.HostID + avail.Size - used.HostID - used.Size,
- }
- return []idtools.IDMap{r1, r2}
- default:
- r := idtools.IDMap{
- ContainerID: 0,
- HostID: avail.HostID,
- Size: avail.Size,
- }
- return []idtools.IDMap{r}
- }
+ Size: newEnd - avail.HostID,
+ })
+ }
+ // Intersection of [avail.HostID, availEnd) and [usedEnd, +inf) is [newStart, availEnd).
+ if newStart := maxInt(avail.HostID, usedEnd); newStart < availEnd {
+ out = append(out, idtools.IDMap{
+ ContainerID: newStart + avail.ContainerID - avail.HostID,
+ HostID: newStart,
+ Size: availEnd - newStart,
+ })
+ }
+ return out
}
// subtractContainerIDs return the subtraction of the range USED from AVAIL. The range is specified
// by [ContainerID, ContainerID+Size).
// HostID is ignored.
func subtractContainerIDs(avail idtools.IDMap, used idtools.IDMap) []idtools.IDMap {
- switch {
- case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size >= avail.ContainerID+avail.Size:
- return nil
- case used.ContainerID <= avail.ContainerID && used.ContainerID+used.Size > avail.ContainerID && used.ContainerID+used.Size < avail.ContainerID+avail.Size:
- newContainerID := used.ContainerID + used.Size
- newHostID := avail.HostID + used.Size
- r := idtools.IDMap{
- ContainerID: newContainerID,
- HostID: newHostID,
- Size: avail.Size + avail.ContainerID - newContainerID,
- }
- return []idtools.IDMap{r}
- case used.ContainerID > avail.ContainerID && used.ContainerID < avail.ContainerID+avail.Size && used.ContainerID+used.Size >= avail.ContainerID+avail.Size:
- r := idtools.IDMap{
- ContainerID: avail.ContainerID,
- HostID: avail.HostID,
- Size: used.ContainerID - avail.ContainerID,
- }
- return []idtools.IDMap{r}
- case used.ContainerID > avail.ContainerID && used.ContainerID < avail.ContainerID+avail.Size && used.ContainerID+used.Size < avail.ContainerID+avail.Size:
- r1 := idtools.IDMap{
+ var out []idtools.IDMap
+ availEnd := avail.ContainerID + avail.Size
+ usedEnd := used.ContainerID + used.Size
+ // Intersection of [avail.ContainerID, availEnd) and (-inf, used.ContainerID) is
+ // [avail.ContainerID, newEnd).
+ if newEnd := minInt(availEnd, used.ContainerID); newEnd > avail.ContainerID {
+ out = append(out, idtools.IDMap{
ContainerID: avail.ContainerID,
HostID: avail.HostID,
- Size: used.ContainerID - avail.ContainerID,
- }
- r2 := idtools.IDMap{
- ContainerID: used.ContainerID + used.Size,
- HostID: avail.HostID + (used.ContainerID - avail.ContainerID),
- Size: avail.ContainerID + avail.Size - used.ContainerID - used.Size,
- }
- return []idtools.IDMap{r1, r2}
- default:
- r := idtools.IDMap{
- ContainerID: avail.ContainerID,
- HostID: avail.HostID,
- Size: avail.Size,
- }
- return []idtools.IDMap{r}
- }
+ Size: newEnd - avail.ContainerID,
+ })
+ }
+ // Intersection of [avail.ContainerID, availEnd) and [usedEnd, +inf) is [newStart, availEnd).
+ if newStart := maxInt(avail.ContainerID, usedEnd); newStart < availEnd {
+ out = append(out, idtools.IDMap{
+ ContainerID: newStart,
+ HostID: newStart + avail.HostID - avail.ContainerID,
+ Size: availEnd - newStart,
+ })
+ }
+ return out
}
// subtractAll subtracts all usedIDs from the available IDs.
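The rewritten subtractHostIDs/subtractContainerIDs replace the previous four-way case analysis with plain interval arithmetic: what remains of [avail, avail+size) after removing [used, used+size) is at most two sub-ranges. A self-contained sketch with a simplified stand-in for idtools.IDMap, showing one worked case:

package main

import "fmt"

// IDMap is a simplified stand-in for idtools.IDMap.
type IDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a < b {
		return b
	}
	return a
}

// subtractHostIDs keeps the parts of avail's host range that used does not cover.
func subtractHostIDs(avail, used IDMap) []IDMap {
	var out []IDMap
	availEnd := avail.HostID + avail.Size
	usedEnd := used.HostID + used.Size
	// Piece to the left of the used range.
	if newEnd := minInt(availEnd, used.HostID); newEnd > avail.HostID {
		out = append(out, IDMap{ContainerID: avail.ContainerID, HostID: avail.HostID, Size: newEnd - avail.HostID})
	}
	// Piece to the right of the used range.
	if newStart := maxInt(avail.HostID, usedEnd); newStart < availEnd {
		out = append(out, IDMap{ContainerID: newStart + avail.ContainerID - avail.HostID, HostID: newStart, Size: availEnd - newStart})
	}
	return out
}

func main() {
	// Removing host IDs [1500,2500) from [1000,3000) leaves [1000,1500) and [2500,3000).
	fmt.Println(subtractHostIDs(
		IDMap{ContainerID: 0, HostID: 1000, Size: 2000},
		IDMap{ContainerID: 0, HostID: 1500, Size: 1000},
	))
	// Output: [{0 1000 500} {1500 2500 500}]
}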
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 762c3a00d..bd6c4feb1 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -273,7 +273,11 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
storageOpts.RunRoot = defaultRootlessRunRoot
}
if storageOpts.GraphRoot == "" {
- storageOpts.GraphRoot = defaultRootlessGraphRoot
+ if storageOpts.RootlessStoragePath != "" {
+ storageOpts.GraphRoot = storageOpts.RootlessStoragePath
+ } else {
+ storageOpts.GraphRoot = defaultRootlessGraphRoot
+ }
}
}
}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
index a8e2fbfa8..ee09adc8d 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/ocicni.go
@@ -24,7 +24,6 @@ import (
type cniNetworkPlugin struct {
cniConfig *libcni.CNIConfig
- loNetwork *cniNetwork
sync.RWMutex
defaultNetName netName
@@ -217,7 +216,6 @@ func initCNI(exec cniinvoke.Exec, cacheDir, defaultNetName string, confDir strin
changeable: defaultNetName == "",
},
networks: make(map[string]*cniNetwork),
- loNetwork: getLoNetwork(),
confDir: confDir,
binDirs: binDirs,
shutdownChan: make(chan struct{}),
@@ -335,31 +333,9 @@ func loadNetworks(confDir string, cni *libcni.CNIConfig) (map[string]*cniNetwork
}
const (
- loIfname string = "lo"
- loNetname string = "cni-loopback"
+ loIfname string = "lo"
)
-func getLoNetwork() *cniNetwork {
- loConfig, err := libcni.ConfListFromBytes([]byte(fmt.Sprintf(`{
- "cniVersion": "0.3.1",
- "name": "%s",
- "plugins": [{
- "type": "loopback"
- }]
-}`, loNetname)))
- if err != nil {
- // The hardcoded config above should always be valid and unit tests will
- // catch this
- panic(err)
- }
- loNetwork := &cniNetwork{
- name: loIfname,
- config: loConfig,
- }
-
- return loNetwork
-}
-
func (plugin *cniNetworkPlugin) syncNetworkConfig() error {
networks, defaultNetName, err := loadNetworks(plugin.confDir, plugin.cniConfig)
if err != nil {
@@ -525,16 +501,11 @@ func (plugin *cniNetworkPlugin) forEachNetwork(podNetwork *PodNetwork, fromCache
return nil
}
-func buildLoopbackRuntimeConf(cacheDir string, podNetwork *PodNetwork) *libcni.RuntimeConf {
- return &libcni.RuntimeConf{
- ContainerID: podNetwork.ID,
- NetNS: podNetwork.NetNS,
- CacheDir: cacheDir,
- IfName: loIfname,
- }
+func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]NetResult, error) {
+ return plugin.SetUpPodWithContext(context.Background(), podNetwork)
}
-func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]NetResult, error) {
+func (plugin *cniNetworkPlugin) SetUpPodWithContext(ctx context.Context, podNetwork PodNetwork) ([]NetResult, error) {
if err := plugin.networksAvailable(&podNetwork); err != nil {
return nil, err
}
@@ -542,15 +513,15 @@ func (plugin *cniNetworkPlugin) SetUpPod(podNetwork PodNetwork) ([]NetResult, er
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
- loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork)
- if _, err := plugin.loNetwork.addToNetwork(loRt, plugin.cniConfig); err != nil {
- logrus.Errorf("Error while adding to cni lo network: %s", err)
+ // Set up loopback interface
+ if err := bringUpLoopback(podNetwork.NetNS); err != nil {
+ logrus.Errorf(err.Error())
return nil, err
}
results := make([]NetResult, 0)
if err := plugin.forEachNetwork(&podNetwork, false, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
- result, err := network.addToNetwork(rt, plugin.cniConfig)
+ result, err := network.addToNetwork(ctx, rt, plugin.cniConfig)
if err != nil {
logrus.Errorf("Error while adding pod to CNI network %q: %s", network.name, err)
return err
@@ -622,7 +593,7 @@ func (plugin *cniNetworkPlugin) getCachedNetworkInfo(containerID string) ([]NetA
continue
}
// Ignore the loopback interface; it's handled separately
- if cachedInfo.IfName == loIfname && cachedInfo.NetName == loNetname {
+ if cachedInfo.IfName == loIfname && cachedInfo.NetName == "cni-loopback" {
continue
}
if cachedInfo.IfName == "" || cachedInfo.NetName == "" {
@@ -641,6 +612,10 @@ func (plugin *cniNetworkPlugin) getCachedNetworkInfo(containerID string) ([]NetA
// TearDownPod tears down pod networks. Prefers cached pod attachment information
// but falls back to given network attachment information.
func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
+ return plugin.TearDownPodWithContext(context.Background(), podNetwork)
+}
+
+func (plugin *cniNetworkPlugin) TearDownPodWithContext(ctx context.Context, podNetwork PodNetwork) error {
if len(podNetwork.Networks) == 0 {
attachments, err := plugin.getCachedNetworkInfo(podNetwork.ID)
if err == nil && len(attachments) > 0 {
@@ -652,17 +627,16 @@ func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
return err
}
- loRt := buildLoopbackRuntimeConf(plugin.cacheDir, &podNetwork)
- if err := plugin.loNetwork.deleteFromNetwork(loRt, plugin.cniConfig); err != nil {
- logrus.Errorf("Error while removing pod from CNI lo network: %v", err)
- // Loopback teardown errors are not fatal
- }
-
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
+ if err := tearDownLoopback(podNetwork.NetNS); err != nil {
+ // ignore error
+ logrus.Errorf("Ignoring error tearing down loopback interface: %v", err)
+ }
+
return plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
- if err := network.deleteFromNetwork(rt, plugin.cniConfig); err != nil {
+ if err := network.deleteFromNetwork(ctx, rt, plugin.cniConfig); err != nil {
logrus.Errorf("Error while removing pod from CNI network %q: %s", network.name, err)
return err
}
@@ -673,12 +647,23 @@ func (plugin *cniNetworkPlugin) TearDownPod(podNetwork PodNetwork) error {
// GetPodNetworkStatus returns IP addressing and interface details for all
// networks attached to the pod.
func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]NetResult, error) {
+ return plugin.GetPodNetworkStatusWithContext(context.Background(), podNetwork)
+}
+
+// GetPodNetworkStatusWithContext returns IP addressing and interface details for all
+// networks attached to the pod.
+func (plugin *cniNetworkPlugin) GetPodNetworkStatusWithContext(ctx context.Context, podNetwork PodNetwork) ([]NetResult, error) {
plugin.podLock(podNetwork).Lock()
defer plugin.podUnlock(podNetwork)
+ if err := checkLoopback(podNetwork.NetNS); err != nil {
+ logrus.Errorf(err.Error())
+ return nil, err
+ }
+
results := make([]NetResult, 0)
if err := plugin.forEachNetwork(&podNetwork, true, func(network *cniNetwork, podNetwork *PodNetwork, rt *libcni.RuntimeConf) error {
- result, err := network.checkNetwork(rt, plugin.cniConfig, plugin.nsManager, podNetwork.NetNS)
+ result, err := network.checkNetwork(ctx, rt, plugin.cniConfig, plugin.nsManager, podNetwork.NetNS)
if err != nil {
logrus.Errorf("Error while checking pod to CNI network %q: %s", network.name, err)
return err
@@ -700,9 +685,9 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(podNetwork PodNetwork) ([]Ne
return results, nil
}
-func (network *cniNetwork) addToNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) (cnitypes.Result, error) {
+func (network *cniNetwork) addToNetwork(ctx context.Context, rt *libcni.RuntimeConf, cni *libcni.CNIConfig) (cnitypes.Result, error) {
logrus.Infof("About to add CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
- res, err := cni.AddNetworkList(context.Background(), network.config, rt)
+ res, err := cni.AddNetworkList(ctx, network.config, rt)
if err != nil {
logrus.Errorf("Error adding network: %v", err)
return nil, err
@@ -711,7 +696,7 @@ func (network *cniNetwork) addToNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIC
return res, nil
}
-func (network *cniNetwork) checkNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig, nsManager *nsManager, netns string) (cnitypes.Result, error) {
+func (network *cniNetwork) checkNetwork(ctx context.Context, rt *libcni.RuntimeConf, cni *libcni.CNIConfig, nsManager *nsManager, netns string) (cnitypes.Result, error) {
logrus.Infof("About to check CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
gtet, err := cniversion.GreaterThanOrEqualTo(network.config.CNIVersion, "0.4.0")
@@ -723,7 +708,7 @@ func (network *cniNetwork) checkNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIC
// When CNIVersion supports Check, use it. Otherwise fall back on what was done initially.
if gtet {
- err = cni.CheckNetworkList(context.Background(), network.config, rt)
+ err = cni.CheckNetworkList(ctx, network.config, rt)
logrus.Infof("Checking CNI network %s (config version=%v)", network.name, network.config.CNIVersion)
if err != nil {
logrus.Errorf("Error checking network: %v", err)
@@ -783,9 +768,9 @@ func (network *cniNetwork) checkNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIC
return converted, nil
}
-func (network *cniNetwork) deleteFromNetwork(rt *libcni.RuntimeConf, cni *libcni.CNIConfig) error {
+func (network *cniNetwork) deleteFromNetwork(ctx context.Context, rt *libcni.RuntimeConf, cni *libcni.CNIConfig) error {
logrus.Infof("About to del CNI network %s (type=%v)", network.name, network.config.Plugins[0].Network.Type)
- if err := cni.DelNetworkList(context.Background(), network.config, rt); err != nil {
+ if err := cni.DelNetworkList(ctx, network.config, rt); err != nil {
logrus.Errorf("Error deleting network: %v", err)
return err
}
@@ -848,6 +833,10 @@ func buildCNIRuntimeConf(cacheDir string, podNetwork *PodNetwork, ifName string,
rt.CapabilityArgs["ipRanges"] = runtimeConfig.IpRanges
}
+ // Set Aliases in Capabilities
+ if len(podNetwork.Aliases) > 0 {
+ rt.CapabilityArgs["aliases"] = podNetwork.Aliases
+ }
return rt, nil
}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
index 717ecda33..7326b4b40 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/types.go
@@ -1,6 +1,8 @@
package ocicni
import (
+ "context"
+
"github.com/containernetworking/cni/pkg/types"
)
@@ -87,6 +89,11 @@ type PodNetwork struct {
// It is optional, and can be omitted for some or all specified networks
// without issue.
RuntimeConfig map[string]RuntimeConfig
+
+ // Aliases are network-scoped names for resolving a container
+	// by name. The map key is the network name and the value is
+	// a string slice of aliases.
+ Aliases map[string][]string
}
// NetAttachment describes a container network attachment
@@ -122,12 +129,21 @@ type CNIPlugin interface {
// pod are launched.
SetUpPod(network PodNetwork) ([]NetResult, error)
+ // SetUpPodWithContext is the same as SetUpPod but takes a context
+ SetUpPodWithContext(ctx context.Context, network PodNetwork) ([]NetResult, error)
+
// TearDownPod is the method called before a pod's sandbox container will be deleted
TearDownPod(network PodNetwork) error
- // Status is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox
+ // TearDownPodWithContext is the same as TearDownPod but takes a context
+ TearDownPodWithContext(ctx context.Context, network PodNetwork) error
+
+ // GetPodNetworkStatus is the method called to obtain the ipv4 or ipv6 addresses of the pod sandbox
GetPodNetworkStatus(network PodNetwork) ([]NetResult, error)
+ // GetPodNetworkStatusWithContext is the same as GetPodNetworkStatus but takes a context
+ GetPodNetworkStatusWithContext(ctx context.Context, network PodNetwork) ([]NetResult, error)
+
// NetworkStatus returns error if the network plugin is in error state
Status() error
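A hedged usage sketch of the new context-aware entry points added to the CNIPlugin interface; plugin construction and the PodNetwork contents are elided, and the package name, function name, and timeout below are arbitrary:

package podnet

import (
	"context"
	"time"

	"github.com/cri-o/ocicni/pkg/ocicni"
)

// setUpPodBounded attaches the pod with a deadline instead of relying on the
// context.Background() used by the plain SetUpPod wrapper.
func setUpPodBounded(plugin ocicni.CNIPlugin, pod ocicni.PodNetwork) ([]ocicni.NetResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	return plugin.SetUpPodWithContext(ctx, pod)
}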
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_linux.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_linux.go
index d263ae4df..53c22f83f 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_linux.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_linux.go
@@ -7,6 +7,9 @@ import (
"net"
"os/exec"
"strings"
+
+ "github.com/containernetworking/plugins/pkg/ns"
+ "github.com/vishvananda/netlink"
)
var defaultNamespaceEnterCommandName = "nsenter"
@@ -69,3 +72,79 @@ func getContainerDetails(nsm *nsManager, netnsPath, interfaceName, addrType stri
return ipNet, &mac, nil
}
+
+func tearDownLoopback(netns string) error {
+ return ns.WithNetNSPath(netns, func(_ ns.NetNS) error {
+ link, err := netlink.LinkByName(loIfname)
+ if err != nil {
+ return err // not tested
+ }
+ err = netlink.LinkSetDown(link)
+ if err != nil {
+ return err // not tested
+ }
+ return nil
+ })
+}
+
+func bringUpLoopback(netns string) error {
+ if err := ns.WithNetNSPath(netns, func(_ ns.NetNS) error {
+ link, err := netlink.LinkByName(loIfname)
+ if err == nil {
+ err = netlink.LinkSetUp(link)
+ }
+ if err != nil {
+ return err
+ }
+
+ v4Addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
+ if err != nil {
+ return err
+ }
+ if len(v4Addrs) != 0 {
+ // sanity check that this is a loopback address
+ for _, addr := range v4Addrs {
+ if !addr.IP.IsLoopback() {
+ return fmt.Errorf("loopback interface found with non-loopback address %q", addr.IP)
+ }
+ }
+ }
+
+ v6Addrs, err := netlink.AddrList(link, netlink.FAMILY_V6)
+ if err != nil {
+ return err
+ }
+ if len(v6Addrs) != 0 {
+ // sanity check that this is a loopback address
+ for _, addr := range v6Addrs {
+ if !addr.IP.IsLoopback() {
+ return fmt.Errorf("loopback interface found with non-loopback address %q", addr.IP)
+ }
+ }
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error adding loopback interface: %s", err)
+ }
+ return nil
+}
+
+func checkLoopback(netns string) error {
+ // Make sure loopback interface is up
+ if err := ns.WithNetNSPath(netns, func(_ ns.NetNS) error {
+ link, err := netlink.LinkByName(loIfname)
+ if err != nil {
+ return err
+ }
+
+ if link.Attrs().Flags&net.FlagUp != net.FlagUp {
+ return fmt.Errorf("loopback interface is down")
+ }
+
+ return nil
+ }); err != nil {
+ return fmt.Errorf("error checking loopback interface: %v", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_unsupported.go b/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_unsupported.go
index 0b1c72208..b87f0d373 100644
--- a/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_unsupported.go
+++ b/vendor/github.com/cri-o/ocicni/pkg/ocicni/util_unsupported.go
@@ -3,17 +3,32 @@
package ocicni
import (
- "fmt"
+ "errors"
"net"
)
type nsManager struct {
}
+var errUnsupportedPlatform = errors.New("unsupported platform")
+
func (nsm *nsManager) init() error {
return nil
}
func getContainerDetails(nsm *nsManager, netnsPath, interfaceName, addrType string) (*net.IPNet, *net.HardwareAddr, error) {
- return nil, nil, fmt.Errorf("not supported yet")
+ return nil, nil, errUnsupportedPlatform
+}
+
+func tearDownLoopback(netns string) error {
+ return errUnsupportedPlatform
+}
+
+func bringUpLoopback(netns string) error {
+ return errUnsupportedPlatform
+}
+
+func checkLoopback(netns string) error {
+ return errUnsupportedPlatform
+
}
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index ca172599d..0ff649013 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -6923,7 +6923,7 @@ paths:
type: "string"
- name: "v"
in: "query"
- description: "Remove the volumes associated with the container."
+ description: "Remove anonymous volumes associated with the container."
type: "boolean"
default: false
- name: "force"
diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go
deleted file mode 100644
index 2259c6be1..000000000
--- a/vendor/github.com/docker/docker/api/types/seccomp.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-// Seccomp represents the config for a seccomp profile for syscall restriction.
-type Seccomp struct {
- DefaultAction Action `json:"defaultAction"`
- // Architectures is kept to maintain backward compatibility with the old
- // seccomp profile.
- Architectures []Arch `json:"architectures,omitempty"`
- ArchMap []Architecture `json:"archMap,omitempty"`
- Syscalls []*Syscall `json:"syscalls"`
-}
-
-// Architecture is used to represent a specific architecture
-// and its sub-architectures
-type Architecture struct {
- Arch Arch `json:"architecture"`
- SubArches []Arch `json:"subArchitectures"`
-}
-
-// Arch used for architectures
-type Arch string
-
-// Additional architectures permitted to be used for system calls
-// By default only the native architecture of the kernel is permitted
-const (
- ArchX86 Arch = "SCMP_ARCH_X86"
- ArchX86_64 Arch = "SCMP_ARCH_X86_64"
- ArchX32 Arch = "SCMP_ARCH_X32"
- ArchARM Arch = "SCMP_ARCH_ARM"
- ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
- ArchMIPS Arch = "SCMP_ARCH_MIPS"
- ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
- ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
- ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
- ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
- ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
- ArchPPC Arch = "SCMP_ARCH_PPC"
- ArchPPC64 Arch = "SCMP_ARCH_PPC64"
- ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
- ArchS390 Arch = "SCMP_ARCH_S390"
- ArchS390X Arch = "SCMP_ARCH_S390X"
-)
-
-// Action taken upon Seccomp rule match
-type Action string
-
-// Define actions for Seccomp rules
-const (
- ActKill Action = "SCMP_ACT_KILL"
- ActTrap Action = "SCMP_ACT_TRAP"
- ActErrno Action = "SCMP_ACT_ERRNO"
- ActTrace Action = "SCMP_ACT_TRACE"
- ActAllow Action = "SCMP_ACT_ALLOW"
-)
-
-// Operator used to match syscall arguments in Seccomp
-type Operator string
-
-// Define operators for syscall arguments in Seccomp
-const (
- OpNotEqual Operator = "SCMP_CMP_NE"
- OpLessThan Operator = "SCMP_CMP_LT"
- OpLessEqual Operator = "SCMP_CMP_LE"
- OpEqualTo Operator = "SCMP_CMP_EQ"
- OpGreaterEqual Operator = "SCMP_CMP_GE"
- OpGreaterThan Operator = "SCMP_CMP_GT"
- OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
-)
-
-// Arg used for matching specific syscall arguments in Seccomp
-type Arg struct {
- Index uint `json:"index"`
- Value uint64 `json:"value"`
- ValueTwo uint64 `json:"valueTwo"`
- Op Operator `json:"op"`
-}
-
-// Filter is used to conditionally apply Seccomp rules
-type Filter struct {
- Caps []string `json:"caps,omitempty"`
- Arches []string `json:"arches,omitempty"`
- MinKernel string `json:"minKernel,omitempty"`
-}
-
-// Syscall is used to match a group of syscalls in Seccomp
-type Syscall struct {
- Name string `json:"name,omitempty"`
- Names []string `json:"names,omitempty"`
- Action Action `json:"action"`
- Args []*Arg `json:"args"`
- Comment string `json:"comment"`
- Includes Filter `json:"includes"`
- Excludes Filter `json:"excludes"`
-}
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 0649a69cc..68064ca9c 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -7,8 +7,8 @@ https://docs.docker.com/engine/reference/api/
Usage
You use the library by creating a client object and calling methods on it. The
-client can be created either from environment variables with NewEnvClient, or
-configured manually with NewClient.
+client can be created either from environment variables with NewClientWithOpts(client.FromEnv),
+or configured manually with NewClient().
For example, to list running containers (the equivalent of "docker ps"):
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm_unix.go
index 9e251dc15..6ba3fb3ae 100644
--- a/vendor/github.com/docker/docker/pkg/system/rm.go
+++ b/vendor/github.com/docker/docker/pkg/system/rm_unix.go
@@ -1,3 +1,5 @@
+// +build !windows
+
package system // import "github.com/docker/docker/pkg/system"
import (
diff --git a/vendor/github.com/docker/docker/pkg/system/rm_windows.go b/vendor/github.com/docker/docker/pkg/system/rm_windows.go
new file mode 100644
index 000000000..ed9c5dcb8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/rm_windows.go
@@ -0,0 +1,6 @@
+package system
+
+import "os"
+
+// EnsureRemoveAll is an alias to os.RemoveAll on Windows
+var EnsureRemoveAll = os.RemoveAll
diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
index 33bc2f8a3..77967b70b 100644
--- a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
+++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
@@ -15,6 +15,7 @@ issues:
- path: testing[/\\].+\.go
linters:
- gosec
+ - noctx
linters:
enable-all: true
@@ -25,9 +26,13 @@ linters:
- gochecknoglobals
- gocognit
- goconst
+ - gocritic
- godot
+ - goerr113
+ - gofumpt
- gomnd
- lll
- nestif
+ - nlreturn
- testpackage
- wsl
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
index 08f89afde..ec1d6258a 100644
--- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -192,6 +192,7 @@ Tom Wilkie
Tomas Knappek
Tonic
ttyh061
+Umut Çömlekçioğlu
upccup
Victor Marmol
Vijay Krishnan
diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
index 706634474..db092935f 100644
--- a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
+++ b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
@@ -3,4 +3,4 @@
http://www.apache.org/licenses/
You can find the Docker license at the following link:
-https://raw.githubusercontent.com/docker/docker/master/LICENSE
+https://raw.githubusercontent.com/docker/docker/HEAD/LICENSE
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.md b/vendor/github.com/fsouza/go-dockerclient/README.md
index b44ff79a5..e170059a1 100644
--- a/vendor/github.com/fsouza/go-dockerclient/README.md
+++ b/vendor/github.com/fsouza/go-dockerclient/README.md
@@ -1,6 +1,6 @@
# go-dockerclient
-[![Build Status](https://github.com/fsouza/go-dockerclient/workflows/Build/badge.svg)](https://github.com/fsouza/go-dockerclient/actions?query=branch:master+workflow:Build)
+[![Build Status](https://github.com/fsouza/go-dockerclient/workflows/Build/badge.svg)](https://github.com/fsouza/go-dockerclient/actions?query=branch:main+workflow:Build)
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/fsouza/go-dockerclient)
This package presents a client for the Docker remote API. It also provides
diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go
index b176e86bd..a1bccd68f 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client.go
@@ -317,7 +317,7 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock,
return nil, err
}
}
- tlsConfig := &tls.Config{}
+ tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12}
if certPEMBlock != nil && keyPEMBlock != nil {
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
@@ -541,7 +541,16 @@ func (c *Client) streamURL(method, url string, streamOptions streamOptions) erro
return err
}
}
- req, err := http.NewRequest(method, url, streamOptions.in)
+
+ // make a sub-context so that our active cancellation does not affect parent
+ ctx := streamOptions.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ subCtx, cancelRequest := context.WithCancel(ctx)
+ defer cancelRequest()
+
+ req, err := http.NewRequestWithContext(ctx, method, url, streamOptions.in)
if err != nil {
return err
}
@@ -562,14 +571,6 @@ func (c *Client) streamURL(method, url string, streamOptions streamOptions) erro
streamOptions.stderr = ioutil.Discard
}
- // make a sub-context so that our active cancellation does not affect parent
- ctx := streamOptions.context
- if ctx == nil {
- ctx = context.Background()
- }
- subCtx, cancelRequest := context.WithCancel(ctx)
- defer cancelRequest()
-
if protocol == unixProtocol || protocol == namedPipeProtocol {
var dial net.Conn
dial, err = c.Dialer.Dial(protocol, address)
@@ -958,6 +959,7 @@ func queryString(opts interface{}) string {
}
func addQueryStringValue(items url.Values, key string, v reflect.Value) bool {
+ //nolint:exhaustive
switch v.Kind() {
case reflect.Bool:
if v.Bool() {
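The streamURL change above switches from http.NewRequest to http.NewRequestWithContext so the streamed request can be cancelled. A generic sketch of that pattern, independent of the client internals; the URL and timeout are illustrative only:

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// A sub-context bounds this request without cancelling the caller's context.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost:2375/_ping", nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}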
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go
index d4a4c95f8..48e550495 100644
--- a/vendor/github.com/fsouza/go-dockerclient/container.go
+++ b/vendor/github.com/fsouza/go-dockerclient/container.go
@@ -5,13 +5,7 @@
package docker
import (
- "context"
- "encoding/json"
- "errors"
"fmt"
- "io"
- "net/http"
- "net/url"
"strconv"
"strings"
"time"
@@ -19,23 +13,6 @@ import (
units "github.com/docker/go-units"
)
-// ErrContainerAlreadyExists is the error returned by CreateContainer when the
-// container already exists.
-var ErrContainerAlreadyExists = errors.New("container already exists")
-
-// ListContainersOptions specify parameters to the ListContainers function.
-//
-// See https://goo.gl/kaOHGw for more details.
-type ListContainersOptions struct {
- All bool
- Size bool
- Limit int
- Since string
- Before string
- Filters map[string][]string
- Context context.Context
-}
-
// APIPort is a type that represents a port mapping returned by the Docker API
type APIPort struct {
PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty" toml:"PrivatePort,omitempty"`
@@ -80,23 +57,6 @@ type NetworkList struct {
Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty" toml:"Networks,omitempty"`
}
-// ListContainers returns a slice of containers matching the given criteria.
-//
-// See https://goo.gl/kaOHGw for more details.
-func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
- path := "/containers/json?" + queryString(opts)
- resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- var containers []APIContainers
- if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
- return nil, err
- }
- return containers, nil
-}
-
// Port represents the port number and the protocol, in the form
// <number>/<protocol>. For example: 80/tcp.
type Port string
@@ -482,200 +442,6 @@ type Container struct {
SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty" toml:"SizeRootFs,omitempty"`
}
-// UpdateContainerOptions specify parameters to the UpdateContainer function.
-//
-// See https://goo.gl/Y6fXUy for more details.
-type UpdateContainerOptions struct {
- BlkioWeight int `json:"BlkioWeight"`
- CPUShares int `json:"CpuShares"`
- CPUPeriod int `json:"CpuPeriod"`
- CPURealtimePeriod int64 `json:"CpuRealtimePeriod"`
- CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"`
- CPUQuota int `json:"CpuQuota"`
- CpusetCpus string `json:"CpusetCpus"`
- CpusetMems string `json:"CpusetMems"`
- Memory int `json:"Memory"`
- MemorySwap int `json:"MemorySwap"`
- MemoryReservation int `json:"MemoryReservation"`
- KernelMemory int `json:"KernelMemory"`
- RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"`
- Context context.Context
-}
-
-// UpdateContainer updates the container at ID with the options
-//
-// See https://goo.gl/Y6fXUy for more details.
-func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
- resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{
- data: opts,
- forceJSON: true,
- context: opts.Context,
- })
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- return nil
-}
-
-// RenameContainerOptions specify parameters to the RenameContainer function.
-//
-// See https://goo.gl/46inai for more details.
-type RenameContainerOptions struct {
- // ID of container to rename
- ID string `qs:"-"`
-
- // New name
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Context context.Context
-}
-
-// RenameContainer updates and existing containers name
-//
-// See https://goo.gl/46inai for more details.
-func (c *Client) RenameContainer(opts RenameContainerOptions) error {
- resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
- context: opts.Context,
- })
- if err != nil {
- return err
- }
- resp.Body.Close()
- return nil
-}
-
-// InspectContainer returns information about a container by its ID.
-//
-// Deprecated: Use InspectContainerWithOptions instead.
-func (c *Client) InspectContainer(id string) (*Container, error) {
- return c.InspectContainerWithOptions(InspectContainerOptions{ID: id})
-}
-
-// InspectContainerWithContext returns information about a container by its ID.
-// The context object can be used to cancel the inspect request.
-//
-// Deprecated: Use InspectContainerWithOptions instead.
-//nolint:golint
-func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) {
- return c.InspectContainerWithOptions(InspectContainerOptions{ID: id, Context: ctx})
-}
-
-// InspectContainerWithOptions returns information about a container by its ID.
-//
-// See https://goo.gl/FaI5JT for more details.
-func (c *Client) InspectContainerWithOptions(opts InspectContainerOptions) (*Container, error) {
- path := "/containers/" + opts.ID + "/json?" + queryString(opts)
- resp, err := c.do(http.MethodGet, path, doOptions{
- context: opts.Context,
- })
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return nil, &NoSuchContainer{ID: opts.ID}
- }
- return nil, err
- }
- defer resp.Body.Close()
- var container Container
- if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
- return nil, err
- }
- return &container, nil
-}
-
-// InspectContainerOptions specifies parameters for InspectContainerWithOptions.
-//
-// See https://goo.gl/FaI5JT for more details.
-type InspectContainerOptions struct {
- Context context.Context
- ID string `qs:"-"`
- Size bool
-}
-
-// ContainerChanges returns changes in the filesystem of the given container.
-//
-// See https://goo.gl/15KKzh for more details.
-func (c *Client) ContainerChanges(id string) ([]Change, error) {
- path := "/containers/" + id + "/changes"
- resp, err := c.do(http.MethodGet, path, doOptions{})
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return nil, &NoSuchContainer{ID: id}
- }
- return nil, err
- }
- defer resp.Body.Close()
- var changes []Change
- if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {
- return nil, err
- }
- return changes, nil
-}
-
-// CreateContainerOptions specify parameters to the CreateContainer function.
-//
-// See https://goo.gl/tyzwVM for more details.
-type CreateContainerOptions struct {
- Name string
- Config *Config `qs:"-"`
- HostConfig *HostConfig `qs:"-"`
- NetworkingConfig *NetworkingConfig `qs:"-"`
- Context context.Context
-}
-
-// CreateContainer creates a new container, returning the container instance,
-// or an error in case of failure.
-//
-// The returned container instance contains only the container ID. To get more
-// details about the container after creating it, use InspectContainer.
-//
-// See https://goo.gl/tyzwVM for more details.
-func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
- path := "/containers/create?" + queryString(opts)
- resp, err := c.do(
- http.MethodPost,
- path,
- doOptions{
- data: struct {
- *Config
- HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"`
- NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty" toml:"NetworkingConfig,omitempty"`
- }{
- opts.Config,
- opts.HostConfig,
- opts.NetworkingConfig,
- },
- context: opts.Context,
- },
- )
-
- if e, ok := err.(*Error); ok {
- if e.Status == http.StatusNotFound && strings.Contains(e.Message, "No such image") {
- return nil, ErrNoSuchImage
- }
- if e.Status == http.StatusConflict {
- return nil, ErrContainerAlreadyExists
- }
- // Workaround for 17.09 bug returning 400 instead of 409.
- // See https://github.com/moby/moby/issues/35021
- if e.Status == http.StatusBadRequest && strings.Contains(e.Message, "Conflict.") {
- return nil, ErrContainerAlreadyExists
- }
- }
-
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- var container Container
- if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
- return nil, err
- }
-
- container.Name = opts.Name
-
- return &container, nil
-}
-
// KeyValuePair is a type for generic key/value pairs as used in the Lxc
// configuration
type KeyValuePair struct {
@@ -683,45 +449,6 @@ type KeyValuePair struct {
Value string `json:"Value,omitempty" yaml:"Value,omitempty" toml:"Value,omitempty"`
}
-// RestartPolicy represents the policy for automatically restarting a container.
-//
-// Possible values are:
-//
-// - always: the docker daemon will always restart the container
-// - on-failure: the docker daemon will restart the container on failures, at
-// most MaximumRetryCount times
-// - unless-stopped: the docker daemon will always restart the container except
-// when user has manually stopped the container
-// - no: the docker daemon will not restart the container automatically
-type RestartPolicy struct {
- Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
- MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" toml:"MaximumRetryCount,omitempty"`
-}
-
-// AlwaysRestart returns a restart policy that tells the Docker daemon to
-// always restart the container.
-func AlwaysRestart() RestartPolicy {
- return RestartPolicy{Name: "always"}
-}
-
-// RestartOnFailure returns a restart policy that tells the Docker daemon to
-// restart the container on failures, trying at most maxRetry times.
-func RestartOnFailure(maxRetry int) RestartPolicy {
- return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
-}
-
-// RestartUnlessStopped returns a restart policy that tells the Docker daemon to
-// always restart the container except when user has manually stopped the container.
-func RestartUnlessStopped() RestartPolicy {
- return RestartPolicy{Name: "unless-stopped"}
-}
-
-// NeverRestart returns a restart policy that tells the Docker daemon to never
-// restart the container on failures.
-func NeverRestart() RestartPolicy {
- return RestartPolicy{Name: "no"}
-}
-
// Device represents a device mapping between the Docker host and the
// container.
type Device struct {
@@ -836,800 +563,6 @@ type NetworkingConfig struct {
EndpointsConfig map[string]*EndpointConfig `json:"EndpointsConfig" yaml:"EndpointsConfig" toml:"EndpointsConfig"` // Endpoint configs for each connecting network
}
-// StartContainer starts a container, returning an error in case of failure.
-//
-// Passing the HostConfig to this method has been deprecated in Docker API 1.22
-// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
-// 1.12.x). The client will ignore the parameter when communicating with Docker
-// API 1.24 or greater.
-//
-// See https://goo.gl/fbOSZy for more details.
-func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
- return c.startContainer(id, hostConfig, doOptions{})
-}
-
-// StartContainerWithContext starts a container, returning an error in case of
-// failure. The context can be used to cancel the outstanding start container
-// request.
-//
-// Passing the HostConfig to this method has been deprecated in Docker API 1.22
-// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
-// 1.12.x). The client will ignore the parameter when communicating with Docker
-// API 1.24 or greater.
-//
-// See https://goo.gl/fbOSZy for more details.
-//nolint:golint
-func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error {
- return c.startContainer(id, hostConfig, doOptions{context: ctx})
-}
-
-func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error {
- path := "/containers/" + id + "/start"
- if c.serverAPIVersion == nil {
- c.checkAPIVersion()
- }
- if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) {
- opts.data = hostConfig
- opts.forceJSON = true
- }
- resp, err := c.do(http.MethodPost, path, opts)
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: id, Err: err}
- }
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusNotModified {
- return &ContainerAlreadyRunning{ID: id}
- }
- return nil
-}
-
-// StopContainer stops a container, killing it after the given timeout (in
-// seconds).
-//
-// See https://goo.gl/R9dZcV for more details.
-func (c *Client) StopContainer(id string, timeout uint) error {
- return c.stopContainer(id, timeout, doOptions{})
-}
-
-// StopContainerWithContext stops a container, killing it after the given
-// timeout (in seconds). The context can be used to cancel the stop
-// container request.
-//
-// See https://goo.gl/R9dZcV for more details.
-//nolint:golint
-func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error {
- return c.stopContainer(id, timeout, doOptions{context: ctx})
-}
-
-func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
- path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
- resp, err := c.do(http.MethodPost, path, opts)
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: id}
- }
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode == http.StatusNotModified {
- return &ContainerNotRunning{ID: id}
- }
- return nil
-}
-
-// RestartContainer stops a container, killing it after the given timeout (in
-// seconds) if it has not stopped, and then starts it again.
-//
-// See https://goo.gl/MrAKQ5 for more details.
-func (c *Client) RestartContainer(id string, timeout uint) error {
- path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
- resp, err := c.do(http.MethodPost, path, doOptions{})
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: id}
- }
- return err
- }
- resp.Body.Close()
- return nil
-}
-
-// PauseContainer pauses the given container.
-//
-// See https://goo.gl/D1Yaii for more details.
-func (c *Client) PauseContainer(id string) error {
- path := fmt.Sprintf("/containers/%s/pause", id)
- resp, err := c.do(http.MethodPost, path, doOptions{})
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: id}
- }
- return err
- }
- resp.Body.Close()
- return nil
-}
-
-// UnpauseContainer unpauses the given container.
-//
-// See https://goo.gl/sZ2faO for more details.
-func (c *Client) UnpauseContainer(id string) error {
- path := fmt.Sprintf("/containers/%s/unpause", id)
- resp, err := c.do(http.MethodPost, path, doOptions{})
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: id}
- }
- return err
- }
- resp.Body.Close()
- return nil
-}
-
-// TopResult represents the list of processes running in a container, as
-// returned by /containers/<id>/top.
-//
-// See https://goo.gl/FLwpPl for more details.
-type TopResult struct {
- Titles []string
- Processes [][]string
-}
-
-// TopContainer returns processes running inside a container
-//
-// See https://goo.gl/FLwpPl for more details.
-func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
- var args string
- var result TopResult
- if psArgs != "" {
- args = fmt.Sprintf("?ps_args=%s", psArgs)
- }
- path := fmt.Sprintf("/containers/%s/top%s", id, args)
- resp, err := c.do(http.MethodGet, path, doOptions{})
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return result, &NoSuchContainer{ID: id}
- }
- return result, err
- }
- defer resp.Body.Close()
- err = json.NewDecoder(resp.Body).Decode(&result)
- return result, err
-}
-
-// Stats represents container statistics, returned by /containers/<id>/stats.
-//
-// See https://goo.gl/Dk3Xio for more details.
-type Stats struct {
- Read time.Time `json:"read,omitempty" yaml:"read,omitempty" toml:"read,omitempty"`
- PreRead time.Time `json:"preread,omitempty" yaml:"preread,omitempty" toml:"preread,omitempty"`
- NumProcs uint32 `json:"num_procs" yaml:"num_procs" toml:"num_procs"`
- PidsStats struct {
- Current uint64 `json:"current,omitempty" yaml:"current,omitempty"`
- } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty" toml:"pids_stats,omitempty"`
- Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty" toml:"network,omitempty"`
- Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty" toml:"networks,omitempty"`
- MemoryStats struct {
- Stats struct {
- TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"`
- Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"`
- MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"`
- TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"`
- Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"`
- Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"`
- TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"`
- Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"`
- Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"`
- Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"`
- TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"`
- Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"`
- TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"`
- TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"`
- TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"`
- TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"`
- RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"`
- HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"`
- TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"`
- TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"`
- ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"`
- TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"`
- TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"`
- TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"`
- InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"`
- ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"`
- Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"`
- InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"`
- TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"`
- HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"`
- Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"`
- } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"`
- MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"`
- Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"`
- Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"`
- Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"`
- Commit uint64 `json:"commitbytes,omitempty" yaml:"commitbytes,omitempty" toml:"privateworkingset,omitempty"`
- CommitPeak uint64 `json:"commitpeakbytes,omitempty" yaml:"commitpeakbytes,omitempty" toml:"commitpeakbytes,omitempty"`
- PrivateWorkingSet uint64 `json:"privateworkingset,omitempty" yaml:"privateworkingset,omitempty" toml:"privateworkingset,omitempty"`
- } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" toml:"memory_stats,omitempty"`
- BlkioStats struct {
- IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" toml:"io_service_bytes_recursive,omitempty"`
- IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" toml:"io_serviced_recursive,omitempty"`
- IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" toml:"io_queue_recursive,omitempty"`
- IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" toml:"io_service_time_recursive,omitempty"`
- IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" toml:"io_wait_time_recursive,omitempty"`
- IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" toml:"io_merged_recursive,omitempty"`
- IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" toml:"io_time_recursive,omitempty"`
- SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" toml:"sectors_recursive,omitempty"`
- } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" toml:"blkio_stats,omitempty"`
- CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" toml:"cpu_stats,omitempty"`
- PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
- StorageStats struct {
- ReadCountNormalized uint64 `json:"read_count_normalized,omitempty" yaml:"read_count_normalized,omitempty" toml:"read_count_normalized,omitempty"`
- ReadSizeBytes uint64 `json:"read_size_bytes,omitempty" yaml:"read_size_bytes,omitempty" toml:"read_size_bytes,omitempty"`
- WriteCountNormalized uint64 `json:"write_count_normalized,omitempty" yaml:"write_count_normalized,omitempty" toml:"write_count_normalized,omitempty"`
- WriteSizeBytes uint64 `json:"write_size_bytes,omitempty" yaml:"write_size_bytes,omitempty" toml:"write_size_bytes,omitempty"`
- } `json:"storage_stats,omitempty" yaml:"storage_stats,omitempty" toml:"storage_stats,omitempty"`
-}
-
-// NetworkStats is a stats entry for network stats
-type NetworkStats struct {
- RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" toml:"rx_dropped,omitempty"`
- RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" toml:"rx_bytes,omitempty"`
- RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" toml:"rx_errors,omitempty"`
- TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" toml:"tx_packets,omitempty"`
- TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" toml:"tx_dropped,omitempty"`
- RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" toml:"rx_packets,omitempty"`
- TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" toml:"tx_errors,omitempty"`
- TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" toml:"tx_bytes,omitempty"`
-}
-
-// CPUStats is a stats entry for cpu stats
-type CPUStats struct {
- CPUUsage struct {
- PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" toml:"percpu_usage,omitempty"`
- UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" toml:"usage_in_usermode,omitempty"`
- TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty" toml:"total_usage,omitempty"`
- UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" toml:"usage_in_kernelmode,omitempty"`
- } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" toml:"cpu_usage,omitempty"`
- SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" toml:"system_cpu_usage,omitempty"`
- OnlineCPUs uint64 `json:"online_cpus,omitempty" yaml:"online_cpus,omitempty" toml:"online_cpus,omitempty"`
- ThrottlingData struct {
- Periods uint64 `json:"periods,omitempty"`
- ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
- ThrottledTime uint64 `json:"throttled_time,omitempty"`
- } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty" toml:"throttling_data,omitempty"`
-}
-
-// BlkioStatsEntry is a stats entry for blkio_stats
-type BlkioStatsEntry struct {
- Major uint64 `json:"major,omitempty" yaml:"major,omitempty" toml:"major,omitempty"`
- Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty" toml:"minor,omitempty"`
- Op string `json:"op,omitempty" yaml:"op,omitempty" toml:"op,omitempty"`
- Value uint64 `json:"value,omitempty" yaml:"value,omitempty" toml:"value,omitempty"`
-}
-
-// StatsOptions specify parameters to the Stats function.
-//
-// See https://goo.gl/Dk3Xio for more details.
-type StatsOptions struct {
- ID string
- Stats chan<- *Stats
- Stream bool
- // A flag that enables stopping the stats operation
- Done <-chan bool
- // Initial connection timeout
- Timeout time.Duration
-	// Timeout when no data is received; it is reset every time new
-	// data arrives
- InactivityTimeout time.Duration `qs:"-"`
- Context context.Context
-}
-
-// Stats sends container statistics for the given container to the given channel.
-//
-// This function is blocking, similar to a streaming call for logs, and should be run
-// in a separate goroutine from the caller. Note that this function will block until
-// the given container is removed, not just exited. When finished, this function
-// will close the given channel. Alternatively, the function can be stopped by
-// signaling on the Done channel.
-//
-// See https://goo.gl/Dk3Xio for more details.
-func (c *Client) Stats(opts StatsOptions) (retErr error) {
- errC := make(chan error, 1)
- readCloser, writeCloser := io.Pipe()
-
- defer func() {
- close(opts.Stats)
-
- if err := <-errC; err != nil && retErr == nil {
- retErr = err
- }
-
- if err := readCloser.Close(); err != nil && retErr == nil {
- retErr = err
- }
- }()
-
- reqSent := make(chan struct{})
- go func() {
- defer close(errC)
- err := c.stream(http.MethodGet, fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
- rawJSONStream: true,
- useJSONDecoder: true,
- stdout: writeCloser,
- timeout: opts.Timeout,
- inactivityTimeout: opts.InactivityTimeout,
- context: opts.Context,
- reqSent: reqSent,
- })
- if err != nil {
- dockerError, ok := err.(*Error)
- if ok {
- if dockerError.Status == http.StatusNotFound {
- err = &NoSuchContainer{ID: opts.ID}
- }
- }
- }
- if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
- err = closeErr
- }
- errC <- err
- }()
-
- quit := make(chan struct{})
- defer close(quit)
- go func() {
- // block here waiting for the signal to stop function
- select {
- case <-opts.Done:
- readCloser.Close()
- case <-quit:
- return
- }
- }()
-
- decoder := json.NewDecoder(readCloser)
- stats := new(Stats)
- <-reqSent
- for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
- if err != nil {
- return err
- }
- opts.Stats <- stats
- stats = new(Stats)
- }
- return nil
-}
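Because Stats blocks until the container goes away (or Done is signaled) and closes the channel it is given, callers normally run it in its own goroutine and range over the channel. A minimal sketch, assuming a client from docker.NewClientFromEnv and a hypothetical container name:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	statsC := make(chan *docker.Stats)
	done := make(chan bool)
	go func() {
		// Stats closes statsC when it returns, which ends the range below.
		err := client.Stats(docker.StatsOptions{
			ID:     "my-container", // hypothetical container name or ID
			Stats:  statsC,
			Stream: true,
			Done:   done,
		})
		if err != nil {
			log.Println("stats:", err)
		}
	}()
	for s := range statsC {
		log.Printf("total CPU usage: %d", s.CPUStats.CPUUsage.TotalUsage)
	}
}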
-
-// KillContainerOptions represents the set of options that can be used in a
-// call to KillContainer.
-//
-// See https://goo.gl/JnTxXZ for more details.
-type KillContainerOptions struct {
- // The ID of the container.
- ID string `qs:"-"`
-
- // The signal to send to the container. When omitted, Docker server
- // will assume SIGKILL.
- Signal Signal
- Context context.Context
-}
-
-// KillContainer sends a signal to a container, returning an error in case of
-// failure.
-//
-// See https://goo.gl/JnTxXZ for more details.
-func (c *Client) KillContainer(opts KillContainerOptions) error {
- path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
- resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
- if err != nil {
- e, ok := err.(*Error)
- if !ok {
- return err
- }
- switch e.Status {
- case http.StatusNotFound:
- return &NoSuchContainer{ID: opts.ID}
- case http.StatusConflict:
- return &ContainerNotRunning{ID: opts.ID}
- default:
- return err
- }
- }
- resp.Body.Close()
- return nil
-}
-
-// RemoveContainerOptions encapsulates options to remove a container.
-//
-// See https://goo.gl/hL5IPC for more details.
-type RemoveContainerOptions struct {
- // The ID of the container.
- ID string `qs:"-"`
-
-	// A flag that indicates whether Docker should remove the volumes
-	// associated with the container.
- RemoveVolumes bool `qs:"v"`
-
- // A flag that indicates whether Docker should remove the container
- // even if it is currently running.
- Force bool
- Context context.Context
-}
-
-// RemoveContainer removes a container, returning an error in case of failure.
-//
-// See https://goo.gl/hL5IPC for more details.
-func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
- path := "/containers/" + opts.ID + "?" + queryString(opts)
- resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: opts.ID}
- }
- return err
- }
- resp.Body.Close()
- return nil
-}
-
-// UploadToContainerOptions is the set of options that can be used when
-// uploading an archive into a container.
-//
-// See https://goo.gl/g25o7u for more details.
-type UploadToContainerOptions struct {
- InputStream io.Reader `json:"-" qs:"-"`
- Path string `qs:"path"`
- NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
- Context context.Context
-}
-
-// UploadToContainer uploads a tar archive to be extracted to a path in the
-// filesystem of the container.
-//
-// See https://goo.gl/g25o7u for more details.
-func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
- url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
-
- return c.stream(http.MethodPut, url, streamOptions{
- in: opts.InputStream,
- context: opts.Context,
- })
-}
-
-// DownloadFromContainerOptions is the set of options that can be used when
-// downloading resources from a container.
-//
-// See https://goo.gl/W49jxK for more details.
-type DownloadFromContainerOptions struct {
- OutputStream io.Writer `json:"-" qs:"-"`
- Path string `qs:"path"`
- InactivityTimeout time.Duration `qs:"-"`
- Context context.Context
-}
-
-// DownloadFromContainer downloads a tar archive of files or folders in a container.
-//
-// See https://goo.gl/W49jxK for more details.
-func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
- url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
-
- return c.stream(http.MethodGet, url, streamOptions{
- setRawTerminal: true,
- stdout: opts.OutputStream,
- inactivityTimeout: opts.InactivityTimeout,
- context: opts.Context,
- })
-}
-
-// CopyFromContainerOptions contains the set of options used for copying
-// files from a container.
-//
-// Deprecated: Use DownloadFromContainerOptions and DownloadFromContainer instead.
-type CopyFromContainerOptions struct {
- OutputStream io.Writer `json:"-"`
- Container string `json:"-"`
- Resource string
- Context context.Context `json:"-"`
-}
-
-// CopyFromContainer copies files from a container.
-//
-// Deprecated: Use DownloadFromContainer instead.
-func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
- if opts.Container == "" {
- return &NoSuchContainer{ID: opts.Container}
- }
- if c.serverAPIVersion == nil {
- c.checkAPIVersion()
- }
- if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) {
- return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead")
- }
- url := fmt.Sprintf("/containers/%s/copy", opts.Container)
- resp, err := c.do(http.MethodPost, url, doOptions{
- data: opts,
- context: opts.Context,
- })
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return &NoSuchContainer{ID: opts.Container}
- }
- return err
- }
- defer resp.Body.Close()
- _, err = io.Copy(opts.OutputStream, resp.Body)
- return err
-}
-
-// WaitContainer blocks until the given container stops, returning the exit code
-// of the container's main process.
-//
-// See https://goo.gl/4AGweZ for more details.
-func (c *Client) WaitContainer(id string) (int, error) {
- return c.waitContainer(id, doOptions{})
-}
-
-// WaitContainerWithContext blocks until the given container stops, returning the
-// exit code of the container's main process. The context object can be used to
-// cancel the wait request.
-//
-// See https://goo.gl/4AGweZ for more details.
-//nolint:golint
-func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) {
- return c.waitContainer(id, doOptions{context: ctx})
-}
-
-func (c *Client) waitContainer(id string, opts doOptions) (int, error) {
- resp, err := c.do(http.MethodPost, "/containers/"+id+"/wait", opts)
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return 0, &NoSuchContainer{ID: id}
- }
- return 0, err
- }
- defer resp.Body.Close()
- var r struct{ StatusCode int }
- if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
- return 0, err
- }
- return r.StatusCode, nil
-}
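WaitContainer pairs naturally with StartContainer when the exit status of the container's command is needed. A minimal sketch, assuming the container has already been created under a hypothetical name:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	id := "my-container" // hypothetical, already-created container
	if err := client.StartContainer(id, nil); err != nil {
		log.Fatal(err)
	}
	// Block until the container's main process exits and report its code.
	code, err := client.WaitContainer(id)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("container %s exited with status %d", id, code)
}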
-
-// CommitContainerOptions aggregates parameters to the CommitContainer method.
-//
-// See https://goo.gl/CzIguf for more details.
-type CommitContainerOptions struct {
- Container string
- Repository string `qs:"repo"`
- Tag string
- Message string `qs:"comment"`
- Author string
- Changes []string `qs:"changes"`
- Run *Config `qs:"-"`
- Context context.Context
-}
-
-// CommitContainer creates a new image from a container's changes.
-//
-// See https://goo.gl/CzIguf for more details.
-func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
- path := "/commit?" + queryString(opts)
- resp, err := c.do(http.MethodPost, path, doOptions{
- data: opts.Run,
- context: opts.Context,
- })
- if err != nil {
- if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
- return nil, &NoSuchContainer{ID: opts.Container}
- }
- return nil, err
- }
- defer resp.Body.Close()
- var image Image
- if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
- return nil, err
- }
- return &image, nil
-}
-
-// AttachToContainerOptions is the set of options that can be used when
-// attaching to a container.
-//
-// See https://goo.gl/JF10Zk for more details.
-type AttachToContainerOptions struct {
- Container string `qs:"-"`
- InputStream io.Reader `qs:"-"`
- OutputStream io.Writer `qs:"-"`
- ErrorStream io.Writer `qs:"-"`
-
- // If set, after a successful connect, a sentinel will be sent and then the
- // client will block on receive before continuing.
- //
- // It must be an unbuffered channel. Using a buffered channel can lead
- // to unexpected behavior.
- Success chan struct{}
-
- // Override the key sequence for detaching a container.
- DetachKeys string
-
- // Use raw terminal? Usually true when the container contains a TTY.
- RawTerminal bool `qs:"-"`
-
-	// Get container logs, sending them to OutputStream.
- Logs bool
-
- // Stream the response?
- Stream bool
-
- // Attach to stdin, and use InputStream.
- Stdin bool
-
- // Attach to stdout, and use OutputStream.
- Stdout bool
-
- // Attach to stderr, and use ErrorStream.
- Stderr bool
-}
-
-// AttachToContainer attaches to a container, using the given options.
-//
-// See https://goo.gl/JF10Zk for more details.
-func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
- cw, err := c.AttachToContainerNonBlocking(opts)
- if err != nil {
- return err
- }
- return cw.Wait()
-}
-
-// AttachToContainerNonBlocking attaches to a container, using the given options.
-// This function does not block.
-//
-// See https://goo.gl/NKpkFk for more details.
-func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) {
- if opts.Container == "" {
- return nil, &NoSuchContainer{ID: opts.Container}
- }
- path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
- return c.hijack(http.MethodPost, path, hijackOptions{
- success: opts.Success,
- setRawTerminal: opts.RawTerminal,
- in: opts.InputStream,
- stdout: opts.OutputStream,
- stderr: opts.ErrorStream,
- })
-}
-
-// LogsOptions represents the set of options used when getting logs from a
-// container.
-//
-// See https://goo.gl/krK0ZH for more details.
-type LogsOptions struct {
- Context context.Context
- Container string `qs:"-"`
- OutputStream io.Writer `qs:"-"`
- ErrorStream io.Writer `qs:"-"`
- InactivityTimeout time.Duration `qs:"-"`
- Tail string
-
- Since int64
- Follow bool
- Stdout bool
- Stderr bool
- Timestamps bool
-
- // Use raw terminal? Usually true when the container contains a TTY.
- RawTerminal bool `qs:"-"`
-}
-
-// Logs gets stdout and stderr logs from the specified container.
-//
-// When LogsOptions.RawTerminal is set to false, go-dockerclient will multiplex
-// the streams and send the container's stdout to LogsOptions.OutputStream, and
-// stderr to LogsOptions.ErrorStream.
-//
-// When LogsOptions.RawTerminal is true, callers will get the raw stream on
-// LogsOptions.OutputStream. The caller can use libraries such as dlog
-// (github.com/ahmetalpbalkan/dlog).
-//
-// See https://goo.gl/krK0ZH for more details.
-func (c *Client) Logs(opts LogsOptions) error {
- if opts.Container == "" {
- return &NoSuchContainer{ID: opts.Container}
- }
- if opts.Tail == "" {
- opts.Tail = "all"
- }
- path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
- return c.stream(http.MethodGet, path, streamOptions{
- setRawTerminal: opts.RawTerminal,
- stdout: opts.OutputStream,
- stderr: opts.ErrorStream,
- inactivityTimeout: opts.InactivityTimeout,
- context: opts.Context,
- })
-}
-
-// ResizeContainerTTY resizes the terminal to the given height and width.
-//
-// See https://goo.gl/FImjeq for more details.
-func (c *Client) ResizeContainerTTY(id string, height, width int) error {
- params := make(url.Values)
- params.Set("h", strconv.Itoa(height))
- params.Set("w", strconv.Itoa(width))
- resp, err := c.do(http.MethodPost, "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
- if err != nil {
- return err
- }
- resp.Body.Close()
- return nil
-}
-
-// ExportContainerOptions is the set of parameters to the ExportContainer
-// method.
-//
-// See https://goo.gl/yGJCIh for more details.
-type ExportContainerOptions struct {
- ID string
- OutputStream io.Writer
- InactivityTimeout time.Duration `qs:"-"`
- Context context.Context
-}
-
-// ExportContainer exports the contents of container id as a tar archive,
-// writing it to the configured OutputStream.
-//
-// See https://goo.gl/yGJCIh for more details.
-func (c *Client) ExportContainer(opts ExportContainerOptions) error {
- if opts.ID == "" {
- return &NoSuchContainer{ID: opts.ID}
- }
- url := fmt.Sprintf("/containers/%s/export", opts.ID)
- return c.stream(http.MethodGet, url, streamOptions{
- setRawTerminal: true,
- stdout: opts.OutputStream,
- inactivityTimeout: opts.InactivityTimeout,
- context: opts.Context,
- })
-}
-
-// PruneContainersOptions specify parameters to the PruneContainers function.
-//
-// See https://goo.gl/wnkgDT for more details.
-type PruneContainersOptions struct {
- Filters map[string][]string
- Context context.Context
-}
-
-// PruneContainersResults specify results from the PruneContainers function.
-//
-// See https://goo.gl/wnkgDT for more details.
-type PruneContainersResults struct {
- ContainersDeleted []string
- SpaceReclaimed int64
-}
-
-// PruneContainers deletes containers which are stopped.
-//
-// See https://goo.gl/wnkgDT for more details.
-func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) {
- path := "/containers/prune?" + queryString(opts)
- resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- var results PruneContainersResults
- if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
- return nil, err
- }
- return &results, nil
-}
-
// NoSuchContainer is the error returned when a given container does not exist.
type NoSuchContainer struct {
ID string
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_archive.go b/vendor/github.com/fsouza/go-dockerclient/container_archive.go
new file mode 100644
index 000000000..6c7e61c79
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_archive.go
@@ -0,0 +1,58 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+)
+
+// UploadToContainerOptions is the set of options that can be used when
+// uploading an archive into a container.
+//
+// See https://goo.gl/g25o7u for more details.
+type UploadToContainerOptions struct {
+ InputStream io.Reader `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
+ Context context.Context
+}
+
+// UploadToContainer uploads a tar archive to be extracted to a path in the
+// filesystem of the container.
+//
+// See https://goo.gl/g25o7u for more details.
+func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream(http.MethodPut, url, streamOptions{
+ in: opts.InputStream,
+ context: opts.Context,
+ })
+}
+
+// DownloadFromContainerOptions is the set of options that can be used when
+// downloading resources from a container.
+//
+// See https://goo.gl/W49jxK for more details.
+type DownloadFromContainerOptions struct {
+ OutputStream io.Writer `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// DownloadFromContainer downloads a tar archive of files or folders in a container.
+//
+// See https://goo.gl/W49jxK for more details.
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream(http.MethodGet, url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
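UploadToContainer expects InputStream to be a tar stream, which the daemon extracts at Path. A minimal sketch, assuming a client from docker.NewClientFromEnv and a hypothetical container name, builds a one-file archive in memory and extracts it under /tmp:

package main

import (
	"archive/tar"
	"bytes"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Build an in-memory tar archive containing a single file.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	content := []byte("hello from the host\n")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(content))}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(content); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
	// Extract the archive under /tmp inside the (hypothetical) container.
	err = client.UploadToContainer("my-container", docker.UploadToContainerOptions{
		InputStream: &buf,
		Path:        "/tmp",
	})
	if err != nil {
		log.Fatal(err)
	}
}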
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_attach.go b/vendor/github.com/fsouza/go-dockerclient/container_attach.go
new file mode 100644
index 000000000..e6742d007
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_attach.go
@@ -0,0 +1,74 @@
+package docker
+
+import (
+ "io"
+ "net/http"
+)
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See https://goo.gl/JF10Zk for more details.
+type AttachToContainerOptions struct {
+ Container string `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+
+ // Override the key sequence for detaching a container.
+ DetachKeys string
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
+	// Get container logs, sending them to OutputStream.
+ Logs bool
+
+ // Stream the response?
+ Stream bool
+
+ // Attach to stdin, and use InputStream.
+ Stdin bool
+
+ // Attach to stdout, and use OutputStream.
+ Stdout bool
+
+ // Attach to stderr, and use ErrorStream.
+ Stderr bool
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See https://goo.gl/JF10Zk for more details.
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+ cw, err := c.AttachToContainerNonBlocking(opts)
+ if err != nil {
+ return err
+ }
+ return cw.Wait()
+}
+
+// AttachToContainerNonBlocking attaches to a container, using the given options.
+// This function does not block.
+//
+// See https://goo.gl/NKpkFk for more details.
+func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) {
+ if opts.Container == "" {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+ return c.hijack(http.MethodPost, path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
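The Success field documented above implies a small handshake: the client sends a sentinel once the hijacked connection is up, then blocks until the caller sends back. A minimal sketch of that pattern, with the container name as a hypothetical placeholder:

package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	success := make(chan struct{}) // must be unbuffered, per the field docs
	waiter, err := client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
		Container:    "my-container", // hypothetical container name or ID
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		Stdout:       true,
		Stderr:       true,
		Stream:       true,
		Success:      success,
	})
	if err != nil {
		log.Fatal(err)
	}
	<-success             // sentinel: the attach connection is established
	success <- struct{}{} // unblock the client so it starts copying streams
	if err := waiter.Wait(); err != nil {
		log.Println("attach:", err)
	}
}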
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_changes.go b/vendor/github.com/fsouza/go-dockerclient/container_changes.go
new file mode 100644
index 000000000..42b6f4a5c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_changes.go
@@ -0,0 +1,26 @@
+package docker
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See https://goo.gl/15KKzh for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+ path := "/containers/" + id + "/changes"
+ resp, err := c.do(http.MethodGet, path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var changes []Change
+ if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_commit.go b/vendor/github.com/fsouza/go-dockerclient/container_commit.go
new file mode 100644
index 000000000..d8551626b
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_commit.go
@@ -0,0 +1,44 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+)
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See https://goo.gl/CzIguf for more details.
+type CommitContainerOptions struct {
+ Container string
+ Repository string `qs:"repo"`
+ Tag string
+ Message string `qs:"comment"`
+ Author string
+ Changes []string `qs:"changes"`
+ Run *Config `qs:"-"`
+ Context context.Context
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See https://goo.gl/CzIguf for more details.
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+ path := "/commit?" + queryString(opts)
+ resp, err := c.do(http.MethodPost, path, doOptions{
+ data: opts.Run,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var image Image
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
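A minimal sketch of committing a container to a new image; the container name, repository, and tag below are placeholders, not values defined by this package:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	image, err := client.CommitContainer(docker.CommitContainerOptions{
		Container:  "my-container",     // hypothetical container name or ID
		Repository: "example/snapshot", // hypothetical repository
		Tag:        "v1",
		Message:    "snapshot after provisioning",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created image", image.ID)
}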
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_copy.go b/vendor/github.com/fsouza/go-dockerclient/container_copy.go
new file mode 100644
index 000000000..d36c7e710
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_copy.go
@@ -0,0 +1,49 @@
+package docker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// CopyFromContainerOptions contains the set of options used for copying
+// files from a container.
+//
+// Deprecated: Use DownloadFromContainerOptions and DownloadFromContainer instead.
+type CopyFromContainerOptions struct {
+ OutputStream io.Writer `json:"-"`
+ Container string `json:"-"`
+ Resource string
+ Context context.Context `json:"-"`
+}
+
+// CopyFromContainer copies files from a container.
+//
+// Deprecated: Use DownloadFromContainer instead.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) {
+ return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead")
+ }
+ url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+ resp, err := c.do(http.MethodPost, url, doOptions{
+ data: opts,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ _, err = io.Copy(opts.OutputStream, resp.Body)
+ return err
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_create.go b/vendor/github.com/fsouza/go-dockerclient/container_create.go
new file mode 100644
index 000000000..b1952095d
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_create.go
@@ -0,0 +1,78 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "strings"
+)
+
+// ErrContainerAlreadyExists is the error returned by CreateContainer when the
+// container already exists.
+var ErrContainerAlreadyExists = errors.New("container already exists")
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See https://goo.gl/tyzwVM for more details.
+type CreateContainerOptions struct {
+ Name string
+ Config *Config `qs:"-"`
+ HostConfig *HostConfig `qs:"-"`
+ NetworkingConfig *NetworkingConfig `qs:"-"`
+ Context context.Context
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// The returned container instance contains only the container ID. To get more
+// details about the container after creating it, use InspectContainer.
+//
+// See https://goo.gl/tyzwVM for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+ path := "/containers/create?" + queryString(opts)
+ resp, err := c.do(
+ http.MethodPost,
+ path,
+ doOptions{
+ data: struct {
+ *Config
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty" toml:"HostConfig,omitempty"`
+ NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty" toml:"NetworkingConfig,omitempty"`
+ }{
+ opts.Config,
+ opts.HostConfig,
+ opts.NetworkingConfig,
+ },
+ context: opts.Context,
+ },
+ )
+
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound && strings.Contains(e.Message, "No such image") {
+ return nil, ErrNoSuchImage
+ }
+ if e.Status == http.StatusConflict {
+ return nil, ErrContainerAlreadyExists
+ }
+ // Workaround for 17.09 bug returning 400 instead of 409.
+ // See https://github.com/moby/moby/issues/35021
+ if e.Status == http.StatusBadRequest && strings.Contains(e.Message, "Conflict.") {
+ return nil, ErrContainerAlreadyExists
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+
+ container.Name = opts.Name
+
+ return &container, nil
+}
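A minimal sketch of CreateContainer and its sentinel errors; the image and name are placeholders, and the image is assumed to be present locally:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: "demo", // hypothetical name
		Config: &docker.Config{
			Image: "alpine:3.10", // assumed to be pulled already
			Cmd:   []string{"sleep", "60"},
		},
		HostConfig: &docker.HostConfig{},
	})
	switch err {
	case nil:
		log.Println("created container", container.ID)
	case docker.ErrContainerAlreadyExists:
		log.Println("container name already in use")
	case docker.ErrNoSuchImage:
		log.Println("image must be pulled first")
	default:
		log.Fatal(err)
	}
}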
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_export.go b/vendor/github.com/fsouza/go-dockerclient/container_export.go
new file mode 100644
index 000000000..312f8cf10
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_export.go
@@ -0,0 +1,37 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+)
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See https://goo.gl/yGJCIh for more details.
+type ExportContainerOptions struct {
+ ID string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// ExportContainer exports the contents of container id as a tar archive,
+// writing it to the configured OutputStream.
+//
+// See https://goo.gl/yGJCIh for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+ if opts.ID == "" {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ url := fmt.Sprintf("/containers/%s/export", opts.ID)
+ return c.stream(http.MethodGet, url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_inspect.go b/vendor/github.com/fsouza/go-dockerclient/container_inspect.go
new file mode 100644
index 000000000..e8091f739
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_inspect.go
@@ -0,0 +1,54 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+)
+
+// InspectContainer returns information about a container by its ID.
+//
+// Deprecated: Use InspectContainerWithOptions instead.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+ return c.InspectContainerWithOptions(InspectContainerOptions{ID: id})
+}
+
+// InspectContainerWithContext returns information about a container by its ID.
+// The context object can be used to cancel the inspect request.
+//
+// Deprecated: Use InspectContainerWithOptions instead.
+//nolint:golint
+func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) {
+ return c.InspectContainerWithOptions(InspectContainerOptions{ID: id, Context: ctx})
+}
+
+// InspectContainerWithOptions returns information about a container by its ID.
+//
+// See https://goo.gl/FaI5JT for more details.
+func (c *Client) InspectContainerWithOptions(opts InspectContainerOptions) (*Container, error) {
+ path := "/containers/" + opts.ID + "/json?" + queryString(opts)
+ resp, err := c.do(http.MethodGet, path, doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.ID}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+ return &container, nil
+}
+
+// InspectContainerOptions specifies parameters for InspectContainerWithOptions.
+//
+// See https://goo.gl/FaI5JT for more details.
+type InspectContainerOptions struct {
+ Context context.Context
+ ID string `qs:"-"`
+ Size bool
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_kill.go b/vendor/github.com/fsouza/go-dockerclient/container_kill.go
new file mode 100644
index 000000000..ee9f0418f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_kill.go
@@ -0,0 +1,45 @@
+package docker
+
+import (
+ "context"
+ "net/http"
+)
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://goo.gl/JnTxXZ for more details.
+type KillContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // The signal to send to the container. When omitted, Docker server
+ // will assume SIGKILL.
+ Signal Signal
+ Context context.Context
+}
+
+// KillContainer sends a signal to a container, returning an error in case of
+// failure.
+//
+// See https://goo.gl/JnTxXZ for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+ path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
+ if err != nil {
+ e, ok := err.(*Error)
+ if !ok {
+ return err
+ }
+ switch e.Status {
+ case http.StatusNotFound:
+ return &NoSuchContainer{ID: opts.ID}
+ case http.StatusConflict:
+ return &ContainerNotRunning{ID: opts.ID}
+ default:
+ return err
+ }
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_list.go b/vendor/github.com/fsouza/go-dockerclient/container_list.go
new file mode 100644
index 000000000..1dec0e915
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_list.go
@@ -0,0 +1,37 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+)
+
+// ListContainersOptions specify parameters to the ListContainers function.
+//
+// See https://goo.gl/kaOHGw for more details.
+type ListContainersOptions struct {
+ All bool
+ Size bool
+ Limit int
+ Since string
+ Before string
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://goo.gl/kaOHGw for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ resp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var containers []APIContainers
+ if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
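A minimal sketch of listing containers; the "status" filter value is forwarded to the Engine API and is an assumption about what the daemon accepts, not something defined in this package:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// All includes stopped containers; Filters is passed through to the daemon.
	containers, err := client.ListContainers(docker.ListContainersOptions{
		All:     true,
		Filters: map[string][]string{"status": {"exited"}}, // daemon-side filter, assumed
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		log.Printf("%s %v", c.ID, c.Names)
	}
}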
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_logs.go b/vendor/github.com/fsouza/go-dockerclient/container_logs.go
new file mode 100644
index 000000000..0e3f1199c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_logs.go
@@ -0,0 +1,58 @@
+package docker
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "time"
+)
+
+// LogsOptions represents the set of options used when getting logs from a
+// container.
+//
+// See https://goo.gl/krK0ZH for more details.
+type LogsOptions struct {
+ Context context.Context
+ Container string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Tail string
+
+ Since int64
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Timestamps bool
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// When LogsOptions.RawTerminal is set to false, go-dockerclient will multiplex
+// the streams and send the container's stdout to LogsOptions.OutputStream, and
+// stderr to LogsOptions.ErrorStream.
+//
+// When LogsOptions.RawTerminal is true, callers will get the raw stream on
+// LogsOptions.OutputStream. The caller can use libraries such as dlog
+// (github.com/ahmetalpbalkan/dlog).
+//
+// See https://goo.gl/krK0ZH for more details.
+func (c *Client) Logs(opts LogsOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if opts.Tail == "" {
+ opts.Tail = "all"
+ }
+ path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+ return c.stream(http.MethodGet, path, streamOptions{
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ })
+}
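With RawTerminal left false, the client splits the log stream into OutputStream and ErrorStream as described above. A minimal sketch, with a hypothetical container name:

package main

import (
	"log"
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// stdout goes to OutputStream and stderr to ErrorStream because
	// RawTerminal is false.
	err = client.Logs(docker.LogsOptions{
		Container:    "my-container", // hypothetical container name or ID
		OutputStream: os.Stdout,
		ErrorStream:  os.Stderr,
		Stdout:       true,
		Stderr:       true,
		Tail:         "100",
		Timestamps:   true,
	})
	if err != nil {
		log.Fatal(err)
	}
}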
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_pause.go b/vendor/github.com/fsouza/go-dockerclient/container_pause.go
new file mode 100644
index 000000000..66fb9ba7b
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_pause.go
@@ -0,0 +1,22 @@
+package docker
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// PauseContainer pauses the given container.
+//
+// See https://goo.gl/D1Yaii for more details.
+func (c *Client) PauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/pause", id)
+ resp, err := c.do(http.MethodPost, path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_prune.go b/vendor/github.com/fsouza/go-dockerclient/container_prune.go
new file mode 100644
index 000000000..3f2bdc6a2
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_prune.go
@@ -0,0 +1,40 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+)
+
+// PruneContainersOptions specify parameters to the PruneContainers function.
+//
+// See https://goo.gl/wnkgDT for more details.
+type PruneContainersOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// PruneContainersResults specify results from the PruneContainers function.
+//
+// See https://goo.gl/wnkgDT for more details.
+type PruneContainersResults struct {
+ ContainersDeleted []string
+ SpaceReclaimed int64
+}
+
+// PruneContainers deletes containers which are stopped.
+//
+// See https://goo.gl/wnkgDT for more details.
+func (c *Client) PruneContainers(opts PruneContainersOptions) (*PruneContainersResults, error) {
+ path := "/containers/prune?" + queryString(opts)
+ resp, err := c.do(http.MethodPost, path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var results PruneContainersResults
+ if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
+ return nil, err
+ }
+ return &results, nil
+}
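A minimal sketch of pruning stopped containers; the "until" filter is a daemon-side filter and an assumption about what the Engine API supports, not part of this package:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Remove stopped containers older than 24h (filter value is assumed).
	results, err := client.PruneContainers(docker.PruneContainersOptions{
		Filters: map[string][]string{"until": {"24h"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted %d containers, reclaimed %d bytes",
		len(results.ContainersDeleted), results.SpaceReclaimed)
}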
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_remove.go b/vendor/github.com/fsouza/go-dockerclient/container_remove.go
new file mode 100644
index 000000000..23298f51c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_remove.go
@@ -0,0 +1,39 @@
+package docker
+
+import (
+ "context"
+ "net/http"
+)
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://goo.gl/hL5IPC for more details.
+type RemoveContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+	// A flag that indicates whether Docker should remove the volumes
+	// associated with the container.
+ RemoveVolumes bool `qs:"v"`
+
+ // A flag that indicates whether Docker should remove the container
+ // even if it is currently running.
+ Force bool
+ Context context.Context
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://goo.gl/hL5IPC for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+ path := "/containers/" + opts.ID + "?" + queryString(opts)
+ resp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_rename.go b/vendor/github.com/fsouza/go-dockerclient/container_rename.go
new file mode 100644
index 000000000..893c423bd
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_rename.go
@@ -0,0 +1,33 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+)
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://goo.gl/46inai for more details.
+type RenameContainerOptions struct {
+ // ID of container to rename
+ ID string `qs:"-"`
+
+ // New name
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Context context.Context
+}
+
+// RenameContainer updates an existing container's name.
+//
+// See https://goo.gl/46inai for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+ resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_resize.go b/vendor/github.com/fsouza/go-dockerclient/container_resize.go
new file mode 100644
index 000000000..3445be6b5
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_resize.go
@@ -0,0 +1,22 @@
+package docker
+
+import (
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+//
+// See https://goo.gl/FImjeq for more details.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+ resp, err := c.do(http.MethodPost, "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_restart.go b/vendor/github.com/fsouza/go-dockerclient/container_restart.go
new file mode 100644
index 000000000..90e59055c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_restart.go
@@ -0,0 +1,62 @@
+package docker
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// RestartPolicy represents the policy for automatically restarting a container.
+//
+// Possible values are:
+//
+// - always: the docker daemon will always restart the container
+// - on-failure: the docker daemon will restart the container on failures, at
+// most MaximumRetryCount times
+// - unless-stopped: the docker daemon will always restart the container except
+// when user has manually stopped the container
+// - no: the docker daemon will not restart the container automatically
+type RestartPolicy struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty" toml:"Name,omitempty"`
+ MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty" toml:"MaximumRetryCount,omitempty"`
+}
+
+// AlwaysRestart returns a restart policy that tells the Docker daemon to
+// always restart the container.
+func AlwaysRestart() RestartPolicy {
+ return RestartPolicy{Name: "always"}
+}
+
+// RestartOnFailure returns a restart policy that tells the Docker daemon to
+// restart the container on failures, trying at most maxRetry times.
+func RestartOnFailure(maxRetry int) RestartPolicy {
+ return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
+}
+
+// RestartUnlessStopped returns a restart policy that tells the Docker daemon to
+// always restart the container except when user has manually stopped the container.
+func RestartUnlessStopped() RestartPolicy {
+ return RestartPolicy{Name: "unless-stopped"}
+}
+
+// NeverRestart returns a restart policy that tells the Docker daemon to never
+// restart the container on failures.
+func NeverRestart() RestartPolicy {
+ return RestartPolicy{Name: "no"}
+}
+
+// RestartContainer stops a container, killing it after the given timeout (in
+// seconds) if it has not stopped, and then starts it again.
+//
+// See https://goo.gl/MrAKQ5 for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+ resp, err := c.do(http.MethodPost, path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
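The restart-policy constructors and the RestartContainer call might be exercised like this; the container ID, retry count, and timeout are placeholders:

```go
package example

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

// restartExample restarts a container, giving it ten seconds to stop
// gracefully before it is killed.
func restartExample(client *docker.Client) {
	// The helper constructors build RestartPolicy values intended for a
	// container's host configuration; shown here only to illustrate their shape.
	policy := docker.RestartOnFailure(5)
	log.Printf("policy: %s (max retries %d)", policy.Name, policy.MaximumRetryCount)

	if err := client.RestartContainer("my-container", 10); err != nil {
		if _, ok := err.(*docker.NoSuchContainer); ok {
			log.Println("no such container")
			return
		}
		log.Fatal(err)
	}
}
```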
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_start.go b/vendor/github.com/fsouza/go-dockerclient/container_start.go
new file mode 100644
index 000000000..5edb29438
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_start.go
@@ -0,0 +1,56 @@
+package docker
+
+import (
+ "context"
+ "net/http"
+)
+
+// StartContainer starts a container, returning an error in case of failure.
+//
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/fbOSZy for more details.
+func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ return c.startContainer(id, hostConfig, doOptions{})
+}
+
+// StartContainerWithContext starts a container, returning an error in case of
+// failure. The context can be used to cancel the outstanding start container
+// request.
+//
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/fbOSZy for more details.
+//nolint:golint
+func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error {
+ return c.startContainer(id, hostConfig, doOptions{context: ctx})
+}
+
+func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error {
+ path := "/containers/" + id + "/start"
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) {
+ opts.data = hostConfig
+ opts.forceJSON = true
+ }
+ resp, err := c.do(http.MethodPost, path, opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id, Err: err}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerAlreadyRunning{ID: id}
+ }
+ return nil
+}
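A sketch of the context-aware start path, assuming a pre-built client and an already-created container:

```go
package example

import (
	"context"
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

// startExample starts an already-created container and gives the request
// five seconds to complete. The container ID is a placeholder; HostConfig is
// nil because newer API versions ignore it anyway.
func startExample(client *docker.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	err := client.StartContainerWithContext("my-container", nil, ctx)
	switch err.(type) {
	case nil:
		log.Println("container started")
	case *docker.ContainerAlreadyRunning:
		log.Println("container was already running")
	default:
		log.Fatal(err)
	}
}
```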
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_stats.go b/vendor/github.com/fsouza/go-dockerclient/container_stats.go
new file mode 100644
index 000000000..a899b95cf
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_stats.go
@@ -0,0 +1,214 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+)
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://goo.gl/Dk3Xio for more details.
+type Stats struct {
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty" toml:"read,omitempty"`
+ PreRead time.Time `json:"preread,omitempty" yaml:"preread,omitempty" toml:"preread,omitempty"`
+ NumProcs uint32 `json:"num_procs" yaml:"num_procs" toml:"num_procs"`
+ PidsStats struct {
+ Current uint64 `json:"current,omitempty" yaml:"current,omitempty"`
+ } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty" toml:"pids_stats,omitempty"`
+ Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty" toml:"network,omitempty"`
+ Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty" toml:"networks,omitempty"`
+ MemoryStats struct {
+ Stats struct {
+ TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty" toml:"total_pgmafault,omitempty"`
+ Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty" toml:"cache,omitempty"`
+ MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty" toml:"mapped_file,omitempty"`
+ TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty" toml:"total_inactive_file,omitempty"`
+ Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty" toml:"pgpgout,omitempty"`
+ Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty" toml:"rss,omitempty"`
+ TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty" toml:"total_mapped_file,omitempty"`
+ Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty" toml:"writeback,omitempty"`
+ Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty" toml:"unevictable,omitempty"`
+ Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty" toml:"pgpgin,omitempty"`
+ TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty" toml:"total_unevictable,omitempty"`
+ Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty" toml:"pgmajfault,omitempty"`
+ TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty" toml:"total_rss,omitempty"`
+ TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty" toml:"total_rss_huge,omitempty"`
+ TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty" toml:"total_writeback,omitempty"`
+ TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty" toml:"total_inactive_anon,omitempty"`
+ RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty" toml:"rss_huge,omitempty"`
+ HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty" toml:"hierarchical_memory_limit,omitempty"`
+ TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty" toml:"total_pgfault,omitempty"`
+ TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty" toml:"total_active_file,omitempty"`
+ ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty" toml:"active_anon,omitempty"`
+ TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty" toml:"total_active_anon,omitempty"`
+ TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty" toml:"total_pgpgout,omitempty"`
+ TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty" toml:"total_cache,omitempty"`
+ InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty" toml:"inactive_anon,omitempty"`
+ ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty" toml:"active_file,omitempty"`
+ Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty" toml:"pgfault,omitempty"`
+ InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty" toml:"inactive_file,omitempty"`
+ TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty" toml:"total_pgpgin,omitempty"`
+ HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty" toml:"hierarchical_memsw_limit,omitempty"`
+ Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty" toml:"swap,omitempty"`
+ } `json:"stats,omitempty" yaml:"stats,omitempty" toml:"stats,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty" toml:"max_usage,omitempty"`
+ Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty" toml:"usage,omitempty"`
+ Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty" toml:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty" toml:"limit,omitempty"`
+ Commit uint64 `json:"commitbytes,omitempty" yaml:"commitbytes,omitempty" toml:"privateworkingset,omitempty"`
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty" yaml:"commitpeakbytes,omitempty" toml:"commitpeakbytes,omitempty"`
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty" yaml:"privateworkingset,omitempty" toml:"privateworkingset,omitempty"`
+ } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty" toml:"memory_stats,omitempty"`
+ BlkioStats struct {
+ IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty" toml:"io_service_bytes_recursive,omitempty"`
+ IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty" toml:"io_serviced_recursive,omitempty"`
+ IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty" toml:"io_queue_recursive,omitempty"`
+ IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty" toml:"io_service_time_recursive,omitempty"`
+ IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty" toml:"io_wait_time_recursive,omitempty"`
+ IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty" toml:"io_merged_recursive,omitempty"`
+ IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty" toml:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty" toml:"sectors_recursive,omitempty"`
+ } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty" toml:"blkio_stats,omitempty"`
+ CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty" toml:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+ StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty" yaml:"read_count_normalized,omitempty" toml:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty" yaml:"read_size_bytes,omitempty" toml:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty" yaml:"write_count_normalized,omitempty" toml:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty" yaml:"write_size_bytes,omitempty" toml:"write_size_bytes,omitempty"`
+ } `json:"storage_stats,omitempty" yaml:"storage_stats,omitempty" toml:"storage_stats,omitempty"`
+}
+
+// NetworkStats is a stats entry for network stats
+type NetworkStats struct {
+ RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty" toml:"rx_dropped,omitempty"`
+ RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty" toml:"rx_bytes,omitempty"`
+ RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty" toml:"rx_errors,omitempty"`
+ TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty" toml:"tx_packets,omitempty"`
+ TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty" toml:"tx_dropped,omitempty"`
+ RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty" toml:"rx_packets,omitempty"`
+ TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty" toml:"tx_errors,omitempty"`
+ TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty" toml:"tx_bytes,omitempty"`
+}
+
+// CPUStats is a stats entry for cpu stats
+type CPUStats struct {
+ CPUUsage struct {
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty" toml:"percpu_usage,omitempty"`
+ UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty" toml:"usage_in_usermode,omitempty"`
+ TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty" toml:"total_usage,omitempty"`
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty" toml:"usage_in_kernelmode,omitempty"`
+ } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty" toml:"cpu_usage,omitempty"`
+ SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty" toml:"system_cpu_usage,omitempty"`
+ OnlineCPUs uint64 `json:"online_cpus,omitempty" yaml:"online_cpus,omitempty" toml:"online_cpus,omitempty"`
+ ThrottlingData struct {
+ Periods uint64 `json:"periods,omitempty"`
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+ } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty" toml:"throttling_data,omitempty"`
+}
+
+// BlkioStatsEntry is a stats entry for blkio_stats
+type BlkioStatsEntry struct {
+ Major uint64 `json:"major,omitempty" yaml:"major,omitempty" toml:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty" toml:"minor,omitempty"`
+ Op string `json:"op,omitempty" yaml:"op,omitempty" toml:"op,omitempty"`
+ Value uint64 `json:"value,omitempty" yaml:"value,omitempty" toml:"value,omitempty"`
+}
+
+// StatsOptions specify parameters to the Stats function.
+//
+// See https://goo.gl/Dk3Xio for more details.
+type StatsOptions struct {
+ ID string
+ Stats chan<- *Stats
+ Stream bool
+ // A flag that enables stopping the stats operation
+ Done <-chan bool
+ // Initial connection timeout
+ Timeout time.Duration
+ // Timeout applied while no data is received; it is reset every time new
+ // data arrives
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
+}
+
+// Stats sends statistics for the given container to the given channel.
+//
+// This function is blocking, similar to a streaming call for logs, and should be run
+// in a separate goroutine from the caller. Note that this function will block until
+// the given container is removed, not just exited. When finished, this function
+// will close the given channel. Alternatively, the function can be stopped by
+// signaling on the Done channel.
+//
+// See https://goo.gl/Dk3Xio for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+ errC := make(chan error, 1)
+ readCloser, writeCloser := io.Pipe()
+
+ defer func() {
+ close(opts.Stats)
+
+ if err := <-errC; err != nil && retErr == nil {
+ retErr = err
+ }
+
+ if err := readCloser.Close(); err != nil && retErr == nil {
+ retErr = err
+ }
+ }()
+
+ reqSent := make(chan struct{})
+ go func() {
+ defer close(errC)
+ err := c.stream(http.MethodGet, fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+ rawJSONStream: true,
+ useJSONDecoder: true,
+ stdout: writeCloser,
+ timeout: opts.Timeout,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
+ reqSent: reqSent,
+ })
+ if err != nil {
+ dockerError, ok := err.(*Error)
+ if ok {
+ if dockerError.Status == http.StatusNotFound {
+ err = &NoSuchContainer{ID: opts.ID}
+ }
+ }
+ }
+ if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ errC <- err
+ }()
+
+ quit := make(chan struct{})
+ defer close(quit)
+ go func() {
+ // block here waiting for the signal to stop function
+ select {
+ case <-opts.Done:
+ readCloser.Close()
+ case <-quit:
+ return
+ }
+ }()
+
+ decoder := json.NewDecoder(readCloser)
+ stats := new(Stats)
+ <-reqSent
+ for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+ if err != nil {
+ return err
+ }
+ opts.Stats <- stats
+ stats = new(Stats)
+ }
+ return nil
+}
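Because Stats blocks until the stream ends and closes the channel it is given, callers typically run it in its own goroutine and range over the channel; a hedged sketch (IDs and timeouts are placeholders):

```go
package example

import (
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

// streamStats follows the stats stream for a container until done is
// signaled or the container is removed.
func streamStats(client *docker.Client, done <-chan bool) {
	statsC := make(chan *docker.Stats)
	go func() {
		// Stats blocks until the stream ends, then closes statsC.
		err := client.Stats(docker.StatsOptions{
			ID:                "my-container",
			Stats:             statsC,
			Stream:            true,
			Done:              done,
			Timeout:           10 * time.Second,
			InactivityTimeout: 30 * time.Second,
		})
		if err != nil {
			log.Printf("stats stream ended with error: %v", err)
		}
	}()
	for s := range statsC {
		log.Printf("memory usage: %d bytes", s.MemoryStats.Usage)
	}
}
```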
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_stop.go b/vendor/github.com/fsouza/go-dockerclient/container_stop.go
new file mode 100644
index 000000000..0b148b084
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_stop.go
@@ -0,0 +1,41 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+)
+
+// StopContainer stops a container, killing it after the given timeout (in
+// seconds).
+//
+// See https://goo.gl/R9dZcV for more details.
+func (c *Client) StopContainer(id string, timeout uint) error {
+ return c.stopContainer(id, timeout, doOptions{})
+}
+
+// StopContainerWithContext stops a container, killing it after the given
+// timeout (in seconds). The context can be used to cancel the stop
+// container request.
+//
+// See https://goo.gl/R9dZcV for more details.
+//nolint:golint
+func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error {
+ return c.stopContainer(id, timeout, doOptions{context: ctx})
+}
+
+func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
+ path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+ resp, err := c.do(http.MethodPost, path, opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerNotRunning{ID: id}
+ }
+ return nil
+}
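Stopping with a context looks much the same; the thirty-second request deadline and ten-second kill timeout below are arbitrary:

```go
package example

import (
	"context"
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

// stopExample asks the daemon to stop a container, allowing ten seconds
// before the kill, and abandons the request after thirty seconds.
func stopExample(client *docker.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := client.StopContainerWithContext("my-container", 10, ctx)
	if _, alreadyStopped := err.(*docker.ContainerNotRunning); alreadyStopped {
		log.Println("container was not running")
	} else if err != nil {
		log.Fatal(err)
	}
}
```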
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_top.go b/vendor/github.com/fsouza/go-dockerclient/container_top.go
new file mode 100644
index 000000000..7fb00e051
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_top.go
@@ -0,0 +1,38 @@
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+// TopResult represents the list of processes running in a container, as
+// returned by /containers/<id>/top.
+//
+// See https://goo.gl/FLwpPl for more details.
+type TopResult struct {
+ Titles []string
+ Processes [][]string
+}
+
+// TopContainer returns the processes running inside a container.
+//
+// See https://goo.gl/FLwpPl for more details.
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+ var args string
+ var result TopResult
+ if psArgs != "" {
+ args = fmt.Sprintf("?ps_args=%s", psArgs)
+ }
+ path := fmt.Sprintf("/containers/%s/top%s", id, args)
+ resp, err := c.do(http.MethodGet, path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return result, &NoSuchContainer{ID: id}
+ }
+ return result, err
+ }
+ defer resp.Body.Close()
+ err = json.NewDecoder(resp.Body).Decode(&result)
+ return result, err
+}
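A small example of consuming TopResult; the "aux" ps arguments and container ID are illustrative:

```go
package example

import (
	"log"
	"strings"

	docker "github.com/fsouza/go-dockerclient"
)

// topExample lists the processes inside a container, passing "aux" as the
// ps arguments.
func topExample(client *docker.Client) {
	result, err := client.TopContainer("my-container", "aux")
	if err != nil {
		log.Fatal(err)
	}
	// Titles holds the column headers; each entry in Processes is one row.
	log.Println(strings.Join(result.Titles, "\t"))
	for _, process := range result.Processes {
		log.Println(strings.Join(process, "\t"))
	}
}
```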
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_unpause.go b/vendor/github.com/fsouza/go-dockerclient/container_unpause.go
new file mode 100644
index 000000000..af80d81d6
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_unpause.go
@@ -0,0 +1,22 @@
+package docker
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// UnpauseContainer unpauses the given container.
+//
+// See https://goo.gl/sZ2faO for more details.
+func (c *Client) UnpauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/unpause", id)
+ resp, err := c.do(http.MethodPost, path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_update.go b/vendor/github.com/fsouza/go-dockerclient/container_update.go
new file mode 100644
index 000000000..e8de21365
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_update.go
@@ -0,0 +1,43 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+)
+
+// UpdateContainerOptions specify parameters to the UpdateContainer function.
+//
+// See https://goo.gl/Y6fXUy for more details.
+type UpdateContainerOptions struct {
+ BlkioWeight int `json:"BlkioWeight"`
+ CPUShares int `json:"CpuShares"`
+ CPUPeriod int `json:"CpuPeriod"`
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod"`
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"`
+ CPUQuota int `json:"CpuQuota"`
+ CpusetCpus string `json:"CpusetCpus"`
+ CpusetMems string `json:"CpusetMems"`
+ Memory int `json:"Memory"`
+ MemorySwap int `json:"MemorySwap"`
+ MemoryReservation int `json:"MemoryReservation"`
+ KernelMemory int `json:"KernelMemory"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"`
+ Context context.Context
+}
+
+// UpdateContainer updates the container identified by id with the given options.
+//
+// See https://goo.gl/Y6fXUy for more details.
+func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
+ resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{
+ data: opts,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
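UpdateContainer takes the resource limits directly in the options struct; the values below are placeholders, not recommendations:

```go
package example

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

// updateExample raises the memory limits and CPU shares of a running
// container in place. The container ID and limits are placeholders.
func updateExample(client *docker.Client) {
	err := client.UpdateContainer("my-container", docker.UpdateContainerOptions{
		Memory:     512 * 1024 * 1024,  // 512 MiB
		MemorySwap: 1024 * 1024 * 1024, // 1 GiB
		CPUShares:  512,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```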
diff --git a/vendor/github.com/fsouza/go-dockerclient/container_wait.go b/vendor/github.com/fsouza/go-dockerclient/container_wait.go
new file mode 100644
index 000000000..6fd27b0ce
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container_wait.go
@@ -0,0 +1,41 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+)
+
+// WaitContainer blocks until the given container stops, returning the exit
+// code of the container's status.
+//
+// See https://goo.gl/4AGweZ for more details.
+func (c *Client) WaitContainer(id string) (int, error) {
+ return c.waitContainer(id, doOptions{})
+}
+
+// WaitContainerWithContext blocks until the given container stops, returning
+// the exit code of the container's status. The context object can be used to
+// cancel the wait request.
+//
+// See https://goo.gl/4AGweZ for more details.
+//nolint:golint
+func (c *Client) WaitContainerWithContext(id string, ctx context.Context) (int, error) {
+ return c.waitContainer(id, doOptions{context: ctx})
+}
+
+func (c *Client) waitContainer(id string, opts doOptions) (int, error) {
+ resp, err := c.do(http.MethodPost, "/containers/"+id+"/wait", opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return 0, &NoSuchContainer{ID: id}
+ }
+ return 0, err
+ }
+ defer resp.Body.Close()
+ var r struct{ StatusCode int }
+ if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return 0, err
+ }
+ return r.StatusCode, nil
+}
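And a sketch of waiting for an exit code with the non-context variant; the container ID is again a placeholder:

```go
package example

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

// waitExample blocks until the container exits and reports its exit code.
func waitExample(client *docker.Client) {
	code, err := client.WaitContainer("my-container")
	if err != nil {
		log.Fatal(err)
	}
	if code != 0 {
		log.Printf("container exited with status %d", code)
	}
}
```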
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
index 6de7c5535..8e362d44e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/event.go
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -329,14 +329,12 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if err != nil {
return err
}
- //nolint:staticcheck
- conn := httputil.NewClientConn(dial, nil)
- req, err := http.NewRequest(http.MethodGet, uri, nil)
+ conn := httputil.NewClientConn(dial, nil) //nolint:staticcheck
+ req, err := http.NewRequest(http.MethodGet, uri, nil) //nolint:noctx
if err != nil {
return err
}
- //nolint:bodyclose
- res, err := conn.Do(req)
+ res, err := conn.Do(req) //nolint:bodyclose
if err != nil {
return err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.mod b/vendor/github.com/fsouza/go-dockerclient/go.mod
index cf98b1b62..6de12ff37 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.mod
+++ b/vendor/github.com/fsouza/go-dockerclient/go.mod
@@ -1,23 +1,24 @@
module github.com/fsouza/go-dockerclient
-go 1.12
+go 1.14
require (
- github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873
- github.com/Microsoft/hcsshim v0.8.7 // indirect
- github.com/containerd/containerd v1.3.0 // indirect
- github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c // indirect
+ github.com/Microsoft/hcsshim v0.8.9 // indirect
+ github.com/containerd/containerd v1.3.4 // indirect
+ github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
- github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23
+ github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0
- github.com/gogo/protobuf v1.3.1 // indirect
- github.com/google/go-cmp v0.4.0
- github.com/gorilla/mux v1.7.4
+ github.com/google/go-cmp v0.5.2
+ github.com/gorilla/mux v1.8.0
+ github.com/moby/sys/mount v0.1.0 // indirect
+ github.com/moby/term v0.0.0-20200429084858-129dac9f73f6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
- golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
- google.golang.org/grpc v1.27.1 // indirect
+ golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79
+ google.golang.org/grpc v1.29.1 // indirect
+ gotest.tools/v3 v3.0.2 // indirect
)
diff --git a/vendor/github.com/fsouza/go-dockerclient/go.sum b/vendor/github.com/fsouza/go-dockerclient/go.sum
index 5c7d610dd..99480302e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/go.sum
+++ b/vendor/github.com/fsouza/go-dockerclient/go.sum
@@ -7,39 +7,45 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6tr
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI=
+github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M=
-github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=
+github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23 h1:oqgGT9O61YAYvI41EBsLePOr+LE6roB0xY4gpkZuFSE=
-github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible h1:ZxJX4ZSNg1LORBsStUojbrLfkrE3Ut122XhzyZnN110=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
@@ -55,14 +61,25 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -72,6 +89,17 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/moby/sys/mount v0.1.0 h1:Ytx78EatgFKtrqZ0BvJ0UtJE472ZvawVmil6pIfuCCU=
+github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
+github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo=
+github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
+github.com/moby/term v0.0.0-20200429084858-129dac9f73f6 h1:3Y9aosU6S5Bo8GYH0s+t1ej4m30GuUKvQ3c9ZLqdL28=
+github.com/moby/term v0.0.0-20200429084858-129dac9f73f6/go.mod h1:or9wGItza1sRcM4Wd3dIv8DsFHYQuFsMHEdxUIlUxms=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -85,38 +113,39 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88=
+golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -129,10 +158,11 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -141,10 +171,14 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -155,6 +189,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
@@ -162,24 +197,32 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
index 8a76f0dbf..535b72987 100644
--- a/vendor/github.com/fsouza/go-dockerclient/tls.go
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -111,7 +111,6 @@ func copyTLSConfig(cfg *tls.Config) *tls.Config {
Rand: cfg.Rand,
RootCAs: cfg.RootCAs,
ServerName: cfg.ServerName,
- SessionTicketKey: cfg.SessionTicketKey,
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
}
}
diff --git a/vendor/github.com/juju/ansiterm/LICENSE b/vendor/github.com/juju/ansiterm/LICENSE
new file mode 100644
index 000000000..ade9307b3
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/github.com/juju/ansiterm/Makefile b/vendor/github.com/juju/ansiterm/Makefile
new file mode 100644
index 000000000..212fdcbe5
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/Makefile
@@ -0,0 +1,14 @@
+# Copyright 2016 Canonical Ltd.
+# Licensed under the LGPLv3, see LICENCE file for details.
+
+default: check
+
+check:
+ go test
+
+docs:
+ godoc2md github.com/juju/ansiterm > README.md
+ sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/ansiterm?status.svg)](https://godoc.org/github.com/juju/ansiterm)|' README.md
+
+
+.PHONY: default check docs
diff --git a/vendor/github.com/juju/ansiterm/README.md b/vendor/github.com/juju/ansiterm/README.md
new file mode 100644
index 000000000..567438721
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/README.md
@@ -0,0 +1,323 @@
+
+# ansiterm
+ import "github.com/juju/ansiterm"
+
+Package ansiterm provides a Writer that writes out the ANSI escape
+codes for color and styles.
+
+
+
+
+
+
+
+## type Color
+``` go
+type Color int
+```
+Color represents one of the standard 16 ANSI colors.
+
+
+
+``` go
+const (
+ Default Color
+ Black
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ Gray
+ DarkGray
+ BrightRed
+ BrightGreen
+ BrightYellow
+ BrightBlue
+ BrightMagenta
+ BrightCyan
+ White
+)
+```
+
+
+
+
+
+
+
+
+### func (Color) String
+``` go
+func (c Color) String() string
+```
+String returns the name of the color.
+
+
+
+## type Context
+``` go
+type Context struct {
+ Foreground Color
+ Background Color
+ Styles []Style
+}
+```
+Context provides a way to specify both foreground and background colors
+along with other styles and write text to a Writer with those colors and
+styles.
+
+
+
+
+
+
+
+
+
+### func Background
+``` go
+func Background(color Color) *Context
+```
+Background is a convenience function that creates a Context with the
+specified color as the background color.
+
+
+### func Foreground
+``` go
+func Foreground(color Color) *Context
+```
+Foreground is a convenience function that creates a Context with the
+specified color as the foreground color.
+
+
+### func Styles
+``` go
+func Styles(styles ...Style) *Context
+```
+Styles is a convenience function that creates a Context with the
+specified styles set.
+
+
+
+
+### func (\*Context) Fprint
+``` go
+func (c *Context) Fprint(w sgrWriter, args ...interface{})
+```
+Fprint will set the sgr values of the writer to the specified foreground,
+background and styles, then formats using the default formats for its
+operands and writes to w. Spaces are added between operands when neither is
+a string. It returns the number of bytes written and any write error
+encountered.
+
+
+
+### func (\*Context) Fprintf
+``` go
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{})
+```
+Fprintf will set the sgr values of the writer to the specified
+foreground, background and styles, then write the formatted string,
+then reset the writer.
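+
+A minimal, illustrative sketch of using a Context (os.Stdout and the example
+message are assumptions, not part of the generated docs):
+
+``` go
+package main
+
+import (
+	"os"
+
+	"github.com/juju/ansiterm"
+)
+
+func main() {
+	// Write bold red text, then reset the writer.
+	w := ansiterm.NewWriter(os.Stdout)
+	ctx := ansiterm.Foreground(ansiterm.Red).SetStyle(ansiterm.Bold)
+	ctx.Fprintf(w, "error: %s\n", "something failed")
+}
+```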
+
+
+
+### func (\*Context) SetBackground
+``` go
+func (c *Context) SetBackground(color Color) *Context
+```
+SetBackground sets the background to the specified color.
+
+
+
+### func (\*Context) SetForeground
+``` go
+func (c *Context) SetForeground(color Color) *Context
+```
+SetForeground sets the foreground to the specified color.
+
+
+
+### func (\*Context) SetStyle
+``` go
+func (c *Context) SetStyle(styles ...Style) *Context
+```
+SetStyle replaces the styles with the new values.
+
+
+
+## type Style
+``` go
+type Style int
+```
+
+
+``` go
+const (
+ Bold Style
+ Faint
+ Italic
+ Underline
+ Blink
+ Reverse
+ Strikethrough
+ Conceal
+)
+```
+
+
+
+
+
+
+
+
+### func (Style) String
+``` go
+func (s Style) String() string
+```
+
+
+## type TabWriter
+``` go
+type TabWriter struct {
+ Writer
+ // contains filtered or unexported fields
+}
+```
+TabWriter is a filter that inserts padding around tab-delimited
+columns in its input to align them in the output.
+
+It also allows setting of colors and styles over and above the standard
+tabwriter package.
+
+
+
+
+
+
+
+
+
+### func NewTabWriter
+``` go
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+NewTabWriter returns a writer that is able to set colors and styles.
+The ansi escape codes are stripped for width calculations.
+
+
+
+
+### func (\*TabWriter) Flush
+``` go
+func (t *TabWriter) Flush() error
+```
+Flush should be called after the last call to Write to ensure
+that any data buffered in the Writer is written to output. Any
+incomplete escape sequence at the end is considered
+complete for formatting purposes.
+
+
+
+### func (\*TabWriter) Init
+``` go
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+A Writer must be initialized with a call to Init. The first parameter (output)
+specifies the filter output. The remaining parameters control the formatting:
+
+
+ minwidth minimal cell width including any padding
+ tabwidth width of tab characters (equivalent number of spaces)
+ padding padding added to a cell before computing its width
+ padchar ASCII char used for padding
+ if padchar == '\t', the Writer will assume that the
+ width of a '\t' in the formatted output is tabwidth,
+ and cells are left-aligned independent of align_left
+ (for correct-looking results, tabwidth must correspond
+ to the tab width in the viewer displaying the result)
+ flags formatting control
+
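+
+A minimal, illustrative sketch of aligning colored columns (the sample rows
+and flag values are assumptions, not part of the generated docs):
+
+``` go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/juju/ansiterm"
+)
+
+func main() {
+	// Pad with spaces, two spaces of padding between columns.
+	tw := ansiterm.NewTabWriter(os.Stdout, 0, 8, 2, ' ', 0)
+	ansiterm.Foreground(ansiterm.BrightGreen).Fprintf(tw, "NAME\tSTATUS\t\n")
+	fmt.Fprintf(tw, "web\trunning\t\n")
+	fmt.Fprintf(tw, "database\tstopped\t\n")
+	tw.Flush()
+}
+```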
+
+
+## type Writer
+``` go
+type Writer struct {
+ io.Writer
+ // contains filtered or unexported fields
+}
+```
+Writer allows colors and styles to be specified. If the io.Writer
+is not a terminal capable of color, all attempts to set colors or
+styles are no-ops.
+
+
+
+
+
+
+
+
+
+### func NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a Writer that allows the caller to specify colors and
+styles. If the io.Writer is not a terminal capable of color, all attempts
+to set colors or styles are no-ops.
+
+
+
+
+### func (\*Writer) ClearStyle
+``` go
+func (w *Writer) ClearStyle(s Style)
+```
+ClearStyle clears the text style.
+
+
+
+### func (\*Writer) Reset
+``` go
+func (w *Writer) Reset()
+```
+Reset returns the default foreground and background colors with no styles.
+
+
+
+### func (\*Writer) SetBackground
+``` go
+func (w *Writer) SetBackground(c Color)
+```
+SetBackground sets the background color.
+
+
+
+### func (\*Writer) SetForeground
+``` go
+func (w *Writer) SetForeground(c Color)
+```
+SetForeground sets the foreground color.
+
+
+
+### func (\*Writer) SetStyle
+``` go
+func (w *Writer) SetStyle(s Style)
+```
+SetStyle sets the text style.
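+
+The setter methods can also be used imperatively; a short sketch (assuming w
+was created with NewWriter as shown above):
+
+``` go
+w.SetForeground(ansiterm.Yellow)
+fmt.Fprint(w, "warning: low disk space\n")
+w.Reset()
+```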
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file
diff --git a/vendor/github.com/juju/ansiterm/attribute.go b/vendor/github.com/juju/ansiterm/attribute.go
new file mode 100644
index 000000000..f2daa4813
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/attribute.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+type attribute int
+
+const (
+ unknownAttribute attribute = -1
+ reset attribute = 0
+)
+
+// sgr returns the escape sequence for the Select Graphic Rendition
+// for the attribute.
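+// For example, attribute(1).sgr() is "\x1b[1m" (bold), while a negative
+// (unknown) attribute yields an empty string.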
+func (a attribute) sgr() string {
+ if a < 0 {
+ return ""
+ }
+ return fmt.Sprintf("\x1b[%dm", a)
+}
+
+type attributes []attribute
+
+func (a attributes) Len() int { return len(a) }
+func (a attributes) Less(i, j int) bool { return a[i] < a[j] }
+func (a attributes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// sgr returns the combined escape sequence for the Select Graphic Rendition
+// for the sequence of attributes.
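+// For example, attributes{31, 1}.sgr() sorts the values and yields
+// "\x1b[1;31m" (bold, red foreground).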
+func (a attributes) sgr() string {
+ switch len(a) {
+ case 0:
+ return ""
+ case 1:
+ return a[0].sgr()
+ default:
+ sort.Sort(a)
+ var values []string
+ for _, attr := range a {
+ values = append(values, fmt.Sprint(attr))
+ }
+ return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";"))
+ }
+}
diff --git a/vendor/github.com/juju/ansiterm/color.go b/vendor/github.com/juju/ansiterm/color.go
new file mode 100644
index 000000000..0a97de31e
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/color.go
@@ -0,0 +1,119 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+ _ Color = iota
+ Default
+ Black
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ Gray
+ DarkGray
+ BrightRed
+ BrightGreen
+ BrightYellow
+ BrightBlue
+ BrightMagenta
+ BrightCyan
+ White
+)
+
+// Color represents one of the standard 16 ANSI colors.
+type Color int
+
+// String returns the name of the color.
+func (c Color) String() string {
+ switch c {
+ case Default:
+ return "default"
+ case Black:
+ return "black"
+ case Red:
+ return "red"
+ case Green:
+ return "green"
+ case Yellow:
+ return "yellow"
+ case Blue:
+ return "blue"
+ case Magenta:
+ return "magenta"
+ case Cyan:
+ return "cyan"
+ case Gray:
+ return "gray"
+ case DarkGray:
+ return "darkgray"
+ case BrightRed:
+ return "brightred"
+ case BrightGreen:
+ return "brightgreen"
+ case BrightYellow:
+ return "brightyellow"
+ case BrightBlue:
+ return "brightblue"
+ case BrightMagenta:
+ return "brightmagenta"
+ case BrightCyan:
+ return "brightcyan"
+ case White:
+ return "white"
+ default:
+ return ""
+ }
+}
+
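+// foreground returns the SGR attribute that selects c as the foreground
+// color: 39 for Default, 30-37 for the standard colors and 90-97 for the
+// bright variants.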
+func (c Color) foreground() attribute {
+ switch c {
+ case Default:
+ return 39
+ case Black:
+ return 30
+ case Red:
+ return 31
+ case Green:
+ return 32
+ case Yellow:
+ return 33
+ case Blue:
+ return 34
+ case Magenta:
+ return 35
+ case Cyan:
+ return 36
+ case Gray:
+ return 37
+ case DarkGray:
+ return 90
+ case BrightRed:
+ return 91
+ case BrightGreen:
+ return 92
+ case BrightYellow:
+ return 93
+ case BrightBlue:
+ return 94
+ case BrightMagenta:
+ return 95
+ case BrightCyan:
+ return 96
+ case White:
+ return 97
+ default:
+ return unknownAttribute
+ }
+}
+
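+// background returns the SGR attribute that selects c as the background
+// color; background codes are the corresponding foreground codes shifted
+// by 10 (for example Red 31 becomes 41).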
+func (c Color) background() attribute {
+ value := c.foreground()
+ if value != unknownAttribute {
+ return value + 10
+ }
+ return value
+}
diff --git a/vendor/github.com/juju/ansiterm/context.go b/vendor/github.com/juju/ansiterm/context.go
new file mode 100644
index 000000000..e61a867ff
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/context.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "fmt"
+ "io"
+)
+
+// Context provides a way to specify both foreground and background colors
+// along with other styles and write text to a Writer with those colors and
+// styles.
+type Context struct {
+ Foreground Color
+ Background Color
+ Styles []Style
+}
+
+// Foreground is a convenience function that creates a Context with the
+// specified color as the foreground color.
+func Foreground(color Color) *Context {
+ return &Context{Foreground: color}
+}
+
+// Background is a convenience function that creates a Context with the
+// specified color as the background color.
+func Background(color Color) *Context {
+ return &Context{Background: color}
+}
+
+// Styles is a convenience function that creates a Context with the
+// specified styles set.
+func Styles(styles ...Style) *Context {
+ return &Context{Styles: styles}
+}
+
+// SetForeground sets the foreground to the specified color.
+func (c *Context) SetForeground(color Color) *Context {
+ c.Foreground = color
+ return c
+}
+
+// SetBackground sets the background to the specified color.
+func (c *Context) SetBackground(color Color) *Context {
+ c.Background = color
+ return c
+}
+
+// SetStyle replaces the styles with the new values.
+func (c *Context) SetStyle(styles ...Style) *Context {
+ c.Styles = styles
+ return c
+}
+
+type sgrWriter interface {
+ io.Writer
+ writeSGR(value sgr)
+}
+
+// Fprintf will set the sgr values of the writer to the specified
+// foreground, background and styles, then write the formatted string,
+// then reset the writer.
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) {
+ w.writeSGR(c)
+ fmt.Fprintf(w, format, args...)
+ w.writeSGR(reset)
+}
+
+// Fprint will set the sgr values of the writer to the specified foreground,
+// background and styles, then formats using the default formats for its
+// operands and writes to w. Spaces are added between operands when neither is
+// a string. It returns the number of bytes written and any write error
+// encountered.
+func (c *Context) Fprint(w sgrWriter, args ...interface{}) {
+ w.writeSGR(c)
+ fmt.Fprint(w, args...)
+ w.writeSGR(reset)
+}
+
+func (c *Context) sgr() string {
+ var values attributes
+ if foreground := c.Foreground.foreground(); foreground != unknownAttribute {
+ values = append(values, foreground)
+ }
+ if background := c.Background.background(); background != unknownAttribute {
+ values = append(values, background)
+ }
+ for _, style := range c.Styles {
+ if value := style.enable(); value != unknownAttribute {
+ values = append(values, value)
+ }
+ }
+ return values.sgr()
+}
diff --git a/vendor/github.com/juju/ansiterm/doc.go b/vendor/github.com/juju/ansiterm/doc.go
new file mode 100644
index 000000000..782700779
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package ansiterm provides a Writer that writes out the ANSI escape
+// codes for color and styles.
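+//
+// A minimal, illustrative sketch (os.Stdout and the message are assumptions):
+//
+//	w := ansiterm.NewWriter(os.Stdout)
+//	ansiterm.Foreground(ansiterm.Red).Fprintf(w, "boom: %s\n", "details")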
+package ansiterm
diff --git a/vendor/github.com/juju/ansiterm/style.go b/vendor/github.com/juju/ansiterm/style.go
new file mode 100644
index 000000000..0be42da56
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/style.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+ _ Style = iota
+ Bold
+ Faint
+ Italic
+ Underline
+ Blink
+ Reverse
+ Strikethrough
+ Conceal
+)
+
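+// Style represents an ANSI text style such as bold, underline or blink.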
+type Style int
+
+func (s Style) String() string {
+ switch s {
+ case Bold:
+ return "bold"
+ case Faint:
+ return "faint"
+ case Italic:
+ return "italic"
+ case Underline:
+ return "underline"
+ case Blink:
+ return "blink"
+ case Reverse:
+ return "reverse"
+ case Strikethrough:
+ return "strikethrough"
+ case Conceal:
+ return "conceal"
+ default:
+ return ""
+ }
+}
+
+func (s Style) enable() attribute {
+ switch s {
+ case Bold:
+ return 1
+ case Faint:
+ return 2
+ case Italic:
+ return 3
+ case Underline:
+ return 4
+ case Blink:
+ return 5
+ case Reverse:
+ return 7
+ case Conceal:
+ return 8
+ case Strikethrough:
+ return 9
+ default:
+ return unknownAttribute
+ }
+}
+
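+// disable returns the SGR attribute that turns the style off again; the
+// values used here are the enable codes shifted by 20 (for example
+// Underline 4 becomes 24).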
+func (s Style) disable() attribute {
+ value := s.enable()
+ if value != unknownAttribute {
+ return value + 20
+ }
+ return value
+}
diff --git a/vendor/github.com/juju/ansiterm/tabwriter.go b/vendor/github.com/juju/ansiterm/tabwriter.go
new file mode 100644
index 000000000..1ff6faaaf
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/tabwriter.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "io"
+
+ "github.com/juju/ansiterm/tabwriter"
+)
+
+// NewTabWriter returns a writer that is able to set colors and styles.
+// The ansi escape codes are stripped for width calculations.
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+ return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
+
+// TabWriter is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// It also allows setting of colors and styles over and above the standard
+// tabwriter package.
+type TabWriter struct {
+ Writer
+ tw tabwriter.Writer
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (t *TabWriter) Flush() error {
+ return t.tw.Flush()
+}
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (t *TabWriter) SetColumnAlignRight(column int) {
+ t.tw.SetColumnAlignRight(column)
+}
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+// and cells are left-aligned independent of align_left
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+ writer, colorCapable := colorEnabledWriter(output)
+ t.Writer = Writer{
+ Writer: t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags),
+ noColor: !colorCapable,
+ }
+ return t
+}
diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/juju/ansiterm/tabwriter/LICENSE
index 744875676..744875676 100644
--- a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
+++ b/vendor/github.com/juju/ansiterm/tabwriter/LICENSE
diff --git a/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
new file mode 100644
index 000000000..98949d036
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
@@ -0,0 +1,587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is mostly a copy of the Go standard library text/tabwriter, with
+// the addition of stripping ANSI control characters for width calculations.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package is using the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+ "bytes"
+ "io"
+ "unicode/utf8"
+
+ "github.com/lunixbochs/vtclean"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+ size int // cell size in bytes
+ width int // cell width in runes
+ htab bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
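+//
+// A minimal, illustrative sketch (space padding and the sample rows are
+// assumptions):
+//
+//	w := NewWriter(os.Stdout, 0, 8, 1, ' ', 0)
+//	fmt.Fprintln(w, "name\tvalue\t")
+//	fmt.Fprintln(w, "answer\t42\t")
+//	w.Flush()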
+//
+type Writer struct {
+ // configuration
+ output io.Writer
+ minwidth int
+ tabwidth int
+ padding int
+ padbytes [8]byte
+ flags uint
+
+ // current state
+ buf bytes.Buffer // collected text excluding tabs or line breaks
+ pos int // buffer position up to which cell.width of incomplete cell has been computed
+ cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+ endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+ lines [][]cell // list of lines; each line is a list of cells
+ widths []int // list of column widths in runes - re-used during formatting
+ alignment map[int]uint // column alignment
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+ b.buf.Reset()
+ b.pos = 0
+ b.cell = cell{}
+ b.endChar = 0
+ b.lines = b.lines[0:0]
+ b.widths = b.widths[0:0]
+ b.alignment = make(map[int]uint)
+ b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+// (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+// position pos; html tags and entities are excluded from this width if html
+// filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+// which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+// formatting; it is kept in Writer because it's re-used
+//
+// |<---------- size ---------->|
+// | |
+// |<- width ->|<- ignored ->| |
+// | | | |
+// [---processed---tab------------<tag>...</tag>...]
+// ^ ^ ^
+// | | |
+// buf start of incomplete cell pos
+
+// Formatting can be controlled with these flags.
+const (
+ // Ignore html tags and treat entities (starting with '&'
+ // and ending in ';') as single characters (width = 1).
+ FilterHTML uint = 1 << iota
+
+ // Strip Escape characters bracketing escaped text segments
+ // instead of passing them through unchanged with the text.
+ StripEscape
+
+ // Force right-alignment of cell content.
+ // Default is left-alignment.
+ AlignRight
+
+ // Handle empty columns as if they were not present in
+ // the input in the first place.
+ DiscardEmptyColumns
+
+ // Always use tabs for indentation columns (i.e., padding of
+ // leading empty cells on the left) independent of padchar.
+ TabIndent
+
+ // Print a vertical bar ('|') between columns (after formatting).
+ // Discarded columns appear as zero-width columns ("||").
+ Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+// and cells are left-aligned independent of align_left
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ if minwidth < 0 || tabwidth < 0 || padding < 0 {
+ panic("negative minwidth, tabwidth, or padding")
+ }
+ b.output = output
+ b.minwidth = minwidth
+ b.tabwidth = tabwidth
+ b.padding = padding
+ for i := range b.padbytes {
+ b.padbytes[i] = padchar
+ }
+ if padchar == '\t' {
+ // tab padding enforces left-alignment
+ flags &^= AlignRight
+ }
+ b.flags = flags
+
+ b.reset()
+
+ return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+ pos := 0
+ for i, line := range b.lines {
+ print("(", i, ") ")
+ for _, c := range line {
+ print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+ pos += c.size
+ }
+ print("\n")
+ }
+ print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+ err error
+}
+
+func (b *Writer) write0(buf []byte) {
+ n, err := b.output.Write(buf)
+ if n != len(buf) && err == nil {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ panic(osError{err})
+ }
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+ for n > len(src) {
+ b.write0(src)
+ n -= len(src)
+ }
+ b.write0(src[0:n])
+}
+
+var (
+ newline = []byte{'\n'}
+ tabs = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+ if b.padbytes[0] == '\t' || useTabs {
+ // padding is done with tabs
+ if b.tabwidth == 0 {
+ return // tabs have no width - can't do any padding
+ }
+ // make cellw the smallest multiple of b.tabwidth
+ cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+ n := cellw - textw // amount of padding
+ if n < 0 {
+ panic("internal error")
+ }
+ b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+ return
+ }
+
+ // padding is done with non-tab characters
+ b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ for i := line0; i < line1; i++ {
+ line := b.lines[i]
+
+ // if TabIndent is set, use tabs to pad leading empty cells
+ useTabs := b.flags&TabIndent != 0
+
+ for j, c := range line {
+ if j > 0 && b.flags&Debug != 0 {
+ // indicate column break
+ b.write0(vbar)
+ }
+
+ if c.size == 0 {
+ // empty cell
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], useTabs)
+ }
+ } else {
+ // non-empty cell
+ useTabs = false
+ alignColumnRight := b.alignment[j] == AlignRight
+ if (b.flags&AlignRight == 0) && !alignColumnRight { // align left
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ } else if alignColumnRight && j < len(b.widths) {
+ // just this column
+ internalSize := b.widths[j] - b.padding
+ if j < len(b.widths) {
+ b.writePadding(c.width, internalSize, false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ if b.padding > 0 {
+ b.writePadding(0, b.padding, false)
+ }
+ pos += c.size
+ } else { // align right
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ }
+ }
+ }
+
+ if i+1 == len(b.lines) {
+ // last buffered line - we don't have a newline, so just write
+ // any outstanding buffered data
+ b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+ pos += b.cell.size
+ } else {
+ // not the last line - write newline
+ b.write0(newline)
+ }
+ }
+ return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1 and an error, if any.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ column := len(b.widths)
+ for this := line0; this < line1; this++ {
+ line := b.lines[this]
+
+ if column < len(line)-1 {
+ // cell exists in this column => this line
+ // has more cells than the previous line
+ // (the last cell per line is ignored because cells are
+ // tab-terminated; the last cell per line describes the
+ // text before the newline/formfeed and does not belong
+ // to a column)
+
+ // print unprinted lines until beginning of block
+ pos = b.writeLines(pos, line0, this)
+ line0 = this
+
+ // column block begin
+ width := b.minwidth // minimal column width
+ discardable := true // true if all cells in this column are empty and "soft"
+ for ; this < line1; this++ {
+ line = b.lines[this]
+ if column < len(line)-1 {
+ // cell exists in this column
+ c := line[column]
+ // update width
+ if w := c.width + b.padding; w > width {
+ width = w
+ }
+ // update discardable
+ if c.width > 0 || c.htab {
+ discardable = false
+ }
+ } else {
+ break
+ }
+ }
+ // column block end
+
+ // discard empty columns if necessary
+ if discardable && b.flags&DiscardEmptyColumns != 0 {
+ width = 0
+ }
+
+ // format and print all columns to the right of this column
+ // (we know the widths of this column and all columns to the left)
+ b.widths = append(b.widths, width) // push width
+ pos = b.format(pos, line0, this)
+ b.widths = b.widths[0 : len(b.widths)-1] // pop width
+ line0 = this
+ }
+ }
+
+ // print unprinted lines until end
+ return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+ b.buf.Write(text)
+ b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+ // ---- Changes here -----
+ newChars := b.buf.Bytes()[b.pos:b.buf.Len()]
+ cleaned := vtclean.Clean(string(newChars), false) // false to strip colors
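+	// For example, vtclean.Clean("\x1b[31mred\x1b[0m", false) returns "red",
+	// so ANSI color sequences contribute nothing to the computed width.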
+ b.cell.width += utf8.RuneCount([]byte(cleaned))
+ // --- end of changes ----
+ b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+ switch ch {
+ case Escape:
+ b.endChar = Escape
+ case '<':
+ b.endChar = '>'
+ case '&':
+ b.endChar = ';'
+ }
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+ switch b.endChar {
+ case Escape:
+ b.updateWidth()
+ if b.flags&StripEscape == 0 {
+ b.cell.width -= 2 // don't count the Escape chars
+ }
+ case '>': // tag of zero width
+ case ';':
+ b.cell.width++ // entity, count as one rune
+ }
+ b.pos = b.buf.Len()
+ b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+ b.cell.htab = htab
+ line := &b.lines[len(b.lines)-1]
+ *line = append(*line, b.cell)
+ b.cell = cell{}
+ return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+ if e := recover(); e != nil {
+ if nerr, ok := e.(osError); ok {
+ *err = nerr.err
+ return
+ }
+ panic("tabwriter: panic during " + op)
+ }
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+ defer b.reset() // even in the presence of errors
+ defer handlePanic(&err, "Flush")
+
+ // add current cell if not empty
+ if b.cell.size > 0 {
+ if b.endChar != 0 {
+ // inside escape - terminate it even if incomplete
+ b.endEscape()
+ }
+ b.terminateCell(false)
+ }
+
+ // format contents of buffer
+ b.format(0, 0, len(b.lines))
+
+ return
+}
+
+var hbar = []byte("---\n")
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (b *Writer) SetColumnAlignRight(column int) {
+ b.alignment[column] = AlignRight
+}
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+ defer handlePanic(&err, "Write")
+
+ // split text into cells
+ n = 0
+ for i, ch := range buf {
+ if b.endChar == 0 {
+ // outside escape
+ switch ch {
+ case '\t', '\v', '\n', '\f':
+ // end of cell
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i + 1 // ch consumed
+ ncells := b.terminateCell(ch == '\t')
+ if ch == '\n' || ch == '\f' {
+ // terminate line
+ b.addLine()
+ if ch == '\f' || ncells == 1 {
+						// A '\f' always forces a flush. Otherwise, if the previous
+						// line has only one cell, that cell does not affect the
+						// formatting of the following lines (the last cell per
+						// line is ignored by format()), so we can flush the
+						// Writer contents.
+ if err = b.Flush(); err != nil {
+ return
+ }
+ if ch == '\f' && b.flags&Debug != 0 {
+ // indicate section break
+ b.write0(hbar)
+ }
+ }
+ }
+
+ case Escape:
+ // start of escaped sequence
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ if b.flags&StripEscape != 0 {
+ n++ // strip Escape
+ }
+ b.startEscape(Escape)
+
+ case '<', '&':
+ // possibly an html tag/entity
+ if b.flags&FilterHTML != 0 {
+ // begin of tag/entity
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ b.startEscape(ch)
+ }
+ }
+
+ } else {
+ // inside escape
+ if ch == b.endChar {
+ // end of tag/entity
+ j := i + 1
+ if ch == Escape && b.flags&StripEscape != 0 {
+ j = i // strip Escape
+ }
+ b.append(buf[n:j])
+ n = i + 1 // ch consumed
+ b.endEscape()
+ }
+ }
+ }
+
+ // append leftover text
+ b.append(buf[n:])
+ n = len(buf)
+ return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
diff --git a/vendor/github.com/juju/ansiterm/terminal.go b/vendor/github.com/juju/ansiterm/terminal.go
new file mode 100644
index 000000000..96fd11c51
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/terminal.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "io"
+ "os"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+// colorEnabledWriter returns a writer that can handle the ansi color codes
+// and true if the writer passed in is a terminal capable of color. If the
+// TERM environment variable is set to "dumb", the terminal is not considered
+// color capable.
+func colorEnabledWriter(w io.Writer) (io.Writer, bool) {
+ f, ok := w.(*os.File)
+ if !ok {
+ return w, false
+ }
+ // Check the TERM environment variable specifically
+ // to check for "dumb" terminals.
+ if os.Getenv("TERM") == "dumb" {
+ return w, false
+ }
+ if !isatty.IsTerminal(f.Fd()) {
+ return w, false
+ }
+ return colorable.NewColorable(f), true
+}
diff --git a/vendor/github.com/juju/ansiterm/writer.go b/vendor/github.com/juju/ansiterm/writer.go
new file mode 100644
index 000000000..32437bb27
--- /dev/null
+++ b/vendor/github.com/juju/ansiterm/writer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+ "fmt"
+ "io"
+)
+
+// Writer allows colors and styles to be specified. If the io.Writer
+// is not a terminal capable of color, all attempts to set colors or
+// styles are no-ops.
+type Writer struct {
+ io.Writer
+
+ noColor bool
+}
+
+// NewWriter returns a Writer that allows the caller to specify colors and
+// styles. If the io.Writer is not a terminal capable of color, all attempts
+// to set colors or styles are no-ops.
+func NewWriter(w io.Writer) *Writer {
+ writer, colorCapable := colorEnabledWriter(w)
+ return &Writer{
+ Writer: writer,
+ noColor: !colorCapable,
+ }
+}
+
+// SetColorCapable forces the writer to either write the ANSI escape color
+// if capable is true, or to not write them if capable is false.
+func (w *Writer) SetColorCapable(capable bool) {
+ w.noColor = !capable
+}
+
+// SetForeground sets the foreground color.
+func (w *Writer) SetForeground(c Color) {
+ w.writeSGR(c.foreground())
+}
+
+// SetBackground sets the background color.
+func (w *Writer) SetBackground(c Color) {
+ w.writeSGR(c.background())
+}
+
+// SetStyle sets the text style.
+func (w *Writer) SetStyle(s Style) {
+ w.writeSGR(s.enable())
+}
+
+// ClearStyle clears the text style.
+func (w *Writer) ClearStyle(s Style) {
+ w.writeSGR(s.disable())
+}
+
+// Reset returns the default foreground and background colors with no styles.
+func (w *Writer) Reset() {
+ w.writeSGR(reset)
+}
+
+type sgr interface {
+ // sgr returns the combined escape sequence for the Select Graphic Rendition.
+ sgr() string
+}
+
+// writeSGR takes the appropriate integer SGR parameters
+// and writes out the ANSI escape code.
+func (w *Writer) writeSGR(value sgr) {
+ if w.noColor {
+ return
+ }
+ fmt.Fprint(w, value.sgr())
+}
diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go
index b26d19ec2..35fc072a3 100644
--- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/gen_inflate.go
@@ -42,16 +42,6 @@ func (f *decompressor) $FUNCNAME$() {
stateDict
)
fr := f.r.($TYPE$)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
switch f.stepState {
case stateInit:
@@ -112,9 +102,7 @@ readLiteral:
}
}
- var n uint // number of bits extra
var length int
- var err error
switch {
case v < 256:
f.dict.writeByte(byte(v))
@@ -131,71 +119,97 @@ readLiteral:
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
for f.nb < n {
- if err = moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
f.b >>= n & regSizeMaskUint32
f.nb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
}
var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
}
- f.err = err
- return
}
- dist = uint32(sym)
}
switch {
@@ -206,13 +220,17 @@ readLiteral:
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
f.b >>= nb & regSizeMaskUint32
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index 189e9fe0b..16bc51408 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -29,6 +29,13 @@ const (
debugDecode = false
)
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+ length, extra uint8
+}
+
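+// For example, literal/length code 266 (index 266-257 = 9) maps to
+// {length: 0xa, extra: 0x1}: a base match length of 0xa+3 = 13 with one
+// extra bit, matching RFC 1951.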
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index 9a92a1b30..cc6db2792 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -20,16 +20,6 @@ func (f *decompressor) huffmanBytesBuffer() {
stateDict
)
fr := f.r.(*bytes.Buffer)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
switch f.stepState {
case stateInit:
@@ -90,9 +80,7 @@ readLiteral:
}
}
- var n uint // number of bits extra
var length int
- var err error
switch {
case v < 256:
f.dict.writeByte(byte(v))
@@ -109,71 +97,97 @@ readLiteral:
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
for f.nb < n {
- if err = moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
f.b >>= n & regSizeMaskUint32
f.nb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
}
var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
}
- f.err = err
- return
}
- dist = uint32(sym)
}
switch {
@@ -184,13 +198,17 @@ readLiteral:
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
f.b >>= nb & regSizeMaskUint32
@@ -246,16 +264,6 @@ func (f *decompressor) huffmanBytesReader() {
stateDict
)
fr := f.r.(*bytes.Reader)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
switch f.stepState {
case stateInit:
@@ -316,9 +324,7 @@ readLiteral:
}
}
- var n uint // number of bits extra
var length int
- var err error
switch {
case v < 256:
f.dict.writeByte(byte(v))
@@ -335,71 +341,97 @@ readLiteral:
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
for f.nb < n {
- if err = moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
f.b >>= n & regSizeMaskUint32
f.nb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
}
var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
}
- f.err = err
- return
}
- dist = uint32(sym)
}
switch {
@@ -410,13 +442,17 @@ readLiteral:
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
f.b >>= nb & regSizeMaskUint32
@@ -472,16 +508,6 @@ func (f *decompressor) huffmanBufioReader() {
stateDict
)
fr := f.r.(*bufio.Reader)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
switch f.stepState {
case stateInit:
@@ -542,9 +568,7 @@ readLiteral:
}
}
- var n uint // number of bits extra
var length int
- var err error
switch {
case v < 256:
f.dict.writeByte(byte(v))
@@ -561,71 +585,97 @@ readLiteral:
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
for f.nb < n {
- if err = moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
f.b >>= n & regSizeMaskUint32
f.nb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
}
var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
}
- f.err = err
- return
}
- dist = uint32(sym)
}
switch {
@@ -636,13 +686,17 @@ readLiteral:
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
f.b >>= nb & regSizeMaskUint32
@@ -698,16 +752,6 @@ func (f *decompressor) huffmanStringsReader() {
stateDict
)
fr := f.r.(*strings.Reader)
- moreBits := func() error {
- c, err := fr.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- return nil
- }
switch f.stepState {
case stateInit:
@@ -768,9 +812,7 @@ readLiteral:
}
}
- var n uint // number of bits extra
var length int
- var err error
switch {
case v < 256:
f.dict.writeByte(byte(v))
@@ -787,71 +829,97 @@ readLiteral:
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
for f.nb < n {
- if err = moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
f.b >>= n & regSizeMaskUint32
f.nb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
}
var dist uint32
if f.hd == nil {
for f.nb < 5 {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with a single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it to
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
}
- f.err = err
- return
}
- dist = uint32(sym)
}
switch {
@@ -862,13 +930,17 @@ readLiteral:
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
- if err = f.moreBits(); err != nil {
+ c, err := fr.ReadByte()
+ if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
+ f.roffset++
+ f.b |= uint32(c) << f.nb
+ f.nb += 8
}
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
f.b >>= nb & regSizeMaskUint32
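The rewritten hunks above replace the `moreBits` closure with an inlined refill loop that keeps the bit buffer in local variables, as the comments describe. Below is a minimal, self-contained sketch of that refill-and-lookup pattern; the 4-bit-length/12-bit-symbol table layout and the `decodeSymbol` helper are illustrative assumptions, not the package's actual `huffmanDecoder` API.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// decodeSymbol mirrors the inlined pattern above: keep the bit buffer b and
// the bit count nb in locals, refill one byte at a time, and treat a
// zero-length table entry as corrupt input. The table packs the code length
// in the low 4 bits and the symbol in the high bits (a simplification).
func decodeSymbol(br io.ByteReader, chunks []uint16, maxRead uint, b uint32, nb uint) (sym uint16, newB uint32, newNB uint, err error) {
	n := maxRead
	mask := uint32(len(chunks) - 1)
	for {
		// Refill the local bit buffer until at least n bits are available.
		for nb < n {
			c, rerr := br.ReadByte()
			if rerr != nil {
				return 0, b, nb, rerr
			}
			b |= uint32(c) << nb
			nb += 8
		}
		entry := chunks[b&mask]
		n = uint(entry & 0xf) // actual code length for this prefix
		if n == 0 {
			return 0, b, nb, fmt.Errorf("corrupt input: zero-length code")
		}
		if n <= nb {
			// Enough bits buffered: consume them and return the symbol.
			return entry >> 4, b >> n, nb - n, nil
		}
	}
}

func main() {
	// Toy table: every 2-bit pattern i maps to symbol i with code length 2.
	chunks := make([]uint16, 4)
	for i := range chunks {
		chunks[i] = uint16(i)<<4 | 2
	}
	br := bytes.NewReader([]byte{0xe4}) // 0b11100100: symbols 0,1,2,3 LSB-first
	var b uint32
	var nb uint
	for i := 0; i < 4; i++ {
		sym, b2, nb2, err := decodeSymbol(br, chunks, 2, b, nb)
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		b, nb = b2, nb2
		fmt.Println("symbol:", sym)
	}
}
```

The property mirrored here is that `b` and `nb` stay in locals for the whole loop and are only written back at the end, which is what lets the compiler keep them in registers.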
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index ea3e51082..08e553f75 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -54,11 +54,11 @@ To create a writer with default options, do like this:
```Go
// Compress input to output.
func Compress(in io.Reader, out io.Writer) error {
- w, err := NewWriter(output)
+ enc, err := zstd.NewWriter(out)
if err != nil {
return err
}
- _, err := io.Copy(w, input)
+ _, err = io.Copy(enc, in)
if err != nil {
enc.Close()
return err
@@ -251,14 +251,14 @@ For streaming use a simple setup could look like this:
import "github.com/klauspost/compress/zstd"
func Decompress(in io.Reader, out io.Writer) error {
- d, err := zstd.NewReader(input)
+ d, err := zstd.NewReader(in)
if err != nil {
return err
}
defer d.Close()
// Copy content...
- _, err := io.Copy(out, d)
+ _, err = io.Copy(out, d)
return err
}
```
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index d78be6d42..cdda0de58 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -323,19 +323,23 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
- if uint64(cap(dst)) < frame.FrameContentSize {
+ if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
copy(dst2, dst)
dst = dst2
}
}
if cap(dst) == 0 {
- // Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
- size := frame.WindowSize * 2
+ // Allocate len(input) * 2 by default if nothing is provided
+ // and we didn't get frame content size.
+ size := len(input) * 2
// Cap to 1 MB.
if size > 1<<20 {
size = 1 << 20
}
+ if uint64(size) > d.o.maxDecodedSize {
+ size = int(d.o.maxDecodedSize)
+ }
dst = make([]byte, 0, size)
}
diff --git a/vendor/github.com/lunixbochs/vtclean/.travis.yml b/vendor/github.com/lunixbochs/vtclean/.travis.yml
new file mode 100644
index 000000000..fc0a54325
--- /dev/null
+++ b/vendor/github.com/lunixbochs/vtclean/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+sudo: false
+
+script: go test -v
+
+go:
+ - 1.5
+ - 1.6
+ - 1.7
diff --git a/vendor/github.com/lunixbochs/vtclean/LICENSE b/vendor/github.com/lunixbochs/vtclean/LICENSE
new file mode 100644
index 000000000..42e82633f
--- /dev/null
+++ b/vendor/github.com/lunixbochs/vtclean/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Ryan Hileman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/lunixbochs/vtclean/README.md b/vendor/github.com/lunixbochs/vtclean/README.md
new file mode 100644
index 000000000..99910a460
--- /dev/null
+++ b/vendor/github.com/lunixbochs/vtclean/README.md
@@ -0,0 +1,46 @@
+[![Build Status](https://travis-ci.org/lunixbochs/vtclean.svg?branch=master)](https://travis-ci.org/lunixbochs/vtclean)
+
+vtclean
+----
+
+Clean up raw terminal output by stripping escape sequences, optionally preserving color.
+
+Get it: `go get github.com/lunixbochs/vtclean/vtclean`
+
+API:
+
+ import "github.com/lunixbochs/vtclean"
+ vtclean.Clean(line string, color bool) string
+
+Command line example:
+
+ $ echo -e '\x1b[1;32mcolor example
+ color forced to stop at end of line
+ backspace is ba\b\bgood
+ no beeps!\x07\x07' | ./vtclean -color
+
+ color example
+ color forced to stop at end of line
+ backspace is good
+ no beeps!
+
+Go example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/lunixbochs/vtclean"
+ )
+
+ func main() {
+ line := vtclean.Clean(
+ "\033[1;32mcolor, " +
+ "curs\033[Aor, " +
+ "backspace\b\b\b\b\b\b\b\b\b\b\b\033[K", false)
+ fmt.Println(line)
+ }
+
+Output:
+
+ color, cursor
diff --git a/vendor/github.com/lunixbochs/vtclean/io.go b/vendor/github.com/lunixbochs/vtclean/io.go
new file mode 100644
index 000000000..31be0076a
--- /dev/null
+++ b/vendor/github.com/lunixbochs/vtclean/io.go
@@ -0,0 +1,93 @@
+package vtclean
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+)
+
+type reader struct {
+ io.Reader
+ scanner *bufio.Scanner
+ buf []byte
+
+ color bool
+}
+
+func NewReader(r io.Reader, color bool) io.Reader {
+ return &reader{Reader: r, color: color}
+}
+
+func (r *reader) scan() bool {
+ if r.scanner == nil {
+ r.scanner = bufio.NewScanner(r.Reader)
+ }
+ if len(r.buf) > 0 {
+ return true
+ }
+ if r.scanner.Scan() {
+ r.buf = []byte(Clean(r.scanner.Text(), r.color) + "\n")
+ return true
+ }
+ return false
+}
+
+func (r *reader) fill(p []byte) int {
+ n := len(r.buf)
+ copy(p, r.buf)
+ if len(p) < len(r.buf) {
+ r.buf = r.buf[len(p):]
+ n = len(p)
+ } else {
+ r.buf = nil
+ }
+ return n
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+ n := r.fill(p)
+ if n < len(p) {
+ if !r.scan() {
+ if n == 0 {
+ return 0, io.EOF
+ }
+ return n, nil
+ }
+ n += r.fill(p[n:])
+ }
+ return n, nil
+}
+
+type writer struct {
+ io.Writer
+ buf []byte
+ color bool
+}
+
+func NewWriter(w io.Writer, color bool) io.WriteCloser {
+ return &writer{Writer: w, color: color}
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+ buf := append(w.buf, p...)
+ lines := bytes.Split(buf, []byte("\n"))
+ if len(lines) > 0 {
+ last := len(lines) - 1
+ w.buf = lines[last]
+ count := 0
+ for _, line := range lines[:last] {
+ n, err := w.Writer.Write([]byte(Clean(string(line), w.color) + "\n"))
+ count += n
+ if err != nil {
+ return count, err
+ }
+ }
+ }
+ return len(p), nil
+}
+
+func (w *writer) Close() error {
+ cl := Clean(string(w.buf), w.color)
+ _, err := w.Writer.Write([]byte(cl))
+ return err
+}
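For reference, the `NewReader`/`NewWriter` wrappers added above clean a stream line by line; a small usage sketch based only on the functions shown in this file:

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/lunixbochs/vtclean"
)

func main() {
	// Strip escape sequences from a stream line by line while copying it.
	dirty := strings.NewReader("plain\n\x1b[1;32mgreen\x1b[0m text\n")
	clean := vtclean.NewReader(dirty, false) // false: drop color codes too
	if _, err := io.Copy(os.Stdout, clean); err != nil {
		panic(err)
	}

	// The writer side buffers partial lines and cleans them on newline/Close.
	w := vtclean.NewWriter(os.Stdout, false)
	defer w.Close()
	io.WriteString(w, "backspace is ba\b\bgood\n")
}
```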
diff --git a/vendor/github.com/lunixbochs/vtclean/line.go b/vendor/github.com/lunixbochs/vtclean/line.go
new file mode 100644
index 000000000..66ee990be
--- /dev/null
+++ b/vendor/github.com/lunixbochs/vtclean/line.go
@@ -0,0 +1,113 @@
+package vtclean
+
+type char struct {
+ char byte
+ vt100 []byte
+}
+
+func chars(p []byte) []char {
+ tmp := make([]char, len(p))
+ for i, v := range p {
+ tmp[i].char = v
+ }
+ return tmp
+}
+
+type lineEdit struct {
+ buf []char
+ pos, size int
+ vt100 []byte
+}
+
+func newLineEdit(length int) *lineEdit {
+ return &lineEdit{buf: make([]char, length)}
+}
+
+func (l *lineEdit) Vt100(p []byte) {
+ l.vt100 = p
+}
+
+func (l *lineEdit) Move(x int) {
+ if x < 0 && l.pos <= -x {
+ l.pos = 0
+ } else if x > 0 && l.pos+x > l.size {
+ l.pos = l.size
+ } else {
+ l.pos += x
+ }
+}
+
+func (l *lineEdit) MoveAbs(x int) {
+ if x < l.size {
+ l.pos = x
+ }
+}
+
+func (l *lineEdit) Write(p []byte) {
+ c := chars(p)
+ if len(c) > 0 {
+ c[0].vt100 = l.vt100
+ l.vt100 = nil
+ }
+ if len(l.buf)-l.pos < len(c) {
+ l.buf = append(l.buf[:l.pos], c...)
+ } else {
+ copy(l.buf[l.pos:], c)
+ }
+ l.pos += len(c)
+ if l.pos > l.size {
+ l.size = l.pos
+ }
+}
+
+func (l *lineEdit) Insert(p []byte) {
+ c := chars(p)
+ if len(c) > 0 {
+ c[0].vt100 = l.vt100
+ l.vt100 = nil
+ }
+ l.size += len(c)
+ c = append(c, l.buf[l.pos:]...)
+ l.buf = append(l.buf[:l.pos], c...)
+}
+
+func (l *lineEdit) Delete(n int) {
+ most := l.size - l.pos
+ if n > most {
+ n = most
+ }
+ copy(l.buf[l.pos:], l.buf[l.pos+n:])
+ l.size -= n
+}
+
+func (l *lineEdit) Clear() {
+ for i := 0; i < len(l.buf); i++ {
+ l.buf[i].char = ' '
+ }
+}
+func (l *lineEdit) ClearLeft() {
+ for i := 0; i < l.pos+1; i++ {
+ l.buf[i].char = ' '
+ }
+}
+func (l *lineEdit) ClearRight() {
+ l.size = l.pos
+}
+
+func (l *lineEdit) Bytes() []byte {
+ length := 0
+ buf := l.buf[:l.size]
+ for _, v := range buf {
+ length += 1 + len(v.vt100)
+ }
+ tmp := make([]byte, 0, length)
+ for _, v := range buf {
+ tmp = append(tmp, v.vt100...)
+ tmp = append(tmp, v.char)
+ }
+ return tmp
+}
+
+func (l *lineEdit) String() string {
+ return string(l.Bytes())
+}
diff --git a/vendor/github.com/lunixbochs/vtclean/vtclean.go b/vendor/github.com/lunixbochs/vtclean/vtclean.go
new file mode 100644
index 000000000..64fe01fdb
--- /dev/null
+++ b/vendor/github.com/lunixbochs/vtclean/vtclean.go
@@ -0,0 +1,95 @@
+package vtclean
+
+import (
+ "bytes"
+ "regexp"
+ "strconv"
+)
+
+// regex based on ECMA-48:
+// 1. optional:
+// one of [ or ]
+// any amount of 0x30-0x3f
+// any amount of 0x20-0x2f
+// 2. exactly one 0x40-0x7e
+var vt100re = regexp.MustCompile(`^\033([\[\]]([0-9:;<=>\?]*)([!"#$%&'()*+,\-./]*))?([@A-Z\[\]^_\x60a-z{|}~])`)
+var vt100exc = regexp.MustCompile(`^\033(\[[^a-zA-Z0-9@\?]+|[\(\)]).`)
+
+// this is to handle the RGB escape generated by `tput initc 1 500 500 500`
+var vt100long = regexp.MustCompile(`^\033](\d+);([^\033]+)\033\\`)
+
+func Clean(line string, color bool) string {
+ var edit = newLineEdit(len(line))
+ lineb := []byte(line)
+
+ hadColor := false
+ for i := 0; i < len(lineb); {
+ c := lineb[i]
+ switch c {
+ case '\r':
+ edit.MoveAbs(0)
+ case '\b':
+ edit.Move(-1)
+ case '\033':
+ // set terminal title
+ if bytes.HasPrefix(lineb[i:], []byte("\x1b]0;")) {
+ pos := bytes.Index(lineb[i:], []byte("\a"))
+ if pos != -1 {
+ i += pos + 1
+ continue
+ }
+ }
+ if m := vt100long.Find(lineb[i:]); m != nil {
+ i += len(m)
+ } else if m := vt100exc.Find(lineb[i:]); m != nil {
+ i += len(m)
+ } else if m := vt100re.FindSubmatch(lineb[i:]); m != nil {
+ i += len(m[0])
+ num := string(m[2])
+ n, err := strconv.Atoi(num)
+ if err != nil || n > 10000 {
+ n = 1
+ }
+ switch m[4][0] {
+ case 'm':
+ if color {
+ hadColor = true
+ edit.Vt100(m[0])
+ }
+ case '@':
+ edit.Insert(bytes.Repeat([]byte{' '}, n))
+ case 'G':
+ edit.MoveAbs(n)
+ case 'C':
+ edit.Move(n)
+ case 'D':
+ edit.Move(-n)
+ case 'P':
+ edit.Delete(n)
+ case 'K':
+ switch num {
+ case "", "0":
+ edit.ClearRight()
+ case "1":
+ edit.ClearLeft()
+ case "2":
+ edit.Clear()
+ }
+ }
+ } else {
+ i += 1
+ }
+ continue
+ default:
+ if c == '\n' || c == '\t' || c >= ' ' {
+ edit.Write([]byte{c})
+ }
+ }
+ i += 1
+ }
+ out := edit.Bytes()
+ if hadColor {
+ out = append(out, []byte("\033[0m")...)
+ }
+ return string(out)
+}
diff --git a/vendor/github.com/manifoldco/promptui/.gitignore b/vendor/github.com/manifoldco/promptui/.gitignore
new file mode 100644
index 000000000..8ee1778b5
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/.gitignore
@@ -0,0 +1,3 @@
+vendor
+all-cover.txt
+bin/
diff --git a/vendor/github.com/manifoldco/promptui/.golangci.yml b/vendor/github.com/manifoldco/promptui/.golangci.yml
new file mode 100644
index 000000000..e232bfbe5
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/.golangci.yml
@@ -0,0 +1,26 @@
+run:
+ deadline: 5m
+
+issues:
+ # Disable maximums so we see all issues
+ max-per-linter: 0
+ max-same-issues: 0
+
+ # golangci-lint ignores missing docstrings by default. That's no good!
+ exclude-use-default: false
+
+linters:
+ disable-all: true
+ enable:
+ - misspell
+ - golint
+ - goimports
+ - ineffassign
+ - deadcode
+ - gofmt
+ - govet
+ - structcheck
+ - unconvert
+ - megacheck
+ - typecheck
+ - varcheck
diff --git a/vendor/github.com/manifoldco/promptui/.travis.yml b/vendor/github.com/manifoldco/promptui/.travis.yml
new file mode 100644
index 000000000..01c16c440
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/.travis.yml
@@ -0,0 +1,14 @@
+dist: bionic
+language: go
+
+go:
+ - "1.12.x"
+ - "1.13.x"
+
+branches:
+ only:
+ - master
+
+after_success:
+ # only report coverage for go-version 1.11
+ - if [[ $TRAVIS_GO_VERSION =~ ^1\.11 ]] ; then bash <(curl -s https://codecov.io/bash) -f all-cover.txt; fi
diff --git a/vendor/github.com/manifoldco/promptui/CHANGELOG.md b/vendor/github.com/manifoldco/promptui/CHANGELOG.md
new file mode 100644
index 000000000..563e9d00a
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/CHANGELOG.md
@@ -0,0 +1,123 @@
+# CHANGELOG
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](http://semver.org/).
+
+## Unreleased
+
+## [0.8.0] - 2020-09-28
+
+### Added
+
+- Support ctrl-h for backspace
+- Allow hiding entered data after submit
+- Allow masking input with an empty rune to hide input length
+
+### Fixed
+
+- Fix echo of cursor after input is finished
+- Better support for keycodes on Windows
+
+
+## [0.7.0] - 2020-01-11
+
+### Added
+
+- Add support for configurable Stdin/Stdout on Prompt
+- Add support for setting initial cursor position
+- Switch to golangci-lint for linting
+
+### Removed
+
+- Removed support for Go 1.11
+
+### Fixed
+
+- Reduce tool-based deps, hopefully fixing any install issues
+
+## [0.6.0] - 2019-11-29
+
+### Added
+
+- Support configurable stdin
+
+### Fixed
+
+- Correct the dep on go-i18n
+
+## [0.5.0] - 2019-11-29
+
+### Added
+
+- Now building and testing on go 1.11, go 1.12, and go 1.13
+
+### Removed
+
+- Removed support for Go versions that don't include modules.
+
+## [0.4.0] - 2019-02-19
+
+### Added
+
+- The text displayed when an item was successfully selected can be hidden
+
+## [0.3.2] - 2018-11-26
+
+### Added
+
+- Support Go modules
+
+### Fixed
+
+- Fix typos in PromptTemplates documentation
+
+## [0.3.1] - 2018-07-26
+
+### Added
+
+- Improved documentation for GoDoc
+- Navigation keys information for Windows
+
+### Fixed
+
+- `success` template was not properly displayed after a successful prompt.
+
+## [0.3.0] - 2018-05-22
+
+### Added
+
+- Background colors codes and template helpers
+- `AllowEdit` for prompt to prevent deletion of the default value by any key
+- Added `StartInSearchMode` to allow starting the prompt in search mode
+
+### Fixed
+
+- `<Enter>` key press on Windows
+- `juju/ansiterm` dependency
+- `chzyer/readline#136` new api with ReadCloser
+- Deleting UTF-8 characters sequence
+
+## [0.2.1] - 2017-11-30
+
+### Fixed
+
+- `SelectWithAdd` panicking on `.Run` due to lack of keys setup
+- Backspace key on Windows
+
+## [0.2.0] - 2017-11-16
+
+### Added
+
+- `Select` items can now be searched
+
+## [0.1.0] - 2017-11-02
+
+### Added
+
+- extract `promptui` from [torus](https://github.com/manifoldco/torus-cli) as a
+ standalone lib.
+- `promptui.Prompt` provides a single input line to capture user information.
+- `promptui.Select` provides a list of options to choose from. Users can
+ navigate through the list either one item at time or by pagination
diff --git a/vendor/github.com/manifoldco/promptui/CODE_OF_CONDUCT.md b/vendor/github.com/manifoldco/promptui/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..cc58cce02
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/CODE_OF_CONDUCT.md
@@ -0,0 +1,73 @@
+# Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
+
+## Our Standards
+
+Examples of behaviour that contributes to creating a positive environment
+include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behaviour by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behaviour and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behaviour.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviours that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at
+[hello@manifold.co](mailto:hello@manifold.co). All complaints will be reviewed
+and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. The project team is obligated to maintain
+confidentiality with regard to the reporter of an incident. Further details of
+specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+[http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4).
diff --git a/vendor/github.com/manifoldco/promptui/LICENSE.md b/vendor/github.com/manifoldco/promptui/LICENSE.md
new file mode 100644
index 000000000..3ae687b2c
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/LICENSE.md
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017, Arigato Machine Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/manifoldco/promptui/Makefile b/vendor/github.com/manifoldco/promptui/Makefile
new file mode 100644
index 000000000..078a5613a
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/Makefile
@@ -0,0 +1,49 @@
+export GO111MODULE := on
+export PATH := ./bin:$(PATH)
+
+ci: bootstrap lint cover
+.PHONY: ci
+
+#################################################
+# Bootstrapping for base golang package and tool deps
+#################################################
+
+bootstrap:
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.21.0
+.PHONY: bootstrap
+
+mod-update:
+ go get -u -m
+ go mod tidy
+
+mod-tidy:
+ go mod tidy
+
+.PHONY: $(CMD_PKGS)
+.PHONY: mod-update mod-tidy
+
+#################################################
+# Test and linting
+#################################################
+# Run all the linters
+lint:
+ bin/golangci-lint run ./...
+.PHONY: lint
+
+test:
+ CGO_ENABLED=0 go test $$(go list ./... | grep -v generated)
+.PHONY: test
+
+COVER_TEST_PKGS:=$(shell find . -type f -name '*_test.go' | rev | cut -d "/" -f 2- | rev | grep -v generated | sort -u)
+$(COVER_TEST_PKGS:=-cover): %-cover: all-cover.txt
+ @CGO_ENABLED=0 go test -v -coverprofile=$@.out -covermode=atomic ./$*
+ @if [ -f $@.out ]; then \
+ grep -v "mode: atomic" < $@.out >> all-cover.txt; \
+ rm $@.out; \
+ fi
+
+all-cover.txt:
+ echo "mode: atomic" > all-cover.txt
+
+cover: all-cover.txt $(COVER_TEST_PKGS:=-cover)
+.PHONY: cover all-cover.txt
diff --git a/vendor/github.com/manifoldco/promptui/README.md b/vendor/github.com/manifoldco/promptui/README.md
new file mode 100644
index 000000000..4fd4dc6d4
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/README.md
@@ -0,0 +1,107 @@
+# promptui
+
+Interactive prompt for command-line applications.
+
+We built Promptui because we wanted to make it easy and fun to explore cloud
+services with [manifold cli](https://github.com/manifoldco/manifold-cli).
+
+[Code of Conduct](./CODE_OF_CONDUCT.md) |
+[Contribution Guidelines](./.github/CONTRIBUTING.md)
+
+[![GitHub release](https://img.shields.io/github/tag/manifoldco/promptui.svg?label=latest)](https://github.com/manifoldco/promptui/releases)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/manifoldco/promptui)
+[![Travis](https://img.shields.io/travis/manifoldco/promptui/master.svg)](https://travis-ci.org/manifoldco/promptui)
+[![Go Report Card](https://goreportcard.com/badge/github.com/manifoldco/promptui)](https://goreportcard.com/report/github.com/manifoldco/promptui)
+[![License](https://img.shields.io/badge/license-BSD-blue.svg)](./LICENSE.md)
+
+## Overview
+
+![promptui](https://media.giphy.com/media/xUNda0Ngb5qsogLsBi/giphy.gif)
+
+Promptui is a library providing a simple interface to create command-line
+prompts for go. It can be easily integrated into
+[spf13/cobra](https://github.com/spf13/cobra),
+[urfave/cli](https://github.com/urfave/cli) or any cli go application.
+
+Promptui has two main input modes:
+
+- `Prompt` provides a single line for user input. Prompt supports
+ optional live validation, confirmation and masking the input.
+
+- `Select` provides a list of options to choose from. Select supports
+ pagination, search, detailed view and custom templates.
+
+For a full list of options check [GoDoc](https://godoc.org/github.com/manifoldco/promptui).
+
+## Basic Usage
+
+### Prompt
+
+```go
+package main
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/manifoldco/promptui"
+)
+
+func main() {
+ validate := func(input string) error {
+ _, err := strconv.ParseFloat(input, 64)
+ if err != nil {
+ return errors.New("Invalid number")
+ }
+ return nil
+ }
+
+ prompt := promptui.Prompt{
+ Label: "Number",
+ Validate: validate,
+ }
+
+ result, err := prompt.Run()
+
+ if err != nil {
+ fmt.Printf("Prompt failed %v\n", err)
+ return
+ }
+
+ fmt.Printf("You choose %q\n", result)
+}
+```
+
+### Select
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/manifoldco/promptui"
+)
+
+func main() {
+ prompt := promptui.Select{
+ Label: "Select Day",
+ Items: []string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
+ "Saturday", "Sunday"},
+ }
+
+ _, result, err := prompt.Run()
+
+ if err != nil {
+ fmt.Printf("Prompt failed %v\n", err)
+ return
+ }
+
+ fmt.Printf("You choose %q\n", result)
+}
+```
+
+### More Examples
+
+See full list of [examples](https://github.com/manifoldco/promptui/tree/master/_examples)
diff --git a/vendor/github.com/manifoldco/promptui/codes.go b/vendor/github.com/manifoldco/promptui/codes.go
new file mode 100644
index 000000000..8138c40d6
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/codes.go
@@ -0,0 +1,120 @@
+package promptui
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "text/template"
+)
+
+const esc = "\033["
+
+type attribute int
+
+// The possible state of text inside the application, either Bold, faint, italic or underline.
+//
+// These constants are called through the use of the Styler function.
+const (
+ reset attribute = iota
+
+ FGBold
+ FGFaint
+ FGItalic
+ FGUnderline
+)
+
+// The possible colors of text inside the application.
+//
+// These constants are called through the use of the Styler function.
+const (
+ FGBlack attribute = iota + 30
+ FGRed
+ FGGreen
+ FGYellow
+ FGBlue
+ FGMagenta
+ FGCyan
+ FGWhite
+)
+
+// The possible background colors of text inside the application.
+//
+// These constants are called through the use of the Styler function.
+const (
+ BGBlack attribute = iota + 40
+ BGRed
+ BGGreen
+ BGYellow
+ BGBlue
+ BGMagenta
+ BGCyan
+ BGWhite
+)
+
+// ResetCode is the character code used to reset the terminal formatting
+var ResetCode = fmt.Sprintf("%s%dm", esc, reset)
+
+const (
+ hideCursor = esc + "?25l"
+ showCursor = esc + "?25h"
+ clearLine = esc + "2K"
+)
+
+// FuncMap defines template helpers for the output. It can be extended as a regular map.
+//
+// The functions inside the map link the state, color and background colors strings detected in templates to a Styler
+// function that applies the given style using the corresponding constant.
+var FuncMap = template.FuncMap{
+ "black": Styler(FGBlack),
+ "red": Styler(FGRed),
+ "green": Styler(FGGreen),
+ "yellow": Styler(FGYellow),
+ "blue": Styler(FGBlue),
+ "magenta": Styler(FGMagenta),
+ "cyan": Styler(FGCyan),
+ "white": Styler(FGWhite),
+ "bgBlack": Styler(BGBlack),
+ "bgRed": Styler(BGRed),
+ "bgGreen": Styler(BGGreen),
+ "bgYellow": Styler(BGYellow),
+ "bgBlue": Styler(BGBlue),
+ "bgMagenta": Styler(BGMagenta),
+ "bgCyan": Styler(BGCyan),
+ "bgWhite": Styler(BGWhite),
+ "bold": Styler(FGBold),
+ "faint": Styler(FGFaint),
+ "italic": Styler(FGItalic),
+ "underline": Styler(FGUnderline),
+}
+
+func upLine(n uint) string {
+ return movementCode(n, 'A')
+}
+
+func movementCode(n uint, code rune) string {
+ return esc + strconv.FormatUint(uint64(n), 10) + string(code)
+}
+
+// Styler is a function that accepts multiple possible styling transforms from the state,
+// color and background colors constants and transforms them into a templated string
+// to apply those styles in the CLI.
+//
+// The returned styling function accepts a string that will be extended with
+// the wrapping function's styling attributes.
+func Styler(attrs ...attribute) func(interface{}) string {
+ attrstrs := make([]string, len(attrs))
+ for i, v := range attrs {
+ attrstrs[i] = strconv.Itoa(int(v))
+ }
+
+ seq := strings.Join(attrstrs, ";")
+
+ return func(v interface{}) string {
+ end := ""
+ s, ok := v.(string)
+ if !ok || !strings.HasSuffix(s, ResetCode) {
+ end = ResetCode
+ }
+ return fmt.Sprintf("%s%sm%v%s", esc, seq, v, end)
+ }
+}
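`Styler` and `FuncMap` above are what the prompt templates build on; a brief sketch of how they compose, both called directly and wired into `text/template` (illustrative only):

```go
package main

import (
	"os"
	"text/template"

	"github.com/manifoldco/promptui"
)

func main() {
	// Styler can be called directly to wrap a value in ANSI codes...
	redBold := promptui.Styler(promptui.FGRed, promptui.FGBold)
	os.Stdout.WriteString(redBold("something went wrong") + "\n")

	// ...or wired into text/template through FuncMap, which is how the
	// prompt and select templates use it.
	tpl := template.Must(template.New("label").
		Funcs(promptui.FuncMap).
		Parse(`{{ "OK" | green | bold }} {{ . | faint }}` + "\n"))
	if err := tpl.Execute(os.Stdout, "styled with promptui helpers"); err != nil {
		panic(err)
	}
}
```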
diff --git a/vendor/github.com/manifoldco/promptui/cursor.go b/vendor/github.com/manifoldco/promptui/cursor.go
new file mode 100644
index 000000000..7f0961bdb
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/cursor.go
@@ -0,0 +1,232 @@
+package promptui
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Pointer is a specific type that translates a given set of runes into a given
+// set of runes pointed at by the cursor.
+type Pointer func(to []rune) []rune
+
+func defaultCursor(ignored []rune) []rune {
+ return []rune("\u2588")
+}
+
+func blockCursor(input []rune) []rune {
+ return []rune(fmt.Sprintf("\\e[7m%s\\e[0m", string(input)))
+}
+
+func pipeCursor(input []rune) []rune {
+ marker := []rune("|")
+ out := []rune{}
+ out = append(out, marker...)
+ out = append(out, input...)
+ return out
+}
+
+var (
+ // DefaultCursor is a big square block character. Obscures whatever was
+ // input.
+ DefaultCursor Pointer = defaultCursor
+ // BlockCursor is a cursor which highlights a character by inverting colors
+ // on it.
+ BlockCursor Pointer = blockCursor
+ // PipeCursor is a pipe character "|" which appears before the input
+ // character.
+ PipeCursor Pointer = pipeCursor
+)
+
+// Cursor tracks the state associated with the movable cursor.
+// The strategy is to keep the prompt and input pristine except for requested
+// modifications. The insertion of the cursor happens during a `format` call
+// and we read in new input via an `Update` call.
+type Cursor struct {
+ // shows where the user inserts/updates text
+ Cursor Pointer
+ // what the user entered, and what we will echo back to them, after
+ // insertion of the cursor and prefixing with the prompt
+ input []rune
+ // Put the cursor before this slice
+ Position int
+ erase bool
+}
+
+// NewCursor creates a new cursor with the DefaultCursor, the specified input,
+// and position at the end of the specified starting input.
+func NewCursor(startinginput string, pointer Pointer, eraseDefault bool) Cursor {
+ if pointer == nil {
+ pointer = defaultCursor
+ }
+ cur := Cursor{Cursor: pointer, Position: len(startinginput), input: []rune(startinginput), erase: eraseDefault}
+ if eraseDefault {
+ cur.Start()
+ } else {
+ cur.End()
+ }
+ return cur
+}
+
+func (c *Cursor) String() string {
+ return fmt.Sprintf(
+ "Cursor: %s, input %s, Position %d",
+ string(c.Cursor([]rune(""))), string(c.input), c.Position)
+}
+
+// End is a convenience for c.Place(len(c.input)) so you don't have to know how I
+// indexed.
+func (c *Cursor) End() {
+ c.Place(len(c.input))
+}
+
+// Start is a convenience for c.Place(0) so you don't have to know how I
+// indexed.
+func (c *Cursor) Start() {
+ c.Place(0)
+}
+
+// ensures we are in bounds.
+func (c *Cursor) correctPosition() {
+ if c.Position > len(c.input) {
+ c.Position = len(c.input)
+ }
+
+ if c.Position < 0 {
+ c.Position = 0
+ }
+}
+
+// insert the cursor rune array into r before the provided index
+func format(a []rune, c *Cursor) string {
+ i := c.Position
+ var b []rune
+
+ out := make([]rune, 0)
+ if i < len(a) {
+ b = c.Cursor(a[i : i+1])
+ out = append(out, a[:i]...) // does not include i
+ out = append(out, b...) // add the cursor
+ out = append(out, a[i+1:]...) // add the rest after i
+ } else {
+ b = c.Cursor([]rune{})
+ out = append(out, a...)
+ out = append(out, b...)
+ }
+ return string(out)
+}
+
+// Format renders the input with the Cursor appropriately positioned.
+func (c *Cursor) Format() string {
+ r := c.input
+ // insert the cursor
+ return format(r, c)
+}
+
+// FormatMask replaces all input runes with the mask rune.
+func (c *Cursor) FormatMask(mask rune) string {
+ if mask == ' ' {
+ return format([]rune{}, c)
+ }
+
+ r := make([]rune, len(c.input))
+ for i := range r {
+ r[i] = mask
+ }
+ return format(r, c)
+}
+
+// Update inserts newinput into the input []rune in the appropriate place.
+// The cursor is moved to the end of the inserted sequence.
+func (c *Cursor) Update(newinput string) {
+ a := c.input
+ b := []rune(newinput)
+ i := c.Position
+ a = append(a[:i], append(b, a[i:]...)...)
+ c.input = a
+ c.Move(len(b))
+}
+
+// Get returns a copy of the input
+func (c *Cursor) Get() string {
+ return string(c.input)
+}
+
+// GetMask returns a mask string with length equal to the input
+func (c *Cursor) GetMask(mask rune) string {
+ return strings.Repeat(string(mask), len(c.input))
+}
+
+// Replace replaces the previous input with whatever is specified, and moves the
+// cursor to the end position
+func (c *Cursor) Replace(input string) {
+ c.input = []rune(input)
+ c.End()
+}
+
+// Place moves the cursor to the absolute array index specified by position
+func (c *Cursor) Place(position int) {
+ c.Position = position
+ c.correctPosition()
+}
+
+// Move moves the cursor over in relative terms, by shift indices.
+func (c *Cursor) Move(shift int) {
+ // delete the current cursor
+ c.Position = c.Position + shift
+ c.correctPosition()
+}
+
+// Backspace removes the rune that precedes the cursor
+//
+// It handles being at the beginning or end of the row, and moves the cursor to
+// the appropriate position.
+func (c *Cursor) Backspace() {
+ a := c.input
+ i := c.Position
+ if i == 0 {
+ // Shrug
+ return
+ }
+ if i == len(a) {
+ c.input = a[:i-1]
+ } else {
+ c.input = append(a[:i-1], a[i:]...)
+ }
+ // now it's pointing to the i+1th element
+ c.Move(-1)
+}
+
+// Listen is a readline Listener that updates internal cursor state appropriately.
+func (c *Cursor) Listen(line []rune, pos int, key rune) ([]rune, int, bool) {
+ if line != nil {
+ // no matter what, update our internal representation.
+ c.Update(string(line))
+ }
+
+ switch key {
+ case 0: // empty
+ case KeyEnter:
+ return []rune(c.Get()), c.Position, false
+ case KeyBackspace, KeyCtrlH:
+ if c.erase {
+ c.erase = false
+ c.Replace("")
+ }
+ c.Backspace()
+ case KeyForward:
+ // the user wants to edit the default, despite how we set it up. Let
+ // them.
+ c.erase = false
+ c.Move(1)
+ case KeyBackward:
+ c.Move(-1)
+ default:
+ if c.erase {
+ c.erase = false
+ c.Replace("")
+ c.Update(string(key))
+ }
+ }
+
+ return []rune(c.Get()), c.Position, true
+}
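A short sketch of how the `Cursor` added above behaves outside a prompt, using only the exported methods shown in this file (the expected output in the comments is an assumption based on a reading of `format` and `pipeCursor`):

```go
package main

import (
	"fmt"

	"github.com/manifoldco/promptui"
)

func main() {
	// Start from a default value; eraseDefault=false leaves the cursor at the end.
	cur := promptui.NewCursor("hello", promptui.PipeCursor, false)
	fmt.Println(cur.Format()) // expected: hello|

	cur.Backspace()      // drop the trailing 'o'
	cur.Update(" world") // insert at the cursor position
	fmt.Println(cur.Get())           // expected: hell world
	fmt.Println(cur.FormatMask('*')) // expected: **********|
}
```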
diff --git a/vendor/github.com/manifoldco/promptui/go.mod b/vendor/github.com/manifoldco/promptui/go.mod
new file mode 100644
index 000000000..760a44deb
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/go.mod
@@ -0,0 +1,16 @@
+module github.com/manifoldco/promptui
+
+go 1.12
+
+require (
+ github.com/chzyer/logex v1.1.10 // indirect
+ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
+ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
+ github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a // indirect
+ github.com/mattn/go-colorable v0.0.9 // indirect
+ github.com/mattn/go-isatty v0.0.4 // indirect
+ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b // indirect
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+)
diff --git a/vendor/github.com/manifoldco/promptui/go.sum b/vendor/github.com/manifoldco/promptui/go.sum
new file mode 100644
index 000000000..be5f99025
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/go.sum
@@ -0,0 +1,23 @@
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
+github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a h1:weJVJJRzAJBFRlAiJQROKQs8oC9vOxvm4rZmBBk0ONw=
+github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b h1:MQE+LT/ABUuuvEZ+YQAMSXindAdUh7slEmAkup74op4=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/github.com/manifoldco/promptui/keycodes.go b/vendor/github.com/manifoldco/promptui/keycodes.go
new file mode 100644
index 000000000..ef5cd6f0a
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/keycodes.go
@@ -0,0 +1,29 @@
+package promptui
+
+import "github.com/chzyer/readline"
+
+// These runes are used to identify the commands entered by the user in the command prompt. They map
+// to specific actions of promptui in prompt mode and can be remapped if necessary.
+var (
+ // KeyEnter is the default key for submission/selection.
+ KeyEnter rune = readline.CharEnter
+
+ // KeyCtrlH is the key for deleting input text.
+ KeyCtrlH rune = readline.CharCtrlH
+
+ // KeyPrev is the default key to go up during selection.
+ KeyPrev rune = readline.CharPrev
+ KeyPrevDisplay = "↑"
+
+ // KeyNext is the default key to go down during selection.
+ KeyNext rune = readline.CharNext
+ KeyNextDisplay = "↓"
+
+ // KeyBackward is the default key to page up during selection.
+ KeyBackward rune = readline.CharBackward
+ KeyBackwardDisplay = "←"
+
+ // KeyForward is the default key to page down during selection.
+ KeyForward rune = readline.CharForward
+ KeyForwardDisplay = "→"
+)
diff --git a/vendor/github.com/manifoldco/promptui/keycodes_other.go b/vendor/github.com/manifoldco/promptui/keycodes_other.go
new file mode 100644
index 000000000..789feae4c
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/keycodes_other.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package promptui
+
+import "github.com/chzyer/readline"
+
+var (
+ // KeyBackspace is the default key for deleting input text.
+ KeyBackspace rune = readline.CharBackspace
+)
diff --git a/vendor/github.com/manifoldco/promptui/keycodes_windows.go b/vendor/github.com/manifoldco/promptui/keycodes_windows.go
new file mode 100644
index 000000000..737d1566e
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/keycodes_windows.go
@@ -0,0 +1,10 @@
+// +build windows
+
+package promptui
+
+// source: https://msdn.microsoft.com/en-us/library/aa243025(v=vs.60).aspx
+
+var (
+ // KeyBackspace is the default key for deleting input text inside a command line prompt.
+ KeyBackspace rune = 8
+)
diff --git a/vendor/github.com/manifoldco/promptui/list/list.go b/vendor/github.com/manifoldco/promptui/list/list.go
new file mode 100644
index 000000000..c98a39cfa
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/list/list.go
@@ -0,0 +1,237 @@
+package list
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Searcher is a base function signature that is used inside select when activating the search mode.
+// If defined, it is called on each item of the select and should return a boolean for whether or not
+// the item fits the searched term.
+type Searcher func(input string, index int) bool
+
+// NotFound is an index returned when no item was selected. This could
+// happen due to a search without results.
+const NotFound = -1
+
+// List holds a collection of items that can be displayed with a configurable number of
+// visible items. The list can be moved up or down by one item at a time or by an
+// entire page (i.e. the visible size). It keeps track of the currently selected item.
+type List struct {
+ items []*interface{}
+ scope []*interface{}
+ cursor int // cursor holds the index of the current selected item
+ size int // size is the number of visible options
+ start int
+ Searcher Searcher
+}
+
+// New creates and initializes a list of searchable items. The items attribute must be a slice type with a
+// size greater than 0. An error is returned if those two conditions are not met.
+func New(items interface{}, size int) (*List, error) {
+ if size < 1 {
+ return nil, fmt.Errorf("list size %d must be greater than 0", size)
+ }
+
+ if items == nil || reflect.TypeOf(items).Kind() != reflect.Slice {
+ return nil, fmt.Errorf("items %v is not a slice", items)
+ }
+
+ slice := reflect.ValueOf(items)
+ values := make([]*interface{}, slice.Len())
+
+ for i := range values {
+ item := slice.Index(i).Interface()
+ values[i] = &item
+ }
+
+ return &List{size: size, items: values, scope: values}, nil
+}
+
+// Prev moves the visible list back one item. If the selected item is out of
+// view, the newly selected item becomes the last visible item. If the list is
+// already at the top, nothing happens.
+func (l *List) Prev() {
+ if l.cursor > 0 {
+ l.cursor--
+ }
+
+ if l.start > l.cursor {
+ l.start = l.cursor
+ }
+}
+
+// Search allows the list to be filtered by a given term. The list must
+// implement the searcher function signature for this functionality to work.
+func (l *List) Search(term string) {
+ term = strings.Trim(term, " ")
+ l.cursor = 0
+ l.start = 0
+ l.search(term)
+}
+
+// CancelSearch stops the current search and returns the list to its
+// original order.
+func (l *List) CancelSearch() {
+ l.cursor = 0
+ l.start = 0
+ l.scope = l.items
+}
+
+func (l *List) search(term string) {
+ var scope []*interface{}
+
+ for i, item := range l.items {
+ if l.Searcher(term, i) {
+ scope = append(scope, item)
+ }
+ }
+
+ l.scope = scope
+}
+
+// Start returns the current render start position of the list.
+func (l *List) Start() int {
+ return l.start
+}
+
+// SetStart sets the current scroll position. Values out of bounds will be
+// clamped.
+func (l *List) SetStart(i int) {
+ if i < 0 {
+ i = 0
+ }
+ if i > l.cursor {
+ l.start = l.cursor
+ } else {
+ l.start = i
+ }
+}
+
+// SetCursor sets the position of the cursor in the list. Values out of bounds
+// will be clamped.
+func (l *List) SetCursor(i int) {
+ max := len(l.scope) - 1
+ if i >= max {
+ i = max
+ }
+ if i < 0 {
+ i = 0
+ }
+ l.cursor = i
+
+ if l.start > l.cursor {
+ l.start = l.cursor
+ } else if l.start+l.size <= l.cursor {
+ l.start = l.cursor - l.size + 1
+ }
+}
+
+// Next moves the visible list forward one item. If the selected item is out of
+// view, the newly selected item becomes the first visible item. If the list is
+// already at the bottom, nothing happens.
+func (l *List) Next() {
+ max := len(l.scope) - 1
+
+ if l.cursor < max {
+ l.cursor++
+ }
+
+ if l.start+l.size <= l.cursor {
+ l.start = l.cursor - l.size + 1
+ }
+}
+
+// PageUp moves the visible list backward by x items. Where x is the size of the
+// visible items on the list. The selected item becomes the first visible item.
+// If the list is already at the bottom, the selected item becomes the last
+// visible item.
+func (l *List) PageUp() {
+ start := l.start - l.size
+ if start < 0 {
+ l.start = 0
+ } else {
+ l.start = start
+ }
+
+ cursor := l.start
+
+ if cursor < l.cursor {
+ l.cursor = cursor
+ }
+}
+
+// PageDown moves the visible list forward by x items. Where x is the size of
+// the visible items on the list. The selected item becomes the first visible
+// item.
+func (l *List) PageDown() {
+ start := l.start + l.size
+ max := len(l.scope) - l.size
+
+ switch {
+ case len(l.scope) < l.size:
+ l.start = 0
+ case start > max:
+ l.start = max
+ default:
+ l.start = start
+ }
+
+ cursor := l.start
+
+ if cursor == l.cursor {
+ l.cursor = len(l.scope) - 1
+ } else if cursor > l.cursor {
+ l.cursor = cursor
+ }
+}
+
+// CanPageDown returns whether a list can still PageDown().
+func (l *List) CanPageDown() bool {
+ max := len(l.scope)
+ return l.start+l.size < max
+}
+
+// CanPageUp returns whether a list can still PageUp().
+func (l *List) CanPageUp() bool {
+ return l.start > 0
+}
+
+// Index returns the index of the item currently selected inside the searched list. If no item is selected,
+// the NotFound (-1) index is returned.
+func (l *List) Index() int {
+ selected := l.scope[l.cursor]
+
+ for i, item := range l.items {
+ if item == selected {
+ return i
+ }
+ }
+
+ return NotFound
+}
+
+// Items returns a slice equal to the size of the list with the current visible
+// items and the index of the active item in this list.
+func (l *List) Items() ([]interface{}, int) {
+ var result []interface{}
+ max := len(l.scope)
+ end := l.start + l.size
+
+ if end > max {
+ end = max
+ }
+
+ active := NotFound
+
+ for i, j := l.start, 0; i < end; i, j = i+1, j+1 {
+ if l.cursor == i {
+ active = j
+ }
+
+ result = append(result, *l.scope[i])
+ }
+
+ return result, active
+}
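The `List` and `Searcher` types above drive `Select`'s pagination and filtering; a minimal usage sketch, where the `Searcher` closure is an illustrative example rather than anything shipped by the package:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/manifoldco/promptui/list"
)

func main() {
	days := []string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}

	l, err := list.New(days, 3) // 3 visible items per page
	if err != nil {
		panic(err)
	}
	// Searcher receives the search term and an index into the original slice.
	l.Searcher = func(input string, index int) bool {
		return strings.Contains(strings.ToLower(days[index]), strings.ToLower(input))
	}

	l.Search("tu")
	visible, active := l.Items()
	fmt.Println(visible, "active:", active)   // [Tuesday Saturday] active: 0
	fmt.Println("original index:", l.Index()) // original index: 1
}
```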
diff --git a/vendor/github.com/manifoldco/promptui/prompt.go b/vendor/github.com/manifoldco/promptui/prompt.go
new file mode 100644
index 000000000..8e35123b0
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/prompt.go
@@ -0,0 +1,341 @@
+package promptui
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "text/template"
+
+ "github.com/chzyer/readline"
+ "github.com/manifoldco/promptui/screenbuf"
+)
+
+// Prompt represents a single line text field input with options for validation and input masks.
+type Prompt struct {
+ // Label is the value displayed on the command line prompt.
+ //
+ // The value for Label can be a simple string or a struct that will need to be accessed by dot notation
+ // inside the templates. For example, `{{ .Name }}` will display the name property of a struct.
+ Label interface{}
+
+ // Default is the initial value for the prompt. This value will be displayed next to the prompt's label
+ // and the user will be able to view or change it depending on the options.
+ Default string
+
+ // AllowEdit lets the user edit the default value. If false, any key press
+ // other than <Enter> automatically clears the default value.
+ AllowEdit bool
+
+ // Validate is an optional function that will be used against the entered value in the prompt to validate it.
+ Validate ValidateFunc
+
+ // Mask is an optional rune that sets which character to display instead of the entered characters. This
+ // allows hiding private information like passwords.
+ Mask rune
+
+ // HideEntered sets whether to hide the text after the user has pressed enter.
+ HideEntered bool
+
+ // Templates can be used to customize the prompt output. If nil is passed, the
+ // default templates are used. See the PromptTemplates docs for more info.
+ Templates *PromptTemplates
+
+ // IsConfirm makes the prompt ask for a yes or no ([Y/N]) question rather than request an input. When set,
+ // most properties related to input will be ignored.
+ IsConfirm bool
+
+ // IsVimMode enables vi-like movements (hjkl) and editing.
+ IsVimMode bool
+
+ // the Pointer defines how to render the cursor.
+ Pointer Pointer
+
+ Stdin io.ReadCloser
+ Stdout io.WriteCloser
+}
+
+// PromptTemplates allow a prompt to be customized following stdlib
+// text/template syntax. Custom state, colors and background color are available for use inside
+// the templates and are documented inside the Variable section of the docs.
+//
+// Examples
+//
+// text/templates use a special notation to display programmable content. Using the double bracket notation,
+// the value can be printed with specific helper functions. For example
+//
+// This displays the value given to the template as pure, unstylized text.
+// '{{ . }}'
+//
+// This displays the value colored in cyan
+// '{{ . | cyan }}'
+//
+// This displays the value colored in red with a cyan background-color
+// '{{ . | red | cyan }}'
+//
+// See the doc of text/template for more info: https://golang.org/pkg/text/template/
+type PromptTemplates struct {
+ // Prompt is a text/template for the prompt label displayed on the left side of the prompt.
+ Prompt string
+
+ // Confirm is a text/template for the prompt label when IsConfirm is set to true.
+ Confirm string
+
+ // Valid is a text/template for the prompt label when the value entered is valid.
+ Valid string
+
+ // Invalid is a text/template for the prompt label when the value entered is invalid.
+ Invalid string
+
+ // Success is a text/template for the prompt label when the user has pressed enter and the value has been
+ // deemed valid by the validation function. The label will keep using this template even when the prompt ends
+ // inside the console.
+ Success string
+
+ // ValidationError is a text/template for the prompt label when the value is invalid due to an error triggered by
+ // the prompt's validation function.
+ ValidationError string
+
+ // FuncMap is a map of helper functions that can be used inside of templates according to the text/template
+ // documentation.
+ //
+ // By default, FuncMap contains the color functions used to color the text in templates. If FuncMap
+ // is overridden, the colors functions must be added in the override from promptui.FuncMap to work.
+ FuncMap template.FuncMap
+
+ prompt *template.Template
+ valid *template.Template
+ invalid *template.Template
+ validation *template.Template
+ success *template.Template
+}
+
+// Run executes the prompt. It displays the label and default value if any, asking the user to enter a value.
+// Run will keep the prompt alive until it has been canceled from the command prompt or it has received a valid
+// value. It will return the value and an error if any occurred during the prompt's execution.
+func (p *Prompt) Run() (string, error) {
+ var err error
+
+ err = p.prepareTemplates()
+ if err != nil {
+ return "", err
+ }
+
+ c := &readline.Config{
+ Stdin: p.Stdin,
+ Stdout: p.Stdout,
+ EnableMask: p.Mask != 0,
+ MaskRune: p.Mask,
+ HistoryLimit: -1,
+ VimMode: p.IsVimMode,
+ UniqueEditLine: true,
+ }
+
+ err = c.Init()
+ if err != nil {
+ return "", err
+ }
+
+ rl, err := readline.NewEx(c)
+ if err != nil {
+ return "", err
+ }
+ // we're taking over the cursor, so stop showing it.
+ rl.Write([]byte(hideCursor))
+ sb := screenbuf.New(rl)
+
+ validFn := func(x string) error {
+ return nil
+ }
+ if p.Validate != nil {
+ validFn = p.Validate
+ }
+
+ var inputErr error
+ input := p.Default
+ if p.IsConfirm {
+ input = ""
+ }
+ eraseDefault := input != "" && !p.AllowEdit
+ cur := NewCursor(input, p.Pointer, eraseDefault)
+
+ listen := func(input []rune, pos int, key rune) ([]rune, int, bool) {
+ _, _, keepOn := cur.Listen(input, pos, key)
+ err := validFn(cur.Get())
+ var prompt []byte
+
+ if err != nil {
+ prompt = render(p.Templates.invalid, p.Label)
+ } else {
+ prompt = render(p.Templates.valid, p.Label)
+ if p.IsConfirm {
+ prompt = render(p.Templates.prompt, p.Label)
+ }
+ }
+
+ echo := cur.Format()
+ if p.Mask != 0 {
+ echo = cur.FormatMask(p.Mask)
+ }
+
+ prompt = append(prompt, []byte(echo)...)
+ sb.Reset()
+ sb.Write(prompt)
+ if inputErr != nil {
+ validation := render(p.Templates.validation, inputErr)
+ sb.Write(validation)
+ inputErr = nil
+ }
+ sb.Flush()
+ return nil, 0, keepOn
+ }
+
+ c.SetListener(listen)
+
+ for {
+ _, err = rl.Readline()
+ inputErr = validFn(cur.Get())
+ if inputErr == nil {
+ break
+ }
+
+ if err != nil {
+ break
+ }
+ }
+
+ if err != nil {
+ switch err {
+ case readline.ErrInterrupt:
+ err = ErrInterrupt
+ case io.EOF:
+ err = ErrEOF
+ }
+ if err.Error() == "Interrupt" {
+ err = ErrInterrupt
+ }
+ sb.Reset()
+ sb.WriteString("")
+ sb.Flush()
+ rl.Write([]byte(showCursor))
+ rl.Close()
+ return "", err
+ }
+
+ echo := cur.Get()
+ if p.Mask != 0 {
+ echo = cur.GetMask(p.Mask)
+ }
+
+ prompt := render(p.Templates.success, p.Label)
+ prompt = append(prompt, []byte(echo)...)
+
+ if p.IsConfirm {
+ lowerDefault := strings.ToLower(p.Default)
+ if strings.ToLower(cur.Get()) != "y" && (lowerDefault != "y" || (lowerDefault == "y" && cur.Get() != "")) {
+ prompt = render(p.Templates.invalid, p.Label)
+ err = ErrAbort
+ }
+ }
+
+ if p.HideEntered {
+ clearScreen(sb)
+ } else {
+ sb.Reset()
+ sb.Write(prompt)
+ sb.Flush()
+ }
+
+ rl.Write([]byte(showCursor))
+ rl.Close()
+
+ return cur.Get(), err
+}
+
+func (p *Prompt) prepareTemplates() error {
+ tpls := p.Templates
+ if tpls == nil {
+ tpls = &PromptTemplates{}
+ }
+
+ if tpls.FuncMap == nil {
+ tpls.FuncMap = FuncMap
+ }
+
+ bold := Styler(FGBold)
+
+ if p.IsConfirm {
+ if tpls.Confirm == "" {
+ confirm := "y/N"
+ if strings.ToLower(p.Default) == "y" {
+ confirm = "Y/n"
+ }
+ tpls.Confirm = fmt.Sprintf(`{{ "%s" | bold }} {{ . | bold }}? {{ "[%s]" | faint }} `, IconInitial, confirm)
+ }
+
+ tpl, err := template.New("").Funcs(tpls.FuncMap).Parse(tpls.Confirm)
+ if err != nil {
+ return err
+ }
+
+ tpls.prompt = tpl
+ } else {
+ if tpls.Prompt == "" {
+ tpls.Prompt = fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconInitial), bold(":"))
+ }
+
+ tpl, err := template.New("").Funcs(tpls.FuncMap).Parse(tpls.Prompt)
+ if err != nil {
+ return err
+ }
+
+ tpls.prompt = tpl
+ }
+
+ if tpls.Valid == "" {
+ tpls.Valid = fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconGood), bold(":"))
+ }
+
+ tpl, err := template.New("").Funcs(tpls.FuncMap).Parse(tpls.Valid)
+ if err != nil {
+ return err
+ }
+
+ tpls.valid = tpl
+
+ if tpls.Invalid == "" {
+ tpls.Invalid = fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconBad), bold(":"))
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Invalid)
+ if err != nil {
+ return err
+ }
+
+ tpls.invalid = tpl
+
+ if tpls.ValidationError == "" {
+ tpls.ValidationError = `{{ ">>" | red }} {{ . | red }}`
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.ValidationError)
+ if err != nil {
+ return err
+ }
+
+ tpls.validation = tpl
+
+ if tpls.Success == "" {
+ tpls.Success = fmt.Sprintf("{{ . | faint }}%s ", Styler(FGFaint)(":"))
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Success)
+ if err != nil {
+ return err
+ }
+
+ tpls.success = tpl
+
+ p.Templates = tpls
+
+ return nil
+}
diff --git a/vendor/github.com/manifoldco/promptui/promptui.go b/vendor/github.com/manifoldco/promptui/promptui.go
new file mode 100644
index 000000000..6248e5859
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/promptui.go
@@ -0,0 +1,27 @@
+// Package promptui is a library providing a simple interface to create command-line prompts for Go.
+// It can be easily integrated into spf13/cobra, urfave/cli or any other CLI Go application.
+//
+// promptui has two main input modes:
+//
+// Prompt provides a single line for user input. It supports optional live validation,
+// confirmation and masking the input.
+//
+// Select provides a list of options to choose from. It supports pagination, search,
+// detailed view and custom templates.
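+//
+// A minimal, illustrative sketch of a validated Prompt (the label and the
+// validation rule below are hypothetical, not part of the package):
+//
+//	p := promptui.Prompt{
+//		Label: "Username",
+//		Validate: func(input string) error {
+//			if len(input) < 3 {
+//				return errors.New("username must be at least 3 characters")
+//			}
+//			return nil
+//		},
+//	}
+//	result, err := p.Run()
+//	if err != nil {
+//		// ErrInterrupt and ErrEOF are returned when the prompt is cancelled with ^C or ^D.
+//		return
+//	}
+//	fmt.Printf("entered %q\n", result)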
+package promptui
+
+import "errors"
+
+// ErrEOF is the error returned from prompts when EOF is encountered.
+var ErrEOF = errors.New("^D")
+
+// ErrInterrupt is the error returned from prompts when an interrupt (ctrl-c) is
+// encountered.
+var ErrInterrupt = errors.New("^C")
+
+// ErrAbort is the error returned when a confirm prompt is answered with "n".
+var ErrAbort = errors.New("")
+
+// ValidateFunc is a placeholder type for any validation function that validates a given input. It should return
+// a ValidationError if the input is not valid.
+type ValidateFunc func(string) error
diff --git a/vendor/github.com/manifoldco/promptui/screenbuf/screenbuf.go b/vendor/github.com/manifoldco/promptui/screenbuf/screenbuf.go
new file mode 100644
index 000000000..861390045
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/screenbuf/screenbuf.go
@@ -0,0 +1,151 @@
+package screenbuf
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+)
+
+const esc = "\033["
+
+var (
+ clearLine = []byte(esc + "2K\r")
+ moveUp = []byte(esc + "1A")
+ moveDown = []byte(esc + "1B")
+)
+
+// ScreenBuf is a convenient way to write to terminal screens. It creates,
+// clears, and moves up or down lines as needed to write the output to the
+// terminal using ANSI escape codes.
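+//
+// A rough usage sketch (w is assumed to be an io.Writer such as os.Stdout):
+//
+//	sb := screenbuf.New(w)
+//	for i := 0; i < 3; i++ {
+//		sb.Reset()                                 // mark the previous frame for clearing
+//		sb.WriteString("rendering frame")          // one line per call, no \n or \r
+//		sb.WriteString(fmt.Sprintf("step %d", i+1))
+//		sb.Flush()                                 // push the frame to w and rewind the cursor
+//	}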
+type ScreenBuf struct {
+ w io.Writer
+ buf *bytes.Buffer
+ reset bool
+ cursor int
+ height int
+}
+
+// New creates and initializes a new ScreenBuf.
+func New(w io.Writer) *ScreenBuf {
+ return &ScreenBuf{buf: &bytes.Buffer{}, w: w}
+}
+
+// Reset truncates the underlying buffer and marks all of its previous lines to be
+// cleared during the next Write.
+func (s *ScreenBuf) Reset() {
+ s.buf.Reset()
+ s.reset = true
+}
+
+// Clear clears all previous lines and the output starts from the top.
+func (s *ScreenBuf) Clear() error {
+ for i := 0; i < s.height; i++ {
+ _, err := s.buf.Write(moveUp)
+ if err != nil {
+ return err
+ }
+ _, err = s.buf.Write(clearLine)
+ if err != nil {
+ return err
+ }
+ }
+ s.cursor = 0
+ s.height = 0
+ s.reset = false
+ return nil
+}
+
+// Write writes a single line to the underlying buffer. If the ScreenBuf was
+// previously reset, all previous lines are cleared and the output starts from
+// the top. Lines containing \r or \n will cause an error since they can interfere with the
+// terminal's ability to move between lines.
+func (s *ScreenBuf) Write(b []byte) (int, error) {
+ if bytes.ContainsAny(b, "\r\n") {
+ return 0, fmt.Errorf("%q should not contain either \\r or \\n", b)
+ }
+
+ if s.reset {
+ if err := s.Clear(); err != nil {
+ return 0, err
+ }
+ }
+
+ switch {
+ case s.cursor == s.height:
+ n, err := s.buf.Write(clearLine)
+ if err != nil {
+ return n, err
+ }
+
+ n, err = s.buf.Write(b)
+ if err != nil {
+ return n, err
+ }
+
+ _, err = s.buf.Write([]byte("\n"))
+ if err != nil {
+ return n, err
+ }
+
+ s.height++
+ s.cursor++
+ return n, nil
+ case s.cursor < s.height:
+ n, err := s.buf.Write(clearLine)
+ if err != nil {
+ return n, err
+ }
+ n, err = s.buf.Write(b)
+ if err != nil {
+ return n, err
+ }
+ n, err = s.buf.Write(moveDown)
+ if err != nil {
+ return n, err
+ }
+ s.cursor++
+ return n, nil
+ default:
+ return 0, fmt.Errorf("Invalid write cursor position (%d) exceeded line height: %d", s.cursor, s.height)
+ }
+}
+
+// Flush writes any buffered data to the underlying io.Writer, ensuring that any pending data is displayed.
+func (s *ScreenBuf) Flush() error {
+ for i := s.cursor; i < s.height; i++ {
+ if i < s.height {
+ _, err := s.buf.Write(clearLine)
+ if err != nil {
+ return err
+ }
+ }
+ _, err := s.buf.Write(moveDown)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err := s.buf.WriteTo(s.w)
+ if err != nil {
+ return err
+ }
+
+ s.buf.Reset()
+
+ for i := 0; i < s.height; i++ {
+ _, err := s.buf.Write(moveUp)
+ if err != nil {
+ return err
+ }
+ }
+
+ s.cursor = 0
+
+ return nil
+}
+
+// WriteString is a convenience function that writes a new line from a string.
+// See ScreenBuf.Write() for a detailed explanation of the behaviour.
+func (s *ScreenBuf) WriteString(str string) (int, error) {
+ return s.Write([]byte(str))
+}
diff --git a/vendor/github.com/manifoldco/promptui/select.go b/vendor/github.com/manifoldco/promptui/select.go
new file mode 100644
index 000000000..19b9e0c2e
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/select.go
@@ -0,0 +1,637 @@
+package promptui
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "text/template"
+
+ "github.com/chzyer/readline"
+ "github.com/juju/ansiterm"
+ "github.com/manifoldco/promptui/list"
+ "github.com/manifoldco/promptui/screenbuf"
+)
+
+// SelectedAdd is used internally inside SelectWithAdd when the add option is selected in select mode.
+// Since -1 is not a possible selected index, this ensures that add mode is always unique inside
+// SelectWithAdd's logic.
+const SelectedAdd = -1
+
+// Select represents a list of items used to enable selections. It can be used to build search engines, menus
+// or lists of items in a CLI-based prompt.
+type Select struct {
+ // Label is the text displayed on top of the list to direct input. The IconInitial value "?" will be
+ // appended automatically to the label so it does not need to be added.
+ //
+ // The value for Label can be a simple string or a struct that will need to be accessed by dot notation
+ // inside the templates. For example, `{{ .Name }}` will display the name property of a struct.
+ Label interface{}
+
+ // Items are the items to display inside the list. It expects a slice of any kind of values, including strings.
+ //
+ // If using a slice of strings, promptui will use those strings directly in its base templates or the
+ // provided templates. If using any other type in the slice, it will attempt to transform it into a string
+ // before giving it to its templates. Custom templates will override this behavior if using the dot notation
+ // inside the templates.
+ //
+ // For example, `{{ .Name }}` will display the name property of a struct.
+ Items interface{}
+
+ // Size is the number of items that should appear on the select before scrolling is necessary. Defaults to 5.
+ Size int
+
+ // CursorPos is the initial position of the cursor.
+ CursorPos int
+
+ // IsVimMode sets whether to use vim mode when using readline in the command prompt. Look at
+ // https://godoc.org/github.com/chzyer/readline#Config for more information on readline.
+ IsVimMode bool
+
+ // HideHelp sets whether to hide help information.
+ HideHelp bool
+
+ // HideSelected sets whether to hide the text displayed after an item is successfully selected.
+ HideSelected bool
+
+ // Templates can be used to customize the select output. If nil is passed, the
+ // default templates are used. See the SelectTemplates docs for more info.
+ Templates *SelectTemplates
+
+ // Keys is the set of keys used in select mode to control the command line interface. See the SelectKeys docs for
+ // more info.
+ Keys *SelectKeys
+
+ // Searcher is a function that can be implemented to refine the base searching algorithm in selects.
+ //
+ // Searcher will receive the searched term and the item's index and should return a boolean
+ // for whether or not the terms are alike. It is unimplemented by default and search will not work unless
+ // it is implemented.
+ Searcher list.Searcher
+
+ // StartInSearchMode sets whether or not the select mode should start in search mode or selection mode.
+ // For search mode to work, the Searcher property must be implemented.
+ StartInSearchMode bool
+
+ list *list.List
+
+ // Pointer is a function that determines how to render the cursor.
+ Pointer Pointer
+
+ Stdin io.ReadCloser
+ Stdout io.WriteCloser
+}
+
+// SelectKeys defines the available keys used by select mode to enable the user to move around the list
+// and trigger search mode. See the Key struct docs for more information on keys.
+type SelectKeys struct {
+ // Next is the key used to move to the next element inside the list. Defaults to down arrow key.
+ Next Key
+
+ // Prev is the key used to move to the previous element inside the list. Defaults to up arrow key.
+ Prev Key
+
+ // PageUp is the key used to jump back to the first element inside the list. Defaults to left arrow key.
+ PageUp Key
+
+ // PageDown is the key used to jump forward to the last element inside the list. Defaults to right arrow key.
+ PageDown Key
+
+ // Search is the key used to trigger the search mode for the list. Defaults to the "/" key.
+ Search Key
+}
+
+// Key defines a keyboard code and a display representation for the help menu.
+type Key struct {
+ // Code is a rune that will be used to compare against typed keys with readline.
+ // Check https://github.com/chzyer/readline for a list of codes
+ Code rune
+
+ // Display is the string that will be displayed inside the help menu to help inform the user
+ // of which key to use on their keyboard for various functions.
+ Display string
+}
+
+// SelectTemplates allow a select list to be customized following stdlib
+// text/template syntax. Custom state, colors and background color are available for use inside
+// the templates and are documented inside the Variable section of the docs.
+//
+// Examples
+//
+// text/templates use a special notation to display programmable content. Using the double bracket notation,
+// the value can be printed with specific helper functions. For example
+//
+// This displays the value given to the template as pure, unstylized text. Structs are transformed to string
+// with this notation.
+// '{{ . }}'
+//
+// This displays the name property of the value colored in cyan
+// '{{ .Name | cyan }}'
+//
+// This displays the Label property of the value colored in red with a cyan background color
+// '{{ .Label | red | cyan }}'
+//
+// See the doc of text/template for more info: https://golang.org/pkg/text/template/
+//
+// Notes
+//
+// Setting any of these templates will remove the icons from the default templates. They must
+// be added back in each of their specific templates. The styles.go constants contain the default icons.
+type SelectTemplates struct {
+ // Label is a text/template for the main command line label. Defaults to printing the label as is with
+ // the IconInitial.
+ Label string
+
+ // Active is a text/template for when an item is currently active within the list.
+ Active string
+
+ // Inactive is a text/template for when an item is not currently active inside the list. This
+ // template is used for all items unless they are active or selected.
+ Inactive string
+
+ // Selected is a text/template for when an item was successfully selected.
+ Selected string
+
+ // Details is a text/template used when an item is currently active, to show
+ // additional information about it. It can have multiple lines.
+ //
+ // Detail will always be displayed for the active element and thus can be used to display additional
+ // information on the element beyond its label.
+ //
+ // promptui will not trim spaces and tabs will be displayed if the template is indented.
+ Details string
+
+ // Help is a text/template for displaying instructions at the top. By default
+ // it shows keys for movement and search.
+ Help string
+
+ // FuncMap is a map of helper functions that can be used inside of templates according to the text/template
+ // documentation.
+ //
+ // By default, FuncMap contains the color functions used to color the text in templates. If FuncMap
+ // is overridden, the color functions must be added in the override from promptui.FuncMap to work.
+ FuncMap template.FuncMap
+
+ label *template.Template
+ active *template.Template
+ inactive *template.Template
+ selected *template.Template
+ details *template.Template
+ help *template.Template
+}
+
+// SearchPrompt is the prompt displayed in search mode.
+var SearchPrompt = "Search: "
+
+// Run executes the select list. It displays the label and the list of items, asking the user to choose any
+// value within the list. Run will keep the prompt alive until it has been canceled from
+// the command prompt or it has received a valid value. It will return the value and an error if any
+// occurred during the select's execution.
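+//
+// A minimal, illustrative sketch (the label and items below are hypothetical):
+//
+//	s := promptui.Select{
+//		Label: "Select a color",
+//		Items: []string{"red", "green", "blue"},
+//	}
+//	idx, value, err := s.Run()
+//	if err != nil {
+//		// ErrInterrupt is returned when the prompt is cancelled with ^C.
+//		return
+//	}
+//	fmt.Printf("chose %q at index %d\n", value, idx)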
+func (s *Select) Run() (int, string, error) {
+ return s.RunCursorAt(s.CursorPos, 0)
+}
+
+// RunCursorAt executes the select list, initializing the cursor to the given
+// position. Invalid cursor positions will be clamped to valid values. It
+// displays the label and the list of items, asking the user to choose any value
+// within the list. Run will keep the prompt alive until it has been canceled
+// from the command prompt or it has received a valid value. It will return
+// the value and an error if any occurred during the select's execution.
+func (s *Select) RunCursorAt(cursorPos, scroll int) (int, string, error) {
+ if s.Size == 0 {
+ s.Size = 5
+ }
+
+ l, err := list.New(s.Items, s.Size)
+ if err != nil {
+ return 0, "", err
+ }
+ l.Searcher = s.Searcher
+
+ s.list = l
+
+ s.setKeys()
+
+ err = s.prepareTemplates()
+ if err != nil {
+ return 0, "", err
+ }
+ return s.innerRun(cursorPos, scroll, ' ')
+}
+
+func (s *Select) innerRun(cursorPos, scroll int, top rune) (int, string, error) {
+ c := &readline.Config{
+ Stdin: s.Stdin,
+ Stdout: s.Stdout,
+ }
+ err := c.Init()
+ if err != nil {
+ return 0, "", err
+ }
+
+ c.Stdin = readline.NewCancelableStdin(c.Stdin)
+
+ if s.IsVimMode {
+ c.VimMode = true
+ }
+
+ c.HistoryLimit = -1
+ c.UniqueEditLine = true
+
+ rl, err := readline.NewEx(c)
+ if err != nil {
+ return 0, "", err
+ }
+
+ rl.Write([]byte(hideCursor))
+ sb := screenbuf.New(rl)
+
+ cur := NewCursor("", s.Pointer, false)
+
+ canSearch := s.Searcher != nil
+ searchMode := s.StartInSearchMode
+ s.list.SetCursor(cursorPos)
+ s.list.SetStart(scroll)
+
+ c.SetListener(func(line []rune, pos int, key rune) ([]rune, int, bool) {
+ switch {
+ case key == KeyEnter:
+ return nil, 0, true
+ case key == s.Keys.Next.Code || (key == 'j' && !searchMode):
+ s.list.Next()
+ case key == s.Keys.Prev.Code || (key == 'k' && !searchMode):
+ s.list.Prev()
+ case key == s.Keys.Search.Code:
+ if !canSearch {
+ break
+ }
+
+ if searchMode {
+ searchMode = false
+ cur.Replace("")
+ s.list.CancelSearch()
+ } else {
+ searchMode = true
+ }
+ case key == KeyBackspace || key == KeyCtrlH:
+ if !canSearch || !searchMode {
+ break
+ }
+
+ cur.Backspace()
+ if len(cur.Get()) > 0 {
+ s.list.Search(cur.Get())
+ } else {
+ s.list.CancelSearch()
+ }
+ case key == s.Keys.PageUp.Code || (key == 'h' && !searchMode):
+ s.list.PageUp()
+ case key == s.Keys.PageDown.Code || (key == 'l' && !searchMode):
+ s.list.PageDown()
+ default:
+ if canSearch && searchMode {
+ cur.Update(string(line))
+ s.list.Search(cur.Get())
+ }
+ }
+
+ if searchMode {
+ header := SearchPrompt + cur.Format()
+ sb.WriteString(header)
+ } else if !s.HideHelp {
+ help := s.renderHelp(canSearch)
+ sb.Write(help)
+ }
+
+ label := render(s.Templates.label, s.Label)
+ sb.Write(label)
+
+ items, idx := s.list.Items()
+ last := len(items) - 1
+
+ for i, item := range items {
+ page := " "
+
+ switch i {
+ case 0:
+ if s.list.CanPageUp() {
+ page = "↑"
+ } else {
+ page = string(top)
+ }
+ case last:
+ if s.list.CanPageDown() {
+ page = "↓"
+ }
+ }
+
+ output := []byte(page + " ")
+
+ if i == idx {
+ output = append(output, render(s.Templates.active, item)...)
+ } else {
+ output = append(output, render(s.Templates.inactive, item)...)
+ }
+
+ sb.Write(output)
+ }
+
+ if idx == list.NotFound {
+ sb.WriteString("")
+ sb.WriteString("No results")
+ } else {
+ active := items[idx]
+
+ details := s.renderDetails(active)
+ for _, d := range details {
+ sb.Write(d)
+ }
+ }
+
+ sb.Flush()
+
+ return nil, 0, true
+ })
+
+ for {
+ _, err = rl.Readline()
+
+ if err != nil {
+ switch {
+ case err == readline.ErrInterrupt, err.Error() == "Interrupt":
+ err = ErrInterrupt
+ case err == io.EOF:
+ err = ErrEOF
+ }
+ break
+ }
+
+ _, idx := s.list.Items()
+ if idx != list.NotFound {
+ break
+ }
+
+ }
+
+ if err != nil {
+ if err.Error() == "Interrupt" {
+ err = ErrInterrupt
+ }
+ sb.Reset()
+ sb.WriteString("")
+ sb.Flush()
+ rl.Write([]byte(showCursor))
+ rl.Close()
+ return 0, "", err
+ }
+
+ items, idx := s.list.Items()
+ item := items[idx]
+
+ if s.HideSelected {
+ clearScreen(sb)
+ } else {
+ sb.Reset()
+ sb.Write(render(s.Templates.selected, item))
+ sb.Flush()
+ }
+
+ rl.Write([]byte(showCursor))
+ rl.Close()
+
+ return s.list.Index(), fmt.Sprintf("%v", item), err
+}
+
+// ScrollPosition returns the current scroll position.
+func (s *Select) ScrollPosition() int {
+ return s.list.Start()
+}
+
+func (s *Select) prepareTemplates() error {
+ tpls := s.Templates
+ if tpls == nil {
+ tpls = &SelectTemplates{}
+ }
+
+ if tpls.FuncMap == nil {
+ tpls.FuncMap = FuncMap
+ }
+
+ if tpls.Label == "" {
+ tpls.Label = fmt.Sprintf("%s {{.}}: ", IconInitial)
+ }
+
+ tpl, err := template.New("").Funcs(tpls.FuncMap).Parse(tpls.Label)
+ if err != nil {
+ return err
+ }
+
+ tpls.label = tpl
+
+ if tpls.Active == "" {
+ tpls.Active = fmt.Sprintf("%s {{ . | underline }}", IconSelect)
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Active)
+ if err != nil {
+ return err
+ }
+
+ tpls.active = tpl
+
+ if tpls.Inactive == "" {
+ tpls.Inactive = " {{.}}"
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Inactive)
+ if err != nil {
+ return err
+ }
+
+ tpls.inactive = tpl
+
+ if tpls.Selected == "" {
+ tpls.Selected = fmt.Sprintf(`{{ "%s" | green }} {{ . | faint }}`, IconGood)
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Selected)
+ if err != nil {
+ return err
+ }
+ tpls.selected = tpl
+
+ if tpls.Details != "" {
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Details)
+ if err != nil {
+ return err
+ }
+
+ tpls.details = tpl
+ }
+
+ if tpls.Help == "" {
+ tpls.Help = fmt.Sprintf(`{{ "Use the arrow keys to navigate:" | faint }} {{ .NextKey | faint }} ` +
+ `{{ .PrevKey | faint }} {{ .PageDownKey | faint }} {{ .PageUpKey | faint }} ` +
+ `{{ if .Search }} {{ "and" | faint }} {{ .SearchKey | faint }} {{ "toggles search" | faint }}{{ end }}`)
+ }
+
+ tpl, err = template.New("").Funcs(tpls.FuncMap).Parse(tpls.Help)
+ if err != nil {
+ return err
+ }
+
+ tpls.help = tpl
+
+ s.Templates = tpls
+
+ return nil
+}
+
+// SelectWithAdd represents a list for selecting a single item inside a list of items with the possibility to
+// add new items to the list.
+type SelectWithAdd struct {
+ // Label is the text displayed on top of the list to direct input. The IconInitial value "?" will be
+ // appended automatically to the label so it does not need to be added.
+ Label string
+
+ // Items are the items to display inside the list. Each item will be listed individually with the
+ // AddLabel as the first item of the list.
+ Items []string
+
+ // AddLabel is the label used for the first item of the list that enables adding a new item.
+ // Selecting this item in the list displays the add item prompt using promptui/prompt.
+ AddLabel string
+
+ // Validate is an optional function that will be used against the entered value in the prompt to validate it.
+ // If the value is valid, it is returned to the caller to be added to the list.
+ Validate ValidateFunc
+
+ // IsVimMode sets whether to use vim mode when using readline in the command prompt. Look at
+ // https://godoc.org/github.com/chzyer/readline#Config for more information on readline.
+ IsVimMode bool
+
+ // Pointer is a function that defines how to render the cursor.
+ Pointer Pointer
+
+ // HideHelp sets whether to hide help information.
+ HideHelp bool
+}
+
+// Run executes the select list. It displays the label and the list of items, asking the user to choose any
+// value within the list or add their own. Run will keep the prompt alive until it has been canceled from
+// the command prompt or it has received a valid value.
+//
+// If the AddLabel item is selected in the list, this function will return a -1 index with the added label and no error.
+// Otherwise, it will return the index and the value of the selected item. In any case, if an error is triggered, it
+// will also return the error as its third return value.
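+//
+// A minimal, illustrative sketch (the labels and items below are hypothetical):
+//
+//	sa := promptui.SelectWithAdd{
+//		Label:    "Pick a tag",
+//		Items:    []string{"v1.0", "v1.1"},
+//		AddLabel: "Other (enter a new tag)",
+//	}
+//	idx, value, err := sa.Run()
+//	if err == nil && idx == promptui.SelectedAdd {
+//		// value holds the newly entered item rather than an existing one.
+//	}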
+func (sa *SelectWithAdd) Run() (int, string, error) {
+ if len(sa.Items) > 0 {
+ newItems := append([]string{sa.AddLabel}, sa.Items...)
+
+ list, err := list.New(newItems, 5)
+ if err != nil {
+ return 0, "", err
+ }
+
+ s := Select{
+ Label: sa.Label,
+ Items: newItems,
+ IsVimMode: sa.IsVimMode,
+ HideHelp: sa.HideHelp,
+ Size: 5,
+ list: list,
+ Pointer: sa.Pointer,
+ }
+ s.setKeys()
+
+ err = s.prepareTemplates()
+ if err != nil {
+ return 0, "", err
+ }
+
+ selected, value, err := s.innerRun(1, 0, '+')
+ if err != nil || selected != 0 {
+ return selected - 1, value, err
+ }
+
+ // XXX run through terminal for windows
+ os.Stdout.Write([]byte(upLine(1) + "\r" + clearLine))
+ }
+
+ p := Prompt{
+ Label: sa.AddLabel,
+ Validate: sa.Validate,
+ IsVimMode: sa.IsVimMode,
+ Pointer: sa.Pointer,
+ }
+ value, err := p.Run()
+ return SelectedAdd, value, err
+}
+
+func (s *Select) setKeys() {
+ if s.Keys != nil {
+ return
+ }
+ s.Keys = &SelectKeys{
+ Prev: Key{Code: KeyPrev, Display: KeyPrevDisplay},
+ Next: Key{Code: KeyNext, Display: KeyNextDisplay},
+ PageUp: Key{Code: KeyBackward, Display: KeyBackwardDisplay},
+ PageDown: Key{Code: KeyForward, Display: KeyForwardDisplay},
+ Search: Key{Code: '/', Display: "/"},
+ }
+}
+
+func (s *Select) renderDetails(item interface{}) [][]byte {
+ if s.Templates.details == nil {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ w := ansiterm.NewTabWriter(&buf, 0, 0, 8, ' ', 0)
+
+ err := s.Templates.details.Execute(w, item)
+ if err != nil {
+ fmt.Fprintf(w, "%v", item)
+ }
+
+ w.Flush()
+
+ output := buf.Bytes()
+
+ return bytes.Split(output, []byte("\n"))
+}
+
+func (s *Select) renderHelp(b bool) []byte {
+ keys := struct {
+ NextKey string
+ PrevKey string
+ PageDownKey string
+ PageUpKey string
+ Search bool
+ SearchKey string
+ }{
+ NextKey: s.Keys.Next.Display,
+ PrevKey: s.Keys.Prev.Display,
+ PageDownKey: s.Keys.PageDown.Display,
+ PageUpKey: s.Keys.PageUp.Display,
+ SearchKey: s.Keys.Search.Display,
+ Search: b,
+ }
+
+ return render(s.Templates.help, keys)
+}
+
+func render(tpl *template.Template, data interface{}) []byte {
+ var buf bytes.Buffer
+ err := tpl.Execute(&buf, data)
+ if err != nil {
+ return []byte(fmt.Sprintf("%v", data))
+ }
+ return buf.Bytes()
+}
+
+func clearScreen(sb *screenbuf.ScreenBuf) {
+ sb.Reset()
+ sb.Clear()
+ sb.Flush()
+}
diff --git a/vendor/github.com/manifoldco/promptui/styles.go b/vendor/github.com/manifoldco/promptui/styles.go
new file mode 100644
index 000000000..d7698c9cf
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/styles.go
@@ -0,0 +1,23 @@
+// +build !windows
+
+package promptui
+
+// These are the default icons used by promptui for select and prompts. They should not be overridden directly;
+// instead, customize them through the use of custom templates.
+var (
+ // IconInitial is the icon used when starting in prompt mode and the icon next to the label when
+ // starting in select mode.
+ IconInitial = Styler(FGBlue)("?")
+
+ // IconGood is the icon used when a good answer is entered in prompt mode.
+ IconGood = Styler(FGGreen)("✔")
+
+ // IconWarn is the icon used when a good, but potentially invalid answer is entered in prompt mode.
+ IconWarn = Styler(FGYellow)("⚠")
+
+ // IconBad is the icon used when a bad answer is entered in prompt mode.
+ IconBad = Styler(FGRed)("✗")
+
+ // IconSelect is the icon used to identify the currently selected item in select mode.
+ IconSelect = Styler(FGBold)("▸")
+)
diff --git a/vendor/github.com/manifoldco/promptui/styles_windows.go b/vendor/github.com/manifoldco/promptui/styles_windows.go
new file mode 100644
index 000000000..36de268a6
--- /dev/null
+++ b/vendor/github.com/manifoldco/promptui/styles_windows.go
@@ -0,0 +1,21 @@
+package promptui
+
+// These are the default icons used by promptui for select and prompts. They can either be overridden directly
+// from these variables or customized through the use of custom templates.
+var (
+ // IconInitial is the icon used when starting in prompt mode and the icon next to the label when
+ // starting in select mode.
+ IconInitial = Styler(FGBlue)("?")
+
+ // IconGood is the icon used when a good answer is entered in prompt mode.
+ IconGood = Styler(FGGreen)("v")
+
+ // IconWarn is the icon used when a good, but potentially invalid answer is entered in prompt mode.
+ IconWarn = Styler(FGYellow)("!")
+
+ // IconBad is the icon used when a bad answer is entered in prompt mode.
+ IconBad = Styler(FGRed)("x")
+
+ // IconSelect is the icon used to identify the currently selected item in select mode.
+ IconSelect = Styler(FGBold)(">")
+)
diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml
new file mode 100644
index 000000000..98db8f060
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+ - tip
+
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw
diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE
new file mode 100644
index 000000000..91b5cef30
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 000000000..56729a92c
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,48 @@
+# go-colorable
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
+[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (ansicon can work around this, but it requires an external program.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+You can compile the above code on non-Windows OSes as well.
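+
+A dependency-free sketch using only the standard library looks roughly like this
+(the escape codes are plain ANSI SGR sequences; the message text is arbitrary):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mattn/go-colorable"
+)
+
+func main() {
+	out := colorable.NewColorableStdout()
+	// "\x1b[32m" switches the foreground to green, "\x1b[0m" resets attributes.
+	fmt.Fprintf(out, "\x1b[32m%s\x1b[0m\n", "this line is green, even on Windows")
+}
+```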
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
new file mode 100644
index 000000000..1f28d773d
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -0,0 +1,29 @@
+// +build appengine
+
+package colorable
+
+import (
+ "io"
+ "os"
+
+ _ "github.com/mattn/go-isatty"
+)
+
+// NewColorable returns a new instance of Writer which handles escape sequences.
+func NewColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ return file
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+ return os.Stdout
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+ return os.Stderr
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 000000000..887f203dc
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,30 @@
+// +build !windows
+// +build !appengine
+
+package colorable
+
+import (
+ "io"
+ "os"
+
+ _ "github.com/mattn/go-isatty"
+)
+
+// NewColorable returns a new instance of Writer which handles escape sequences.
+func NewColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ return file
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+ return os.Stdout
+}
+
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
+func NewColorableStderr() io.Writer {
+ return os.Stderr
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
new file mode 100644
index 000000000..e17a5474e
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -0,0 +1,884 @@
+// +build windows
+// +build !appengine
+
+package colorable
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/mattn/go-isatty"
+)
+
+const (
+ foregroundBlue = 0x1
+ foregroundGreen = 0x2
+ foregroundRed = 0x4
+ foregroundIntensity = 0x8
+ foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+ backgroundBlue = 0x10
+ backgroundGreen = 0x20
+ backgroundRed = 0x40
+ backgroundIntensity = 0x80
+ backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+ x short
+ y short
+}
+
+type smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+}
+
+type consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+}
+
+type consoleCursorInfo struct {
+ size dword
+ visible int32
+}
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
+ procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
+ procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW")
+)
+
+// Writer provides a colorable Writer to the console.
+type Writer struct {
+ out io.Writer
+ handle syscall.Handle
+ oldattr word
+ oldpos coord
+}
+
+// NewColorable returns a new instance of Writer which handles escape sequences from File.
+func NewColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ if isatty.IsTerminal(file.Fd()) {
+ var csbi consoleScreenBufferInfo
+ handle := syscall.Handle(file.Fd())
+ procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+ return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+ }
+ return file
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+ return NewColorable(os.Stdout)
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+ return NewColorable(os.Stderr)
+}
+
+var color256 = map[int]int{
+ 0: 0x000000,
+ 1: 0x800000,
+ 2: 0x008000,
+ 3: 0x808000,
+ 4: 0x000080,
+ 5: 0x800080,
+ 6: 0x008080,
+ 7: 0xc0c0c0,
+ 8: 0x808080,
+ 9: 0xff0000,
+ 10: 0x00ff00,
+ 11: 0xffff00,
+ 12: 0x0000ff,
+ 13: 0xff00ff,
+ 14: 0x00ffff,
+ 15: 0xffffff,
+ 16: 0x000000,
+ 17: 0x00005f,
+ 18: 0x000087,
+ 19: 0x0000af,
+ 20: 0x0000d7,
+ 21: 0x0000ff,
+ 22: 0x005f00,
+ 23: 0x005f5f,
+ 24: 0x005f87,
+ 25: 0x005faf,
+ 26: 0x005fd7,
+ 27: 0x005fff,
+ 28: 0x008700,
+ 29: 0x00875f,
+ 30: 0x008787,
+ 31: 0x0087af,
+ 32: 0x0087d7,
+ 33: 0x0087ff,
+ 34: 0x00af00,
+ 35: 0x00af5f,
+ 36: 0x00af87,
+ 37: 0x00afaf,
+ 38: 0x00afd7,
+ 39: 0x00afff,
+ 40: 0x00d700,
+ 41: 0x00d75f,
+ 42: 0x00d787,
+ 43: 0x00d7af,
+ 44: 0x00d7d7,
+ 45: 0x00d7ff,
+ 46: 0x00ff00,
+ 47: 0x00ff5f,
+ 48: 0x00ff87,
+ 49: 0x00ffaf,
+ 50: 0x00ffd7,
+ 51: 0x00ffff,
+ 52: 0x5f0000,
+ 53: 0x5f005f,
+ 54: 0x5f0087,
+ 55: 0x5f00af,
+ 56: 0x5f00d7,
+ 57: 0x5f00ff,
+ 58: 0x5f5f00,
+ 59: 0x5f5f5f,
+ 60: 0x5f5f87,
+ 61: 0x5f5faf,
+ 62: 0x5f5fd7,
+ 63: 0x5f5fff,
+ 64: 0x5f8700,
+ 65: 0x5f875f,
+ 66: 0x5f8787,
+ 67: 0x5f87af,
+ 68: 0x5f87d7,
+ 69: 0x5f87ff,
+ 70: 0x5faf00,
+ 71: 0x5faf5f,
+ 72: 0x5faf87,
+ 73: 0x5fafaf,
+ 74: 0x5fafd7,
+ 75: 0x5fafff,
+ 76: 0x5fd700,
+ 77: 0x5fd75f,
+ 78: 0x5fd787,
+ 79: 0x5fd7af,
+ 80: 0x5fd7d7,
+ 81: 0x5fd7ff,
+ 82: 0x5fff00,
+ 83: 0x5fff5f,
+ 84: 0x5fff87,
+ 85: 0x5fffaf,
+ 86: 0x5fffd7,
+ 87: 0x5fffff,
+ 88: 0x870000,
+ 89: 0x87005f,
+ 90: 0x870087,
+ 91: 0x8700af,
+ 92: 0x8700d7,
+ 93: 0x8700ff,
+ 94: 0x875f00,
+ 95: 0x875f5f,
+ 96: 0x875f87,
+ 97: 0x875faf,
+ 98: 0x875fd7,
+ 99: 0x875fff,
+ 100: 0x878700,
+ 101: 0x87875f,
+ 102: 0x878787,
+ 103: 0x8787af,
+ 104: 0x8787d7,
+ 105: 0x8787ff,
+ 106: 0x87af00,
+ 107: 0x87af5f,
+ 108: 0x87af87,
+ 109: 0x87afaf,
+ 110: 0x87afd7,
+ 111: 0x87afff,
+ 112: 0x87d700,
+ 113: 0x87d75f,
+ 114: 0x87d787,
+ 115: 0x87d7af,
+ 116: 0x87d7d7,
+ 117: 0x87d7ff,
+ 118: 0x87ff00,
+ 119: 0x87ff5f,
+ 120: 0x87ff87,
+ 121: 0x87ffaf,
+ 122: 0x87ffd7,
+ 123: 0x87ffff,
+ 124: 0xaf0000,
+ 125: 0xaf005f,
+ 126: 0xaf0087,
+ 127: 0xaf00af,
+ 128: 0xaf00d7,
+ 129: 0xaf00ff,
+ 130: 0xaf5f00,
+ 131: 0xaf5f5f,
+ 132: 0xaf5f87,
+ 133: 0xaf5faf,
+ 134: 0xaf5fd7,
+ 135: 0xaf5fff,
+ 136: 0xaf8700,
+ 137: 0xaf875f,
+ 138: 0xaf8787,
+ 139: 0xaf87af,
+ 140: 0xaf87d7,
+ 141: 0xaf87ff,
+ 142: 0xafaf00,
+ 143: 0xafaf5f,
+ 144: 0xafaf87,
+ 145: 0xafafaf,
+ 146: 0xafafd7,
+ 147: 0xafafff,
+ 148: 0xafd700,
+ 149: 0xafd75f,
+ 150: 0xafd787,
+ 151: 0xafd7af,
+ 152: 0xafd7d7,
+ 153: 0xafd7ff,
+ 154: 0xafff00,
+ 155: 0xafff5f,
+ 156: 0xafff87,
+ 157: 0xafffaf,
+ 158: 0xafffd7,
+ 159: 0xafffff,
+ 160: 0xd70000,
+ 161: 0xd7005f,
+ 162: 0xd70087,
+ 163: 0xd700af,
+ 164: 0xd700d7,
+ 165: 0xd700ff,
+ 166: 0xd75f00,
+ 167: 0xd75f5f,
+ 168: 0xd75f87,
+ 169: 0xd75faf,
+ 170: 0xd75fd7,
+ 171: 0xd75fff,
+ 172: 0xd78700,
+ 173: 0xd7875f,
+ 174: 0xd78787,
+ 175: 0xd787af,
+ 176: 0xd787d7,
+ 177: 0xd787ff,
+ 178: 0xd7af00,
+ 179: 0xd7af5f,
+ 180: 0xd7af87,
+ 181: 0xd7afaf,
+ 182: 0xd7afd7,
+ 183: 0xd7afff,
+ 184: 0xd7d700,
+ 185: 0xd7d75f,
+ 186: 0xd7d787,
+ 187: 0xd7d7af,
+ 188: 0xd7d7d7,
+ 189: 0xd7d7ff,
+ 190: 0xd7ff00,
+ 191: 0xd7ff5f,
+ 192: 0xd7ff87,
+ 193: 0xd7ffaf,
+ 194: 0xd7ffd7,
+ 195: 0xd7ffff,
+ 196: 0xff0000,
+ 197: 0xff005f,
+ 198: 0xff0087,
+ 199: 0xff00af,
+ 200: 0xff00d7,
+ 201: 0xff00ff,
+ 202: 0xff5f00,
+ 203: 0xff5f5f,
+ 204: 0xff5f87,
+ 205: 0xff5faf,
+ 206: 0xff5fd7,
+ 207: 0xff5fff,
+ 208: 0xff8700,
+ 209: 0xff875f,
+ 210: 0xff8787,
+ 211: 0xff87af,
+ 212: 0xff87d7,
+ 213: 0xff87ff,
+ 214: 0xffaf00,
+ 215: 0xffaf5f,
+ 216: 0xffaf87,
+ 217: 0xffafaf,
+ 218: 0xffafd7,
+ 219: 0xffafff,
+ 220: 0xffd700,
+ 221: 0xffd75f,
+ 222: 0xffd787,
+ 223: 0xffd7af,
+ 224: 0xffd7d7,
+ 225: 0xffd7ff,
+ 226: 0xffff00,
+ 227: 0xffff5f,
+ 228: 0xffff87,
+ 229: 0xffffaf,
+ 230: 0xffffd7,
+ 231: 0xffffff,
+ 232: 0x080808,
+ 233: 0x121212,
+ 234: 0x1c1c1c,
+ 235: 0x262626,
+ 236: 0x303030,
+ 237: 0x3a3a3a,
+ 238: 0x444444,
+ 239: 0x4e4e4e,
+ 240: 0x585858,
+ 241: 0x626262,
+ 242: 0x6c6c6c,
+ 243: 0x767676,
+ 244: 0x808080,
+ 245: 0x8a8a8a,
+ 246: 0x949494,
+ 247: 0x9e9e9e,
+ 248: 0xa8a8a8,
+ 249: 0xb2b2b2,
+ 250: 0xbcbcbc,
+ 251: 0xc6c6c6,
+ 252: 0xd0d0d0,
+ 253: 0xdadada,
+ 254: 0xe4e4e4,
+ 255: 0xeeeeee,
+}
+
+// doTitleSequence handles a console title sequence of the form `\033]0;TITLESTR\007`.
+func doTitleSequence(er *bytes.Reader) error {
+ var c byte
+ var err error
+
+ c, err = er.ReadByte()
+ if err != nil {
+ return err
+ }
+ if c != '0' && c != '2' {
+ return nil
+ }
+ c, err = er.ReadByte()
+ if err != nil {
+ return err
+ }
+ if c != ';' {
+ return nil
+ }
+ title := make([]byte, 0, 80)
+ for {
+ c, err = er.ReadByte()
+ if err != nil {
+ return err
+ }
+ if c == 0x07 || c == '\n' {
+ break
+ }
+ title = append(title, c)
+ }
+ if len(title) > 0 {
+ title8, err := syscall.UTF16PtrFromString(string(title))
+ if err == nil {
+ procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8)))
+ }
+ }
+ return nil
+}
+
+// Write writes data to the console, translating ANSI escape sequences into console API calls.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ var csbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+ er := bytes.NewReader(data)
+ var bw [1]byte
+loop:
+ for {
+ c1, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ bw[0] = c1
+ w.out.Write(bw[:])
+ continue
+ }
+ c2, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+
+ if c2 == ']' {
+ if err := doTitleSequence(er); err != nil {
+ break loop
+ }
+ continue
+ }
+ if c2 != 0x5b {
+ continue
+ }
+
+ var buf bytes.Buffer
+ var m byte
+ for {
+ c, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ m = c
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+
+ switch m {
+ case 'A':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'B':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'C':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'D':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'E':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'F':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'G':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = short(n - 1)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'H', 'f':
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ if buf.Len() > 0 {
+ token := strings.Split(buf.String(), ";")
+ switch len(token) {
+ case 1:
+ n1, err := strconv.Atoi(token[0])
+ if err != nil {
+ continue
+ }
+ csbi.cursorPosition.y = short(n1 - 1)
+ case 2:
+ n1, err := strconv.Atoi(token[0])
+ if err != nil {
+ continue
+ }
+ n2, err := strconv.Atoi(token[1])
+ if err != nil {
+ continue
+ }
+ csbi.cursorPosition.x = short(n2 - 1)
+ csbi.cursorPosition.y = short(n1 - 1)
+ }
+ } else {
+ csbi.cursorPosition.y = 0
+ }
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'J':
+ n := 0
+ if buf.Len() > 0 {
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ }
+ var count, written dword
+ var cursor coord
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x)
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+ }
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'K':
+ n := 0
+ if buf.Len() > 0 {
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ var cursor coord
+ var count, written dword
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x + 1, y: csbi.cursorPosition.y}
+ count = dword(csbi.size.x - csbi.cursorPosition.x - 1)
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ count = dword(csbi.size.x - csbi.cursorPosition.x)
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ count = dword(csbi.size.x)
+ }
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'm':
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ attr := csbi.attributes
+ cs := buf.String()
+ if cs == "" {
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+ continue
+ }
+ token := strings.Split(cs, ";")
+ for i := 0; i < len(token); i++ {
+ ns := token[i]
+ if n, err = strconv.Atoi(ns); err == nil {
+ switch {
+ case n == 0 || n == 100:
+ attr = w.oldattr
+ case 1 <= n && n <= 5:
+ attr |= foregroundIntensity
+ case n == 7:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case n == 22 || n == 25:
+ attr |= foregroundIntensity
+ case n == 27:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case 30 <= n && n <= 37:
+ attr &= backgroundMask
+ if (n-30)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-30)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-30)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case n == 38: // set foreground color.
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256foreAttr == nil {
+ n256setup()
+ }
+ attr &= backgroundMask
+ attr |= n256foreAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & backgroundMask)
+ }
+ case n == 39: // reset foreground color.
+ attr &= backgroundMask
+ attr |= w.oldattr & foregroundMask
+ case 40 <= n && n <= 47:
+ attr &= foregroundMask
+ if (n-40)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-40)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-40)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ case n == 48: // set background color.
+ if i < len(token)-2 && token[i+1] == "5" {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256backAttr == nil {
+ n256setup()
+ }
+ attr &= foregroundMask
+ attr |= n256backAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & foregroundMask)
+ }
+ case n == 49: // reset background color.
+ attr &= foregroundMask
+ attr |= w.oldattr & backgroundMask
+ case 90 <= n && n <= 97:
+ attr = (attr & backgroundMask)
+ attr |= foregroundIntensity
+ if (n-90)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-90)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-90)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case 100 <= n && n <= 107:
+ attr = (attr & foregroundMask)
+ attr |= backgroundIntensity
+ if (n-100)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-100)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-100)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ }
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+ }
+ }
+ case 'h':
+ var ci consoleCursorInfo
+ cs := buf.String()
+ if cs == "5>" {
+ procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ ci.visible = 0
+ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ } else if cs == "?25" {
+ procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ ci.visible = 1
+ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ }
+ case 'l':
+ var ci consoleCursorInfo
+ cs := buf.String()
+ if cs == "5>" {
+ procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ ci.visible = 1
+ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ } else if cs == "?25" {
+ procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ ci.visible = 0
+ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ }
+ case 's':
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ w.oldpos = csbi.cursorPosition
+ case 'u':
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
+ }
+ }
+
+ return len(data), nil
+}
+
+type consoleColor struct {
+ rgb int
+ red bool
+ green bool
+ blue bool
+ intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+ if c.red {
+ attr |= foregroundRed
+ }
+ if c.green {
+ attr |= foregroundGreen
+ }
+ if c.blue {
+ attr |= foregroundBlue
+ }
+ if c.intensity {
+ attr |= foregroundIntensity
+ }
+ return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+ if c.red {
+ attr |= backgroundRed
+ }
+ if c.green {
+ attr |= backgroundGreen
+ }
+ if c.blue {
+ attr |= backgroundBlue
+ }
+ if c.intensity {
+ attr |= backgroundIntensity
+ }
+ return
+}
+
+var color16 = []consoleColor{
+ {0x000000, false, false, false, false},
+ {0x000080, false, false, true, false},
+ {0x008000, false, true, false, false},
+ {0x008080, false, true, true, false},
+ {0x800000, true, false, false, false},
+ {0x800080, true, false, true, false},
+ {0x808000, true, true, false, false},
+ {0xc0c0c0, true, true, true, false},
+ {0x808080, false, false, false, true},
+ {0x0000ff, false, false, true, true},
+ {0x00ff00, false, true, false, true},
+ {0x00ffff, false, true, true, true},
+ {0xff0000, true, false, false, true},
+ {0xff00ff, true, false, true, true},
+ {0xffff00, true, true, false, true},
+ {0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+ h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+ dh := a.h - b.h
+ switch {
+ case dh > 0.5:
+ dh = 1 - dh
+ case dh < -0.5:
+ dh = -1 - dh
+ }
+ ds := a.s - b.s
+ dv := a.v - b.v
+ return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+ r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+ float32((rgb&0x00FF00)>>8)/256.0,
+ float32(rgb&0x0000FF)/256.0
+ min, max := minmax3f(r, g, b)
+ h := max - min
+ if h > 0 {
+ if max == r {
+ h = (g - b) / h
+ if h < 0 {
+ h += 6
+ }
+ } else if max == g {
+ h = 2 + (b-r)/h
+ } else {
+ h = 4 + (r-g)/h
+ }
+ }
+ h /= 6.0
+ s := max - min
+ if max != 0 {
+ s /= max
+ }
+ v := max
+ return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+ t := make(hsvTable, len(rgbTable))
+ for i, c := range rgbTable {
+ t[i] = toHSV(c.rgb)
+ }
+ return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+ hsv := toHSV(rgb)
+ n := 7
+ l := float32(5.0)
+ for i, p := range t {
+ d := hsv.dist(p)
+ if d < l {
+ l, n = d, i
+ }
+ }
+ return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+ if a < b {
+ if b < c {
+ return a, c
+ } else if a < c {
+ return a, b
+ } else {
+ return c, b
+ }
+ } else {
+ if a < c {
+ return b, c
+ } else if b < c {
+ return b, a
+ } else {
+ return c, a
+ }
+ }
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+ n256foreAttr = make([]word, 256)
+ n256backAttr = make([]word, 256)
+ t := toHSVTable(color16)
+ for i, rgb := range color256 {
+ c := t.find(rgb)
+ n256foreAttr[i] = c.foregroundAttr()
+ n256backAttr[i] = c.backgroundAttr()
+ }
+}
diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go
new file mode 100644
index 000000000..9721e16f4
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -0,0 +1,55 @@
+package colorable
+
+import (
+ "bytes"
+ "io"
+)
+
+// NonColorable holds a writer but removes escape sequences.
+type NonColorable struct {
+ out io.Writer
+}
+
+// NewNonColorable returns a new instance of Writer which removes escape sequences from the given Writer.
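+//
+// A small sketch: wrapping a buffer so ANSI sequences are stripped from whatever
+// is written through it (the buffer and the input string are illustrative):
+//
+//	var buf bytes.Buffer
+//	w := colorable.NewNonColorable(&buf)
+//	w.Write([]byte("\x1b[31mplain\x1b[0m"))
+//	// buf now contains just "plain"; the color codes were dropped.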
+func NewNonColorable(w io.Writer) io.Writer {
+ return &NonColorable{out: w}
+}
+
+// Write writes data to the underlying writer, stripping ANSI escape sequences.
+func (w *NonColorable) Write(data []byte) (n int, err error) {
+ er := bytes.NewReader(data)
+ var bw [1]byte
+loop:
+ for {
+ c1, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ bw[0] = c1
+ w.out.Write(bw[:])
+ continue
+ }
+ c2, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if c2 != 0x5b {
+ continue
+ }
+
+ var buf bytes.Buffer
+ for {
+ c, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+ }
+
+ return len(data), nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
new file mode 100644
index 000000000..5597e026d
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+go:
+ - tip
+
+os:
+ - linux
+ - osx
+
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 000000000..65dc692b6
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 000000000..1e69004bb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/mattn/go-isatty"
+ "os"
+)
+
+func main() {
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Terminal")
+ } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Cygwin/MSYS2 Terminal")
+ } else {
+ fmt.Println("Is Not Terminal")
+ }
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+ https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty.
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 000000000..9584a9884
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal. This is
+// always false on App Engine classic, which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a Cygwin or MSYS2
+// terminal. This is also always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 000000000..42f2514d1
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 000000000..7384cf991
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine,!ppc64,!ppc64le
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
new file mode 100644
index 000000000..44e5d2130
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
@@ -0,0 +1,19 @@
+// +build linux
+// +build ppc64 ppc64le
+
+package isatty
+
+import (
+ "unsafe"
+
+ syscall "golang.org/x/sys/unix"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
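
The three termios-based implementations above (BSD, Linux, and Linux on ppc64/ppc64le) share one idea: ask the kernel for the descriptor's terminal attributes and treat a successful ioctl as proof of a TTY; on the BSDs the only difference is the request constant, TIOCGETA instead of TCGETS. Below is a minimal standalone sketch of that probe, assuming Linux and the same `syscall` names the vendored files use; it is an illustration, not part of the vendored diff.

```go
// Minimal sketch (Linux): a TCGETS ioctl only succeeds on a terminal device,
// so the error status of the raw syscall is the whole check.
package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

func isTerminal(fd uintptr) bool {
	var termios syscall.Termios
	// TCGETS fills termios with the terminal attributes; on a pipe or a
	// regular file the ioctl fails and errno is non-zero.
	_, _, errno := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TCGETS,
		uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return errno == 0
}

func main() {
	fmt.Println("stdout is a terminal:", isTerminal(os.Stdout.Fd()))
	fmt.Println("stderr is a terminal:", isTerminal(os.Stderr.Fd()))
}
```

Run directly, it prints true for both descriptors; piping stdout (for example into `cat`) flips only the first line to false.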
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 000000000..9d8b4a599
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,10 @@
+// +build !windows
+// +build !appengine
+
+package isatty
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 000000000..1f0c6bf53
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+ var termio unix.Termio
+ err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+ return err == nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 000000000..af51cbcaa
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,94 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+ "strings"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ fileNameInfo uintptr = 2
+ fileTypePipe = 3
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+ procGetFileType = kernel32.NewProc("GetFileType")
+)
+
+func init() {
+ // Check if GetFileInformationByHandleEx is available.
+ if procGetFileInformationByHandleEx.Find() != nil {
+ procGetFileInformationByHandleEx = nil
+ }
+}
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// isCygwinPipeName checks whether the pipe name is one used by a cygwin/msys2 pty.
+// Cygwin/MSYS2 PTY has a name like:
+// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+ token := strings.Split(name, "-")
+ if len(token) < 5 {
+ return false
+ }
+
+ if token[0] != `\msys` && token[0] != `\cygwin` {
+ return false
+ }
+
+ if token[1] == "" {
+ return false
+ }
+
+ if !strings.HasPrefix(token[2], "pty") {
+ return false
+ }
+
+ if token[3] != `from` && token[3] != `to` {
+ return false
+ }
+
+ if token[4] != "master" {
+ return false
+ }
+
+ return true
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+ if procGetFileInformationByHandleEx == nil {
+ return false
+ }
+
+ // Cygwin/msys's pty is a pipe.
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
+ if ft != fileTypePipe || e != 0 {
+ return false
+ }
+
+ var buf [2 + syscall.MAX_PATH]uint16
+ r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
+ 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
+ uintptr(len(buf)*2), 0, 0)
+ if r == 0 || e != 0 {
+ return false
+ }
+
+ l := *(*uint32)(unsafe.Pointer(&buf))
+ return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
+}
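
The isCygwinPipeName helper above encodes the documented name shape `\{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master` as a series of token checks on the pipe name returned by GetFileInformationByHandleEx. The sketch below is a hypothetical regexp rendering of that same shape, useful only to show which names would count as a Cygwin/MSYS2 pty; the sample IDs are invented, and the vendored split-based check is slightly more permissive (it only requires a `pty` prefix and at least five tokens).

```go
// Hypothetical illustration of the documented pipe-name pattern; the vendored
// code performs the equivalent check with strings.Split rather than a regexp.
package main

import (
	"fmt"
	"regexp"
)

// Matches names shaped like \{cygwin,msys}-<id>-pty<N>-{from,to}-master.
var cygwinPty = regexp.MustCompile(`^\\(cygwin|msys)-[^-]+-pty\d+-(from|to)-master$`)

func main() {
	for _, name := range []string{
		`\cygwin-0123456789abcdef-pty0-from-master`, // sample pty name: matches
		`\msys-0123456789abcdef-pty1-to-master`,     // sample pty name: matches
		`\some-other-pipe`,                          // ordinary pipe: does not match
	} {
		fmt.Printf("%-45s %v\n", name, cygwinPty.MatchString(name))
	}
}
```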
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
deleted file mode 100644
index 916c9182a..000000000
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "strings"
-)
-
-// ARPEntry contains a single row of the columnar data represented in
-// /proc/net/arp.
-type ARPEntry struct {
- // IP address
- IPAddr net.IP
- // MAC address
- HWAddr net.HardwareAddr
- // Name of the device
- Device string
-}
-
-// GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
-// and then return a slice of ARPEntry's.
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
- if err != nil {
- return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
- }
-
- return parseARPEntries(data)
-}
-
-func parseARPEntries(data []byte) ([]ARPEntry, error) {
- lines := strings.Split(string(data), "\n")
- entries := make([]ARPEntry, 0)
- var err error
- const (
- expectedDataWidth = 6
- expectedHeaderWidth = 9
- )
- for _, line := range lines {
- columns := strings.Fields(line)
- width := len(columns)
-
- if width == expectedHeaderWidth || width == 0 {
- continue
- } else if width == expectedDataWidth {
- entry, err := parseARPEntry(columns)
- if err != nil {
- return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
- }
- entries = append(entries, entry)
- } else {
- return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
- }
-
- }
-
- return entries, err
-}
-
-func parseARPEntry(columns []string) (ARPEntry, error) {
- ip := net.ParseIP(columns[0])
- mac := net.HardwareAddr(columns[3])
-
- entry := ARPEntry{
- IPAddr: ip,
- HWAddr: mac,
- Device: columns[5],
- }
-
- return entry, nil
-}
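
The removed GatherARPEntries/parseARPEntry pair reads /proc/net/arp, skips the nine-field header row, and takes the IP address from column 0, the hardware address from column 3, and the device from column 5 of each six-column data row. Below is a minimal sketch of that column layout over the sample row from the fixture removed further down; unlike the removed code, which wraps the raw column string in net.HardwareAddr, the sketch parses the MAC with net.ParseMAC.

```go
// Minimal sketch of the /proc/net/arp column layout used by the removed parser.
package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Sample data row (IP address, HW type, Flags, HW address, Mask, Device).
	line := "192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33"

	cols := strings.Fields(line)
	if len(cols) != 6 {
		fmt.Println("unexpected column count:", len(cols))
		return
	}
	ip := net.ParseIP(cols[0])        // column 0: IP address
	mac, err := net.ParseMAC(cols[3]) // column 3: hardware address
	if err != nil {
		fmt.Println("bad MAC:", err)
		return
	}
	fmt.Printf("ip=%s mac=%s device=%s\n", ip, mac, cols[5]) // column 5: device
}
```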
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index 10bd067a0..63d4229a4 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -31,7 +31,7 @@ type BuddyInfo struct {
Sizes []float64
}
-// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
deleted file mode 100644
index 16491d6ab..000000000
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "bytes"
- "io/ioutil"
- "strconv"
- "strings"
-)
-
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
-type CPUInfo struct {
- Processor uint
- VendorID string
- CPUFamily string
- Model string
- ModelName string
- Stepping string
- Microcode string
- CPUMHz float64
- CacheSize string
- PhysicalID string
- Siblings uint
- CoreID string
- CPUCores uint
- APICID string
- InitialAPICID string
- FPU string
- FPUException string
- CPUIDLevel uint
- WP string
- Flags []string
- Bugs []string
- BogoMips float64
- CLFlushSize uint
- CacheAlignment uint
- AddressSizes string
- PowerManagement string
-}
-
-// CPUInfo returns information about current system CPUs.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-func (fs FS) CPUInfo() ([]CPUInfo, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("cpuinfo"))
- if err != nil {
- return nil, err
- }
- return parseCPUInfo(data)
-}
-
-// parseCPUInfo parses data from /proc/cpuinfo
-func parseCPUInfo(info []byte) ([]CPUInfo, error) {
- cpuinfo := []CPUInfo{}
- i := -1
- scanner := bufio.NewScanner(bytes.NewReader(info))
- for scanner.Scan() {
- line := scanner.Text()
- if strings.TrimSpace(line) == "" {
- continue
- }
- field := strings.SplitN(line, ": ", 2)
- switch strings.TrimSpace(field[0]) {
- case "processor":
- cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
- i++
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Processor = uint(v)
- case "vendor_id":
- cpuinfo[i].VendorID = field[1]
- case "cpu family":
- cpuinfo[i].CPUFamily = field[1]
- case "model":
- cpuinfo[i].Model = field[1]
- case "model name":
- cpuinfo[i].ModelName = field[1]
- case "stepping":
- cpuinfo[i].Stepping = field[1]
- case "microcode":
- cpuinfo[i].Microcode = field[1]
- case "cpu MHz":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUMHz = v
- case "cache size":
- cpuinfo[i].CacheSize = field[1]
- case "physical id":
- cpuinfo[i].PhysicalID = field[1]
- case "siblings":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].Siblings = uint(v)
- case "core id":
- cpuinfo[i].CoreID = field[1]
- case "cpu cores":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUCores = uint(v)
- case "apicid":
- cpuinfo[i].APICID = field[1]
- case "initial apicid":
- cpuinfo[i].InitialAPICID = field[1]
- case "fpu":
- cpuinfo[i].FPU = field[1]
- case "fpu_exception":
- cpuinfo[i].FPUException = field[1]
- case "cpuid level":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CPUIDLevel = uint(v)
- case "wp":
- cpuinfo[i].WP = field[1]
- case "flags":
- cpuinfo[i].Flags = strings.Fields(field[1])
- case "bugs":
- cpuinfo[i].Bugs = strings.Fields(field[1])
- case "bogomips":
- v, err := strconv.ParseFloat(field[1], 64)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].BogoMips = v
- case "clflush size":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CLFlushSize = uint(v)
- case "cache_alignment":
- v, err := strconv.ParseUint(field[1], 0, 32)
- if err != nil {
- return nil, err
- }
- cpuinfo[i].CacheAlignment = uint(v)
- case "address sizes":
- cpuinfo[i].AddressSizes = field[1]
- case "power management":
- cpuinfo[i].PowerManagement = field[1]
- }
- }
- return cpuinfo, nil
-
-}
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
deleted file mode 100644
index 19d4041b2..000000000
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "strconv"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Crypto holds info parsed from /proc/crypto.
-type Crypto struct {
- Alignmask *uint64
- Async bool
- Blocksize *uint64
- Chunksize *uint64
- Ctxsize *uint64
- Digestsize *uint64
- Driver string
- Geniv string
- Internal string
- Ivsize *uint64
- Maxauthsize *uint64
- MaxKeysize *uint64
- MinKeysize *uint64
- Module string
- Name string
- Priority *int64
- Refcnt *int64
- Seedsize *uint64
- Selftest string
- Type string
- Walksize *uint64
-}
-
-// Crypto parses an crypto-file (/proc/crypto) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
-func (fs FS) Crypto() ([]Crypto, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
- if err != nil {
- return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
- }
- crypto, err := parseCrypto(data)
- if err != nil {
- return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
- }
- return crypto, nil
-}
-
-func parseCrypto(cryptoData []byte) ([]Crypto, error) {
- crypto := []Crypto{}
-
- cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
-
- for _, block := range cryptoBlocks {
- var newCryptoElem Crypto
-
- lines := strings.Split(string(block), "\n")
- for _, line := range lines {
- if strings.TrimSpace(line) == "" || line[0] == ' ' {
- continue
- }
- fields := strings.Split(line, ":")
- key := strings.TrimSpace(fields[0])
- value := strings.TrimSpace(fields[1])
- vp := util.NewValueParser(value)
-
- switch strings.TrimSpace(key) {
- case "async":
- b, err := strconv.ParseBool(value)
- if err == nil {
- newCryptoElem.Async = b
- }
- case "blocksize":
- newCryptoElem.Blocksize = vp.PUInt64()
- case "chunksize":
- newCryptoElem.Chunksize = vp.PUInt64()
- case "digestsize":
- newCryptoElem.Digestsize = vp.PUInt64()
- case "driver":
- newCryptoElem.Driver = value
- case "geniv":
- newCryptoElem.Geniv = value
- case "internal":
- newCryptoElem.Internal = value
- case "ivsize":
- newCryptoElem.Ivsize = vp.PUInt64()
- case "maxauthsize":
- newCryptoElem.Maxauthsize = vp.PUInt64()
- case "max keysize":
- newCryptoElem.MaxKeysize = vp.PUInt64()
- case "min keysize":
- newCryptoElem.MinKeysize = vp.PUInt64()
- case "module":
- newCryptoElem.Module = value
- case "name":
- newCryptoElem.Name = value
- case "priority":
- newCryptoElem.Priority = vp.PInt64()
- case "refcnt":
- newCryptoElem.Refcnt = vp.PInt64()
- case "seedsize":
- newCryptoElem.Seedsize = vp.PUInt64()
- case "selftest":
- newCryptoElem.Selftest = value
- case "type":
- newCryptoElem.Type = value
- case "walksize":
- newCryptoElem.Walksize = vp.PUInt64()
- }
- }
- crypto = append(crypto, newCryptoElem)
- }
- return crypto, nil
-}
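
The removed Crypto/parseCrypto pair treats /proc/crypto as blank-line-separated records of `key : value` pairs, skipping blank and space-prefixed lines and feeding the numeric fields through the internal value parser. A minimal sketch of that record format, standard library only, with sample values mirroring entries in the fixture removed further down:

```go
// Minimal sketch: split /proc/crypto-style data into records on blank lines,
// then split each line into a key/value pair on the first colon.
package main

import (
	"fmt"
	"strings"
)

func main() {
	sample := `name         : crc32c
driver       : crc32c-generic
priority     : 100

name         : sha256
driver       : sha256-generic
priority     : 100`

	for _, block := range strings.Split(sample, "\n\n") {
		entry := map[string]string{}
		for _, line := range strings.Split(block, "\n") {
			if strings.TrimSpace(line) == "" || strings.HasPrefix(line, " ") {
				continue // the removed parser also skips blank/indented lines
			}
			kv := strings.SplitN(line, ":", 2)
			if len(kv) != 2 {
				continue
			}
			entry[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
		fmt.Printf("%s: driver=%s priority=%s\n", entry["name"], entry["driver"], entry["priority"])
	}
}
```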
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
index 0b2905544..6b42e7ba1 100644
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ b/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -47,48 +47,6 @@ SymlinkTo: ../../symlinktargets/ghi
Path: fixtures/proc/26231/fd/3
SymlinkTo: ../../symlinktargets/uvw
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fdinfo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/0
-Lines: 6
-pos: 0
-flags: 02004000
-mnt_id: 13
-inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000
-inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a
-inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/1
-Lines: 4
-pos: 0
-flags: 02004002
-mnt_id: 13
-eventfd-count: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/10
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/2
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/3
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/26231/io
Lines: 7
rchar: 750339
@@ -168,11 +126,6 @@ SymlinkTo: net:[4026531993]
Path: fixtures/proc/26231/root
SymlinkTo: /
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/schedstat
-Lines: 1
-411605849 93680043 79
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/26231/stat
Lines: 1
26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
@@ -184,10 +137,10 @@ Lines: 53
Name: prometheus
Umask: 0022
State: S (sleeping)
-Tgid: 26231
+Tgid: 1
Ngid: 0
-Pid: 26231
-PPid: 1
+Pid: 1
+PPid: 0
TracerPid: 0
Uid: 0 0 0 0
Gid: 0 0 0 0
@@ -305,18 +258,6 @@ Lines: 1
com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/schedstat
-Lines: 8
- ____________________________________
-< this is a malformed schedstat file >
- ------------------------------------
- \ ^__^
- \ (oo)\_______
- (__)\ )\/\
- ||----w |
- || ||
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/proc/584
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -333,1201 +274,6 @@ Node 0, zone DMA32 759 572 791 475 194 45 12 0
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/cpuinfo
-Lines: 216
-processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.998
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 1
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.037
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 2
-initial apicid : 2
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 2
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.010
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 4
-initial apicid : 4
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 3
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.028
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 6
-initial apicid : 6
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 4
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.989
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 5
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.083
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 3
-initial apicid : 3
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 6
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.017
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 5
-initial apicid : 5
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 7
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.030
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 7
-initial apicid : 7
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/crypto
-Lines: 971
-name : ccm(aes)
-driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
-module : ccm
-priority : 300
-refcnt : 4
-selftest : passed
-internal : no
-type : aead
-async : no
-blocksize : 1
-ivsize : 16
-maxauthsize : 16
-geniv : <none>
-
-name : cbcmac(aes)
-driver : cbcmac(aes-aesni)
-module : ccm
-priority : 300
-refcnt : 7
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 16
-
-name : ecdh
-driver : ecdh-generic
-module : ecdh_generic
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-
-name : ecb(arc4)
-driver : ecb(arc4)-generic
-module : arc4
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 1
-max keysize : 256
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : arc4
-driver : arc4-generic
-module : arc4
-priority : 0
-refcnt : 3
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 1
-max keysize : 256
-
-name : crct10dif
-driver : crct10dif-pclmul
-module : crct10dif_pclmul
-priority : 200
-refcnt : 2
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32
-driver : crc32-pclmul
-module : crc32_pclmul
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : __ghash
-driver : cryptd(__ghash-pclmulqdqni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : ghash
-driver : ghash-clmulni
-module : ghash_clmulni_intel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : __ghash
-driver : __ghash-pclmulqdqni
-module : ghash_clmulni_intel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : crc32c
-driver : crc32c-intel
-module : crc32c_intel
-priority : 200
-refcnt : 5
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : cbc(aes)
-driver : cbc(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 5
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : pkcs1pad(rsa,sha256)
-driver : pkcs1pad(rsa-generic,sha256)
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : __xts(aes)
-driver : cryptd(__xts-aes-aesni)
-module : kernel
-priority : 451
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : xts(aes)
-driver : xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : cryptd(__ctr-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : cryptd(__cbc-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : cbc(aes)
-driver : cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : cryptd(__ecb-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : ecb(aes)
-driver : ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __generic-gcm-aes-aesni
-driver : cryptd(__driver-generic-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : gcm(aes)
-driver : generic-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : __generic-gcm-aes-aesni
-driver : __driver-generic-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : __gcm-aes-aesni
-driver : cryptd(__driver-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : rfc4106(gcm(aes))
-driver : rfc4106-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : __gcm-aes-aesni
-driver : __driver-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : __xts(aes)
-driver : __xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : __ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : __cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : __ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __aes
-driver : __aes-aesni
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : yes
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : aes
-driver : aes-aesni
-module : kernel
-priority : 300
-refcnt : 8
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : hmac(sha1)
-driver : hmac(sha1-generic)
-module : kernel
-priority : 100
-refcnt : 9
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : ghash
-driver : ghash-generic
-module : kernel
-priority : 100
-refcnt : 3
-selftest : passed
-internal : no
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : jitterentropy_rng
-driver : jitterentropy_rng
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha256
-module : kernel
-priority : 221
-refcnt : 2
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha512
-module : kernel
-priority : 220
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha384
-module : kernel
-priority : 219
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha1
-module : kernel
-priority : 218
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha256
-module : kernel
-priority : 217
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha512
-module : kernel
-priority : 216
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha384
-module : kernel
-priority : 215
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha1
-module : kernel
-priority : 214
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes256
-module : kernel
-priority : 213
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes192
-module : kernel
-priority : 212
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes128
-module : kernel
-priority : 211
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : hmac(sha256)
-driver : hmac(sha256-generic)
-module : kernel
-priority : 100
-refcnt : 10
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : stdrng
-driver : drbg_pr_hmac_sha256
-module : kernel
-priority : 210
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha512
-module : kernel
-priority : 209
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha384
-module : kernel
-priority : 208
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha1
-module : kernel
-priority : 207
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha256
-module : kernel
-priority : 206
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha512
-module : kernel
-priority : 205
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha384
-module : kernel
-priority : 204
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha1
-module : kernel
-priority : 203
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes256
-module : kernel
-priority : 202
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes192
-module : kernel
-priority : 201
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes128
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : 842
-driver : 842-scomp
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : 842
-driver : 842-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo-rle
-driver : lzo-rle-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo-rle
-driver : lzo-rle-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo
-driver : lzo-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo
-driver : lzo-generic
-module : kernel
-priority : 0
-refcnt : 9
-selftest : passed
-internal : no
-type : compression
-
-name : crct10dif
-driver : crct10dif-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32c
-driver : crc32c-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : zlib-deflate
-driver : zlib-deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : aes
-driver : aes-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : sha224
-driver : sha224-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 28
-
-name : sha256
-driver : sha256-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : sha1
-driver : sha1-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : md5
-driver : md5-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 16
-
-name : ecb(cipher_null)
-driver : ecb-cipher_null
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 0
-max keysize : 0
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : digest_null
-driver : digest_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 0
-
-name : compress_null
-driver : compress_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : cipher_null
-driver : cipher_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 0
-max keysize : 0
-
-name : rsa
-driver : rsa-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : dh
-driver : dh-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-
-name : aes
-driver : aes-asm
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats
Lines: 49
1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
@@ -1677,12 +423,6 @@ Mode: 644
Directory: fixtures/proc/net
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/arp
-Lines: 2
-IP address HW type Flags HW address Mask Device
-192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/dev
Lines: 6
Inter-| Receive | Transmit
@@ -1755,11 +495,6 @@ proc4 2 2 10853
proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat
-Lines: 1
-00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/unix
Lines: 6
Num RefCount Protocol Flags Type St Inode Path
@@ -1832,16 +567,6 @@ some avg10=0.10 avg60=2.00 avg300=3.85 total=15
full avg10=0.20 avg60=3.00 avg300=4.95 total=25
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/schedstat
-Lines: 6
-version 15
-timestamp 15819019232
-cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306
-domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0
-cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945
-domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/self
SymlinkTo: 26231
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1894,493 +619,6 @@ Path: fixtures/proc/symlinktargets/xyz
Lines: 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/vm
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/admin_reserve_kbytes
-Lines: 1
-8192
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/block_dump
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/compact_unevictable_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_ratio
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_expire_centisecs
-Lines: 1
-3000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_ratio
-Lines: 1
-20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_writeback_centisecs
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirtytime_expire_seconds
-Lines: 1
-43200
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/drop_caches
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/extfrag_threshold
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/hugetlb_shm_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/laptop_mode
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/legacy_va_layout
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/lowmem_reserve_ratio
-Lines: 1
-256 256 32 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/max_map_count
-Lines: 1
-65530
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_early_kill
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_recovery
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_free_kbytes
-Lines: 1
-67584
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_slab_ratio
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_unmapped_ratio
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/mmap_min_addr
-Lines: 1
-65536
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_overcommit_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_stat
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_zonelist_order
-Lines: 1
-Node
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_dump_tasks
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_kill_allocating_task
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_kbytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_memory
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_ratio
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/page-cluster
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/panic_on_oom
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/percpu_pagelist_fraction
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/stat_interval
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/swappiness
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/user_reserve_kbytes
-Lines: 1
-131072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/vfs_cache_pressure
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_boost_factor
-Lines: 1
-15000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_scale_factor
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/zone_reclaim_mode
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/zoneinfo
-Lines: 262
-Node 0, zone DMA
- per-node stats
- nr_inactive_anon 230981
- nr_active_anon 547580
- nr_inactive_file 316904
- nr_active_file 346282
- nr_unevictable 115467
- nr_slab_reclaimable 131220
- nr_slab_unreclaimable 47320
- nr_isolated_anon 0
- nr_isolated_file 0
- workingset_nodes 11627
- workingset_refault 466886
- workingset_activate 276925
- workingset_restore 84055
- workingset_nodereclaim 487
- nr_anon_pages 795576
- nr_mapped 215483
- nr_file_pages 761874
- nr_dirty 908
- nr_writeback 0
- nr_writeback_temp 0
- nr_shmem 224925
- nr_shmem_hugepages 0
- nr_shmem_pmdmapped 0
- nr_anon_transparent_hugepages 0
- nr_unstable 0
- nr_vmscan_write 12950
- nr_vmscan_immediate_reclaim 3033
- nr_dirtied 8007423
- nr_written 7752121
- nr_kernel_misc_reclaimable 0
- pages free 3952
- min 33
- low 41
- high 49
- spanned 4095
- present 3975
- managed 3956
- protection: (0, 2877, 7826, 7826, 7826)
- nr_free_pages 3952
- nr_zone_inactive_anon 0
- nr_zone_active_anon 0
- nr_zone_inactive_file 0
- nr_zone_active_file 0
- nr_zone_unevictable 0
- nr_zone_write_pending 0
- nr_mlock 0
- nr_page_table_pages 0
- nr_kernel_stack 0
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 1
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 1
- numa_other 0
- pagesets
- cpu: 0
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 1
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 2
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 3
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 4
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 5
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 6
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 7
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- node_unreclaimable: 0
- start_pfn: 1
-Node 0, zone DMA32
- pages free 204252
- min 19510
- low 21059
- high 22608
- spanned 1044480
- present 759231
- managed 742806
- protection: (0, 0, 4949, 4949, 4949)
- nr_free_pages 204252
- nr_zone_inactive_anon 118558
- nr_zone_active_anon 106598
- nr_zone_inactive_file 75475
- nr_zone_active_file 70293
- nr_zone_unevictable 66195
- nr_zone_write_pending 64
- nr_mlock 4
- nr_page_table_pages 1756
- nr_kernel_stack 2208
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 113952967
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 113952967
- numa_other 0
- pagesets
- cpu: 0
- count: 345
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 1
- count: 356
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 2
- count: 325
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 3
- count: 346
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 4
- count: 321
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 5
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 6
- count: 373
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 7
- count: 339
- high: 378
- batch: 63
- vm stats threshold: 48
- node_unreclaimable: 0
- start_pfn: 4096
-Node 0, zone Normal
- pages free 18553
- min 11176
- low 13842
- high 16508
- spanned 1308160
- present 1308160
- managed 1268711
- protection: (0, 0, 0, 0, 0)
- nr_free_pages 18553
- nr_zone_inactive_anon 112423
- nr_zone_active_anon 440982
- nr_zone_inactive_file 241429
- nr_zone_active_file 275989
- nr_zone_unevictable 49272
- nr_zone_write_pending 844
- nr_mlock 154
- nr_page_table_pages 9750
- nr_kernel_stack 15136
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 162718019
- numa_miss 0
- numa_foreign 0
- numa_interleave 26812
- numa_local 162718019
- numa_other 0
- pagesets
- cpu: 0
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 1
- count: 366
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 2
- count: 60
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 3
- count: 256
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 4
- count: 253
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 5
- count: 159
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 6
- count: 311
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 7
- count: 264
- high: 378
- batch: 63
- vm stats threshold: 56
- node_unreclaimable: 0
- start_pfn: 1048576
-Node 0, zone Movable
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Node 0, zone Device
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2683,9 +921,6 @@ Lines: 1
0x20
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/device
-SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/class/net/eth0/dormant
Lines: 1
1
@@ -2770,391 +1005,114 @@ Mode: 644
Directory: fixtures/sys/class/power_supply
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/AC
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/BAT0
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device0
+Directory: fixtures/sys/class/power_supply/AC
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/cur_state
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/max_state
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/type
-Lines: 1
-Processor
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/cur_state
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/max_state
-Lines: 1
-27
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/type
-Lines: 1
-intel_powerclamp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/temp
-Lines: 1
-49925
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/type
-Lines: 1
-bcm2835_thermal
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/mode
-Lines: 1
-enabled
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/passive
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/temp
-Lines: 1
-44000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/type
-Lines: 1
-acpitz
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device
-SymlinkTo: ../../../ACPI0003:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup
-Lines: 1
-enabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms
-Lines: 1
-10598
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms
+Path: fixtures/sys/class/power_supply/AC/online
Lines: 1
0
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type
+Path: fixtures/sys/class/power_supply/AC/type
Lines: 1
Mains
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent
+Path: fixtures/sys/class/power_supply/AC/uevent
Lines: 2
POWER_SUPPLY_NAME=AC
POWER_SUPPLY_ONLINE=0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00
+Directory: fixtures/sys/class/power_supply/BAT0
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm
+Path: fixtures/sys/class/power_supply/BAT0/alarm
Lines: 1
-2369000
+2503000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity
+Path: fixtures/sys/class/power_supply/BAT0/capacity
Lines: 1
98
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level
+Path: fixtures/sys/class/power_supply/BAT0/capacity_level
Lines: 1
Normal
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold
+Path: fixtures/sys/class/power_supply/BAT0/charge_start_threshold
Lines: 1
95
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold
+Path: fixtures/sys/class/power_supply/BAT0/charge_stop_threshold
Lines: 1
100
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count
+Path: fixtures/sys/class/power_supply/BAT0/cycle_count
Lines: 1
0
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device
-SymlinkTo: ../../../PNP0C0A:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full
+Path: fixtures/sys/class/power_supply/BAT0/energy_full
Lines: 1
50060000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design
+Path: fixtures/sys/class/power_supply/BAT0/energy_full_design
Lines: 1
47520000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now
+Path: fixtures/sys/class/power_supply/BAT0/energy_now
Lines: 1
49450000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer
+Path: fixtures/sys/class/power_supply/BAT0/manufacturer
Lines: 1
LGC
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name
+Path: fixtures/sys/class/power_supply/BAT0/model_name
Lines: 1
LNV-45N1
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now
+Path: fixtures/sys/class/power_supply/BAT0/power_now
Lines: 1
4830000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present
+Path: fixtures/sys/class/power_supply/BAT0/present
Lines: 1
1
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number
+Path: fixtures/sys/class/power_supply/BAT0/serial_number
Lines: 1
38109
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status
+Path: fixtures/sys/class/power_supply/BAT0/status
Lines: 1
Discharging
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology
+Path: fixtures/sys/class/power_supply/BAT0/technology
Lines: 1
Li-ion
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type
+Path: fixtures/sys/class/power_supply/BAT0/type
Lines: 1
Battery
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent
+Path: fixtures/sys/class/power_supply/BAT0/uevent
Lines: 16
POWER_SUPPLY_NAME=BAT0
POWER_SUPPLY_STATUS=Discharging
@@ -3162,28 +1120,80 @@ POWER_SUPPLY_PRESENT=1
POWER_SUPPLY_TECHNOLOGY=Li-ion
POWER_SUPPLY_CYCLE_COUNT=0
POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000
-POWER_SUPPLY_VOLTAGE_NOW=11750000
-POWER_SUPPLY_POWER_NOW=5064000
+POWER_SUPPLY_VOLTAGE_NOW=12229000
+POWER_SUPPLY_POWER_NOW=4830000
POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000
-POWER_SUPPLY_ENERGY_FULL=47390000
-POWER_SUPPLY_ENERGY_NOW=40730000
-POWER_SUPPLY_CAPACITY=85
+POWER_SUPPLY_ENERGY_FULL=50060000
+POWER_SUPPLY_ENERGY_NOW=49450000
+POWER_SUPPLY_CAPACITY=98
POWER_SUPPLY_CAPACITY_LEVEL=Normal
POWER_SUPPLY_MODEL_NAME=LNV-45N1
POWER_SUPPLY_MANUFACTURER=LGC
POWER_SUPPLY_SERIAL_NUMBER=38109
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design
+Path: fixtures/sys/class/power_supply/BAT0/voltage_min_design
Lines: 1
10800000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now
+Path: fixtures/sys/class/power_supply/BAT0/voltage_now
Lines: 1
12229000
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/thermal
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/thermal/thermal_zone0
+Mode: 775
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone0/policy
+Lines: 1
+step_wise
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone0/temp
+Lines: 1
+49925
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone0/type
+Lines: 1
+bcm2835_thermal
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/class/thermal/thermal_zone1
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/mode
+Lines: 1
+enabled
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/passive
+Lines: 1
+0
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/policy
+Lines: 1
+step_wise
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/temp
+Lines: 1
+44000
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/sys/class/thermal/thermal_zone1/type
+Lines: 1
+acpitz
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/sys/devices
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/pci0000:00
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -3433,131 +1443,6 @@ Lines: 1
0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class
-Lines: 1
-0x020000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device
-Lines: 1
-0x15d7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override
-Lines: 1
-(null)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq
-Lines: 1
-140
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias
-Lines: 1
-pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource
-Lines: 13
-0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision
-Lines: 1
-0x21
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device
-Lines: 1
-0x225a
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor
-Lines: 1
-0x17aa
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent
-Lines: 6
-DRIVER=e1000e
-PCI_CLASS=20000
-PCI_ID=8086:15D7
-PCI_SUBSYS_ID=17AA:225A
-PCI_SLOT_NAME=0000:00:1f.6
-MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor
-Lines: 1
-0x8086
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/rbd
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -3615,52 +1500,6 @@ Mode: 775
Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq
SymlinkTo: ../cpufreq/policy0
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
-Lines: 1
-10084
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings
-Lines: 1
-11
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list
-Lines: 1
-0,4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/cpu/cpu1
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -3722,52 +1561,6 @@ Lines: 1
<unsupported>
Mode: 664
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count
-Lines: 1
-523
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings
-Lines: 1
-22
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list
-Lines: 1
-1,5
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/cpu/cpufreq
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
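
All of the fixture entries rearranged above use ttar's plain-text record layout: a "# ttar" separator, then a "Directory:", "Path:", or "Path:" plus "SymlinkTo:" header, an optional "Lines: <n>" count followed by that many lines of file body, and a "Mode:". The sketch below is an illustrative reader for that layout written for this note; it is not the ttar tool that ships with procfs, and the fixtures.ttar path in main is hypothetical.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// record is one entry of a ttar archive: a directory, a symlink, or a file
// whose body is inlined after a "Lines: <n>" header.
type record struct {
	path string
	kind string // "dir", "file", or "symlink"
	mode string
	body []string
}

func parseTtar(s *bufio.Scanner) ([]record, error) {
	var recs []record
	var cur *record
	flush := func() {
		if cur != nil {
			recs = append(recs, *cur)
			cur = nil
		}
	}
	for s.Scan() {
		line := s.Text()
		switch {
		case strings.HasPrefix(line, "# ttar"):
			flush()
		case strings.HasPrefix(line, "Directory: "):
			cur = &record{path: strings.TrimPrefix(line, "Directory: "), kind: "dir"}
		case strings.HasPrefix(line, "Path: "):
			cur = &record{path: strings.TrimPrefix(line, "Path: "), kind: "file"}
		case cur != nil && strings.HasPrefix(line, "SymlinkTo: "):
			cur.kind = "symlink"
		case cur != nil && strings.HasPrefix(line, "Lines: "):
			n, err := strconv.Atoi(strings.TrimPrefix(line, "Lines: "))
			if err != nil {
				return nil, err
			}
			// The next n lines are the file body, verbatim.
			for i := 0; i < n && s.Scan(); i++ {
				cur.body = append(cur.body, s.Text())
			}
		case cur != nil && strings.HasPrefix(line, "Mode: "):
			cur.mode = strings.TrimPrefix(line, "Mode: ")
		}
	}
	flush()
	return recs, s.Err()
}

func main() {
	f, err := os.Open("fixtures.ttar") // hypothetical path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	recs, err := parseTtar(bufio.NewScanner(f))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("records:", len(recs))
}
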
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
deleted file mode 100644
index 755591d9a..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "io/ioutil"
- "strconv"
- "strings"
-)
-
-// ParseUint32s parses a slice of strings into a slice of uint32s.
-func ParseUint32s(ss []string) ([]uint32, error) {
- us := make([]uint32, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return nil, err
- }
-
- us = append(us, uint32(u))
- }
-
- return us, nil
-}
-
-// ParseUint64s parses a slice of strings into a slice of uint64s.
-func ParseUint64s(ss []string) ([]uint64, error) {
- us := make([]uint64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, u)
- }
-
- return us, nil
-}
-
-// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
-func ParsePInt64s(ss []string) ([]*int64, error) {
- us := make([]*int64, 0, len(ss))
- for _, s := range ss {
- u, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return nil, err
- }
-
- us = append(us, &u)
- }
-
- return us, nil
-}
-
-// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
-func ReadUintFromFile(path string) (uint64, error) {
- data, err := ioutil.ReadFile(path)
- if err != nil {
- return 0, err
- }
- return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-// ParseBool parses a string into a boolean pointer.
-func ParseBool(b string) *bool {
- var truth bool
- switch b {
- case "enabled":
- truth = true
- case "disabled":
- truth = false
- default:
- return nil
- }
- return &truth
-}
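
The deleted parse.go helpers are thin wrappers around strconv; inside procfs they were typically combined with strings.Fields to turn a whitespace-separated /proc line into numbers. A minimal self-contained sketch of that pattern (the helper is re-declared here because internal/util cannot be imported from outside procfs):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUint64s mirrors the deleted util.ParseUint64s: convert every field,
// or fail on the first one that is not a base-10 uint64.
func parseUint64s(ss []string) ([]uint64, error) {
	us := make([]uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}
		us = append(us, u)
	}
	return us, nil
}

func main() {
	line := "2 0 1 0 0 0 0" // example row from a counter-style /proc file
	vals, err := parseUint64s(strings.Fields(line))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(vals) // [2 0 1 0 0 0 0]
}
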
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
deleted file mode 100644
index 68b37c4b3..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux,!appengine
-
-package util
-
-import (
- "bytes"
- "os"
- "syscall"
-)
-
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
-// https://github.com/prometheus/node_exporter/pull/728/files
-func SysReadFile(file string) (string, error) {
- f, err := os.Open(file)
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- // On some machines, hwmon drivers are broken and return EAGAIN. This causes
- // Go's ioutil.ReadFile implementation to poll forever.
- //
- // Since we either want to read data or bail immediately, do the simplest
- // possible read using syscall directly.
- b := make([]byte, 128)
- n, err := syscall.Read(int(f.Fd()), b)
- if err != nil {
- return "", err
- }
-
- return string(bytes.TrimSpace(b[:n])), nil
-}
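
The point of the deleted SysReadFile is the single 128-byte syscall.Read: broken hwmon attributes that return EAGAIN would otherwise make ioutil.ReadFile poll forever. A standalone sketch of the same pattern, reading one of the sysfs attributes present in the fixtures above (the path is an example and may not exist on a given machine):

package main

import (
	"bytes"
	"fmt"
	"os"
	"syscall"
)

// sysReadFile mirrors the deleted util.SysReadFile: open, issue exactly one
// read into a small fixed buffer, and trim surrounding whitespace.
func sysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(b[:n])), nil
}

func main() {
	v, err := sysReadFile("/sys/class/thermal/thermal_zone0/temp")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("temperature (millidegrees C):", v)
}
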
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
deleted file mode 100644
index bd55b4537..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux,appengine !linux
-
-package util
-
-import (
- "fmt"
-)
-
-// SysReadFile is here implemented as a noop for builds that do not support
-// the read syscall. For example Windows, or Linux on Google App Engine.
-func SysReadFile(file string) (string, error) {
- return "", fmt.Errorf("not supported on this platform")
-}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
deleted file mode 100644
index ac93cb42d..000000000
--- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "strconv"
-)
-
-// TODO(mdlayher): util packages are an anti-pattern and this should be moved
-// somewhere else that is more focused in the future.
-
-// A ValueParser enables parsing a single string into a variety of data types
-// in a concise and safe way. The Err method must be invoked after invoking
-// any other methods to ensure a value was successfully parsed.
-type ValueParser struct {
- v string
- err error
-}
-
-// NewValueParser creates a ValueParser using the input string.
-func NewValueParser(v string) *ValueParser {
- return &ValueParser{v: v}
-}
-
-// PInt64 interprets the underlying value as an int64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PInt64() *int64 {
- if vp.err != nil {
- return nil
- }
-
- // A base value of zero makes ParseInt infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseInt(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return nil
- }
-
- return &v
-}
-
-// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
-// that value.
-func (vp *ValueParser) PUInt64() *uint64 {
- if vp.err != nil {
- return nil
- }
-
- // A base value of zero makes ParseInt infer the correct base using the
- // string's prefix, if any.
- const base = 0
- v, err := strconv.ParseUint(vp.v, base, 64)
- if err != nil {
- vp.err = err
- return nil
- }
-
- return &v
-}
-
-// Err returns the last error, if any, encountered by the ValueParser.
-func (vp *ValueParser) Err() error {
- return vp.err
-}
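
The removed ValueParser trades per-call error checks for one deferred Err check after a chain of conversions, and base 0 lets strconv infer hex or octal from the string's prefix. A self-contained sketch of the same pattern (re-declared here since internal/util is not importable from outside procfs):

package main

import (
	"fmt"
	"strconv"
)

// valueParser mirrors the deleted util.ValueParser: conversions become no-ops
// after the first failure, which is reported once through err.
type valueParser struct {
	v   string
	err error
}

func (vp *valueParser) pInt64() *int64 {
	if vp.err != nil {
		return nil
	}
	v, err := strconv.ParseInt(vp.v, 0, 64) // base 0: infer base from prefix
	if err != nil {
		vp.err = err
		return nil
	}
	return &v
}

func main() {
	vp := &valueParser{v: "0x2a"}
	n := vp.pInt64()
	if vp.err != nil {
		fmt.Println("parse error:", vp.err)
		return
	}
	fmt.Println(*n) // 42
}
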
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
deleted file mode 100644
index 6fcad20af..000000000
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "strconv"
- "strings"
-)
-
-// For the proc file format details,
-// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
-
-// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
-type SoftnetEntry struct {
- // Number of processed packets
- Processed uint
- // Number of dropped packets
- Dropped uint
- // Number of times processing packets ran out of quota
- TimeSqueezed uint
-}
-
-// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
-// and then return a slice of SoftnetEntry's.
-func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
- if err != nil {
- return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
- }
-
- return parseSoftnetEntries(data)
-}
-
-func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
- lines := strings.Split(string(data), "\n")
- entries := make([]SoftnetEntry, 0)
- var err error
- const (
- expectedColumns = 11
- )
- for _, line := range lines {
- columns := strings.Fields(line)
- width := len(columns)
- if width == 0 {
- continue
- }
- if width != expectedColumns {
- return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
- }
- var entry SoftnetEntry
- if entry, err = parseSoftnetEntry(columns); err != nil {
- return []SoftnetEntry{}, err
- }
- entries = append(entries, entry)
- }
-
- return entries, nil
-}
-
-func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
- var err error
- var processed, dropped, timeSqueezed uint64
- if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
- return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
- }
- if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
- return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
- }
- if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
- return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
- }
- return SoftnetEntry{
- Processed: uint(processed),
- Dropped: uint(dropped),
- TimeSqueezed: uint(timeSqueezed),
- }, nil
-}
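
Each row of /proc/net/softnet_stat is one CPU's counters printed as fixed-width hex; the deleted parser keeps only the first three columns. A short sketch of that per-row parse on a sample line (the values are made up for illustration):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// hexCol parses one hex column as the deleted parseSoftnetEntry does.
func hexCol(s string) uint64 {
	v, err := strconv.ParseUint(s, 16, 32)
	if err != nil {
		panic(err) // the sample row below is well formed
	}
	return v
}

func main() {
	// One sample row: processed, dropped, time_squeeze, then eight more columns.
	line := "00015c73 00020e76 00000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000"
	cols := strings.Fields(line)
	if len(cols) != 11 {
		panic("unexpected column count")
	}
	fmt.Println("processed:", hexCol(cols[0]))
	fmt.Println("dropped:  ", hexCol(cols[1]))
	fmt.Println("squeezed: ", hexCol(cols[2]))
}
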
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index b7c79cf77..41c148d06 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -279,33 +279,3 @@ func (p Proc) fileDescriptors() ([]string, error) {
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
-
-// FileDescriptorsInfo retrieves information about all file descriptors of
-// the process.
-func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- var fdinfos ProcFDInfos
-
- for _, n := range names {
- fdinfo, err := p.FDInfo(n)
- if err != nil {
- continue
- }
- fdinfos = append(fdinfos, *fdinfo)
- }
-
- return fdinfos, nil
-}
-
-// Schedstat returns task scheduling information for the process.
-func (p Proc) Schedstat() (ProcSchedstat, error) {
- contents, err := ioutil.ReadFile(p.path("schedstat"))
- if err != nil {
- return ProcSchedstat{}, err
- }
- return parseProcSchedstat(string(contents))
-}
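
The two Proc methods dropped from proc.go above were part of the public procfs API, so code depending on them will not compile against this older vendored copy. An illustrative caller, assuming a newer procfs (with procfs.Self and these methods) on the module path:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}

	// Removed in this vendored copy: per-fd info plus the inotify watch total.
	fdinfos, err := p.FileDescriptorsInfo()
	if err != nil {
		panic(err)
	}
	watches, _ := fdinfos.InotifyWatchLen()
	fmt.Println("open fds:", len(fdinfos), "inotify watches:", watches)

	// Also removed: per-process scheduler statistics.
	sched, err := p.Schedstat()
	if err != nil {
		panic(err)
	}
	fmt.Println("cpu time:", sched.RunningNanoseconds, "ns")
}
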
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
deleted file mode 100644
index 83b67d1bd..000000000
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-)
-
-// Regexp variables
-var (
- rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
- rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
- rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
- rInotify = regexp.MustCompile(`^inotify`)
-)
-
-// ProcFDInfo contains represents file descriptor information.
-type ProcFDInfo struct {
- // File descriptor
- FD string
- // File offset
- Pos string
- // File access mode and status flags
- Flags string
- // Mount point ID
- MntID string
- // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only)
- InotifyInfos []InotifyInfo
-}
-
-// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
-func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
- f, err := os.Open(p.path("fdinfo", fd))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- fdinfo, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, fmt.Errorf("could not read %s: %s", f.Name(), err)
- }
-
- var text, pos, flags, mntid string
- var inotify []InotifyInfo
-
- scanner := bufio.NewScanner(strings.NewReader(string(fdinfo)))
- for scanner.Scan() {
- text = scanner.Text()
- if rPos.MatchString(text) {
- pos = rPos.FindStringSubmatch(text)[1]
- } else if rFlags.MatchString(text) {
- flags = rFlags.FindStringSubmatch(text)[1]
- } else if rMntID.MatchString(text) {
- mntid = rMntID.FindStringSubmatch(text)[1]
- } else if rInotify.MatchString(text) {
- newInotify, err := parseInotifyInfo(text)
- if err != nil {
- return nil, err
- }
- inotify = append(inotify, *newInotify)
- }
- }
-
- i := &ProcFDInfo{
- FD: fd,
- Pos: pos,
- Flags: flags,
- MntID: mntid,
- InotifyInfos: inotify,
- }
-
- return i, nil
-}
-
-// InotifyInfo represents a single inotify line in the fdinfo file.
-type InotifyInfo struct {
- // Watch descriptor number
- WD string
- // Inode number
- Ino string
- // Device ID
- Sdev string
- // Mask of events being monitored
- Mask string
-}
-
-// InotifyInfo constructor. Only available on kernel 3.8+.
-func parseInotifyInfo(line string) (*InotifyInfo, error) {
- r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
- m := r.FindStringSubmatch(line)
- i := &InotifyInfo{
- WD: m[1],
- Ino: m[2],
- Sdev: m[3],
- Mask: m[4],
- }
- return i, nil
-}
-
-// ProcFDInfos represents a list of ProcFDInfo structs.
-type ProcFDInfos []ProcFDInfo
-
-func (p ProcFDInfos) Len() int { return len(p) }
-func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
-
-// InotifyWatchLen returns the total number of inotify watches
-func (p ProcFDInfos) InotifyWatchLen() (int, error) {
- length := 0
- for _, f := range p {
- length += len(f.InotifyInfos)
- }
-
- return length, nil
-}
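
The deleted FDInfo constructor is a line-by-line regex scan of /proc/<pid>/fdinfo/<fd>. The sketch below reuses those regexes on a hypothetical fdinfo payload to show what each one captures:

package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

// The same patterns as the deleted file: position, flags, mount ID, and the
// optional inotify lines (kernel 3.8+ only).
var (
	rPos     = regexp.MustCompile(`^pos:\s+(\d+)$`)
	rFlags   = regexp.MustCompile(`^flags:\s+(\d+)$`)
	rMntID   = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
	rInotify = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
)

func main() {
	sample := "pos:\t0\nflags:\t02004002\nmnt_id:\t13\ninotify wd:3 ino:1 sdev:34 mask:fce\n"
	s := bufio.NewScanner(strings.NewReader(sample))
	for s.Scan() {
		line := s.Text()
		switch {
		case rPos.MatchString(line):
			fmt.Println("pos:   ", rPos.FindStringSubmatch(line)[1])
		case rFlags.MatchString(line):
			fmt.Println("flags: ", rFlags.FindStringSubmatch(line)[1])
		case rMntID.MatchString(line):
			fmt.Println("mnt_id:", rMntID.FindStringSubmatch(line)[1])
		case rInotify.MatchString(line):
			m := rInotify.FindStringSubmatch(line)
			fmt.Println("inotify wd:", m[1], "mask:", m[4])
		}
	}
}
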
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index ad290fae7..6b4b61f71 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -21,7 +21,7 @@ import (
"strings"
)
-// ProcStatus provides status information about the process,
+// ProcStat provides status information about the process,
// read from /proc/[pid]/stat.
type ProcStatus struct {
// The process ID.
@@ -29,9 +29,6 @@ type ProcStatus struct {
// The process name.
Name string
- // Thread group ID.
- TGID int
-
// Peak virtual memory size.
VmPeak uint64
// Virtual memory size.
@@ -116,8 +113,6 @@ func (p Proc) NewStatus() (ProcStatus, error) {
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
- case "Tgid":
- s.TGID = int(vUint)
case "Name":
s.Name = vString
case "VmPeak":
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
deleted file mode 100644
index a4c4089ac..000000000
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "errors"
- "os"
- "regexp"
- "strconv"
-)
-
-var (
- cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
- procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
-)
-
-// Schedstat contains scheduler statistics from /proc/schedstat
-//
-// See
-// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
-// for a detailed description of what these numbers mean.
-//
-// Note the current kernel documentation claims some of the time units are in
-// jiffies when they are actually in nanoseconds since 2.6.23 with the
-// introduction of CFS. A fix to the documentation is pending. See
-// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
-type Schedstat struct {
- CPUs []*SchedstatCPU
-}
-
-// SchedstatCPU contains the values from one "cpu<N>" line
-type SchedstatCPU struct {
- CPUNum string
-
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// ProcSchedstat contains the values from /proc/<pid>/schedstat
-type ProcSchedstat struct {
- RunningNanoseconds uint64
- WaitingNanoseconds uint64
- RunTimeslices uint64
-}
-
-// Schedstat reads data from /proc/schedstat
-func (fs FS) Schedstat() (*Schedstat, error) {
- file, err := os.Open(fs.proc.Path("schedstat"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- stats := &Schedstat{}
- scanner := bufio.NewScanner(file)
-
- for scanner.Scan() {
- match := cpuLineRE.FindStringSubmatch(scanner.Text())
- if match != nil {
- cpu := &SchedstatCPU{}
- cpu.CPUNum = match[1]
-
- cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
- if err != nil {
- continue
- }
-
- cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
- if err != nil {
- continue
- }
-
- stats.CPUs = append(stats.CPUs, cpu)
- }
- }
-
- return stats, nil
-}
-
-func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
- match := procLineRE.FindStringSubmatch(contents)
-
- if match != nil {
- stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
- if err != nil {
- return
- }
-
- stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
- if err != nil {
- return
- }
-
- stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
- return
- }
-
- err = errors.New("could not parse schedstat")
- return
-}
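For reference, the per-process format handled by the removed parseProcSchedstat is just three space-separated counters. A minimal standalone sketch of that parsing, outside the procfs package and using strings.Fields instead of the package's regexp (the sample input value is made up):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ProcSchedstat mirrors the removed struct: running time (ns), runqueue
// wait time (ns), and number of timeslices run, from /proc/<pid>/schedstat.
type ProcSchedstat struct {
	RunningNanoseconds uint64
	WaitingNanoseconds uint64
	RunTimeslices      uint64
}

func parseProcSchedstat(contents string) (ProcSchedstat, error) {
	fields := strings.Fields(strings.TrimSpace(contents))
	if len(fields) < 3 {
		return ProcSchedstat{}, fmt.Errorf("could not parse schedstat: %q", contents)
	}
	var s ProcSchedstat
	var err error
	if s.RunningNanoseconds, err = strconv.ParseUint(fields[0], 10, 64); err != nil {
		return ProcSchedstat{}, err
	}
	if s.WaitingNanoseconds, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
		return ProcSchedstat{}, err
	}
	if s.RunTimeslices, err = strconv.ParseUint(fields[2], 10, 64); err != nil {
		return ProcSchedstat{}, err
	}
	return s, nil
}

func main() {
	stats, err := parseProcSchedstat("22625563 5363969 105")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", stats)
}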
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
deleted file mode 100644
index cb1389141..000000000
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-// Each setting is exposed as a single file.
-// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
-// and numa_zonelist_order (deprecated) which is a string
-type VM struct {
- AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
- BlockDump *int64 // /proc/sys/vm/block_dump
- CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
- DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
- DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
- DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
- DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
- DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
- DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
- DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
- DropCaches *int64 // /proc/sys/vm/drop_caches
- ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
- HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
- LaptopMode *int64 // /proc/sys/vm/laptop_mode
- LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
- LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
- MaxMapCount *int64 // /proc/sys/vm/max_map_count
- MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
- MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
- MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
- MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
- MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
- MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
- NrHugepages *int64 // /proc/sys/vm/nr_hugepages
- NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
- NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
- NumaStat *int64 // /proc/sys/vm/numa_stat
- NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
- OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
- OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
- OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
- OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
- OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
- PageCluster *int64 // /proc/sys/vm/page-cluster
- PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
- PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
- StatInterval *int64 // /proc/sys/vm/stat_interval
- Swappiness *int64 // /proc/sys/vm/swappiness
- UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
- VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
- WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
- WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
- ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
-}
-
-// VM reads the VM statistics from the specified `proc` filesystem.
-func (fs FS) VM() (*VM, error) {
- path := fs.proc.Path("sys/vm")
- file, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
- if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%s is not a directory", path)
- }
-
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- var vm VM
- for _, f := range files {
- if f.IsDir() {
- continue
- }
-
- name := filepath.Join(path, f.Name())
- // ignore errors on read, as there are some write only
- // in /proc/sys/vm
- value, err := util.SysReadFile(name)
- if err != nil {
- continue
- }
- vp := util.NewValueParser(value)
-
- switch f.Name() {
- case "admin_reserve_kbytes":
- vm.AdminReserveKbytes = vp.PInt64()
- case "block_dump":
- vm.BlockDump = vp.PInt64()
- case "compact_unevictable_allowed":
- vm.CompactUnevictableAllowed = vp.PInt64()
- case "dirty_background_bytes":
- vm.DirtyBackgroundBytes = vp.PInt64()
- case "dirty_background_ratio":
- vm.DirtyBackgroundRatio = vp.PInt64()
- case "dirty_bytes":
- vm.DirtyBytes = vp.PInt64()
- case "dirty_expire_centisecs":
- vm.DirtyExpireCentisecs = vp.PInt64()
- case "dirty_ratio":
- vm.DirtyRatio = vp.PInt64()
- case "dirtytime_expire_seconds":
- vm.DirtytimeExpireSeconds = vp.PInt64()
- case "dirty_writeback_centisecs":
- vm.DirtyWritebackCentisecs = vp.PInt64()
- case "drop_caches":
- vm.DropCaches = vp.PInt64()
- case "extfrag_threshold":
- vm.ExtfragThreshold = vp.PInt64()
- case "hugetlb_shm_group":
- vm.HugetlbShmGroup = vp.PInt64()
- case "laptop_mode":
- vm.LaptopMode = vp.PInt64()
- case "legacy_va_layout":
- vm.LegacyVaLayout = vp.PInt64()
- case "lowmem_reserve_ratio":
- stringSlice := strings.Fields(value)
- pint64Slice := make([]*int64, 0, len(stringSlice))
- for _, value := range stringSlice {
- vp := util.NewValueParser(value)
- pint64Slice = append(pint64Slice, vp.PInt64())
- }
- vm.LowmemReserveRatio = pint64Slice
- case "max_map_count":
- vm.MaxMapCount = vp.PInt64()
- case "memory_failure_early_kill":
- vm.MemoryFailureEarlyKill = vp.PInt64()
- case "memory_failure_recovery":
- vm.MemoryFailureRecovery = vp.PInt64()
- case "min_free_kbytes":
- vm.MinFreeKbytes = vp.PInt64()
- case "min_slab_ratio":
- vm.MinSlabRatio = vp.PInt64()
- case "min_unmapped_ratio":
- vm.MinUnmappedRatio = vp.PInt64()
- case "mmap_min_addr":
- vm.MmapMinAddr = vp.PInt64()
- case "nr_hugepages":
- vm.NrHugepages = vp.PInt64()
- case "nr_hugepages_mempolicy":
- vm.NrHugepagesMempolicy = vp.PInt64()
- case "nr_overcommit_hugepages":
- vm.NrOvercommitHugepages = vp.PInt64()
- case "numa_stat":
- vm.NumaStat = vp.PInt64()
- case "numa_zonelist_order":
- vm.NumaZonelistOrder = value
- case "oom_dump_tasks":
- vm.OomDumpTasks = vp.PInt64()
- case "oom_kill_allocating_task":
- vm.OomKillAllocatingTask = vp.PInt64()
- case "overcommit_kbytes":
- vm.OvercommitKbytes = vp.PInt64()
- case "overcommit_memory":
- vm.OvercommitMemory = vp.PInt64()
- case "overcommit_ratio":
- vm.OvercommitRatio = vp.PInt64()
- case "page-cluster":
- vm.PageCluster = vp.PInt64()
- case "panic_on_oom":
- vm.PanicOnOom = vp.PInt64()
- case "percpu_pagelist_fraction":
- vm.PercpuPagelistFraction = vp.PInt64()
- case "stat_interval":
- vm.StatInterval = vp.PInt64()
- case "swappiness":
- vm.Swappiness = vp.PInt64()
- case "user_reserve_kbytes":
- vm.UserReserveKbytes = vp.PInt64()
- case "vfs_cache_pressure":
- vm.VfsCachePressure = vp.PInt64()
- case "watermark_boost_factor":
- vm.WatermarkBoostFactor = vp.PInt64()
- case "watermark_scale_factor":
- vm.WatermarkScaleFactor = vp.PInt64()
- case "zone_reclaim_mode":
- vm.ZoneReclaimMode = vp.PInt64()
- }
- if err := vp.Err(); err != nil {
- return nil, err
- }
- }
-
- return &vm, nil
-}
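The removed vm.go relies on the layout its own comment describes: nearly every file under /proc/sys/vm holds a single value on one line. A minimal sketch of reading one such setting without the package (the chosen sysctl is only an example, and some files in that directory are write-only and will fail on read):

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// readVMSetting reads a single-valued sysctl file from /proc/sys/vm.
func readVMSetting(name string) (int64, error) {
	data, err := ioutil.ReadFile("/proc/sys/vm/" + name)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	swappiness, err := readVMSetting("swappiness")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("vm.swappiness =", swappiness)
}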
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
deleted file mode 100644
index e941503d5..000000000
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package procfs
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "regexp"
- "strings"
-
- "github.com/prometheus/procfs/internal/util"
-)
-
-// Zoneinfo holds info parsed from /proc/zoneinfo.
-type Zoneinfo struct {
- Node string
- Zone string
- NrFreePages *int64
- Min *int64
- Low *int64
- High *int64
- Scanned *int64
- Spanned *int64
- Present *int64
- Managed *int64
- NrActiveAnon *int64
- NrInactiveAnon *int64
- NrIsolatedAnon *int64
- NrAnonPages *int64
- NrAnonTransparentHugepages *int64
- NrActiveFile *int64
- NrInactiveFile *int64
- NrIsolatedFile *int64
- NrFilePages *int64
- NrSlabReclaimable *int64
- NrSlabUnreclaimable *int64
- NrMlockStack *int64
- NrKernelStack *int64
- NrMapped *int64
- NrDirty *int64
- NrWriteback *int64
- NrUnevictable *int64
- NrShmem *int64
- NrDirtied *int64
- NrWritten *int64
- NumaHit *int64
- NumaMiss *int64
- NumaForeign *int64
- NumaInterleave *int64
- NumaLocal *int64
- NumaOther *int64
- Protection []*int64
-}
-
-var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
-
-// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of
-// structs containing the relevant info. More information available here:
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
-func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
- if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
- }
- zoneinfo, err := parseZoneinfo(data)
- if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
- }
- return zoneinfo, nil
-}
-
-func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
-
- zoneinfo := []Zoneinfo{}
-
- zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
- for _, block := range zoneinfoBlocks {
- var zoneinfoElement Zoneinfo
- lines := strings.Split(string(block), "\n")
- for _, line := range lines {
-
- if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
- zoneinfoElement.Node = nodeZone[1]
- zoneinfoElement.Zone = nodeZone[2]
- continue
- }
- if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
- zoneinfoElement.Zone = ""
- continue
- }
- parts := strings.Fields(strings.TrimSpace(line))
- if len(parts) < 2 {
- continue
- }
- vp := util.NewValueParser(parts[1])
- switch parts[0] {
- case "nr_free_pages":
- zoneinfoElement.NrFreePages = vp.PInt64()
- case "min":
- zoneinfoElement.Min = vp.PInt64()
- case "low":
- zoneinfoElement.Low = vp.PInt64()
- case "high":
- zoneinfoElement.High = vp.PInt64()
- case "scanned":
- zoneinfoElement.Scanned = vp.PInt64()
- case "spanned":
- zoneinfoElement.Spanned = vp.PInt64()
- case "present":
- zoneinfoElement.Present = vp.PInt64()
- case "managed":
- zoneinfoElement.Managed = vp.PInt64()
- case "nr_active_anon":
- zoneinfoElement.NrActiveAnon = vp.PInt64()
- case "nr_inactive_anon":
- zoneinfoElement.NrInactiveAnon = vp.PInt64()
- case "nr_isolated_anon":
- zoneinfoElement.NrIsolatedAnon = vp.PInt64()
- case "nr_anon_pages":
- zoneinfoElement.NrAnonPages = vp.PInt64()
- case "nr_anon_transparent_hugepages":
- zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
- case "nr_active_file":
- zoneinfoElement.NrActiveFile = vp.PInt64()
- case "nr_inactive_file":
- zoneinfoElement.NrInactiveFile = vp.PInt64()
- case "nr_isolated_file":
- zoneinfoElement.NrIsolatedFile = vp.PInt64()
- case "nr_file_pages":
- zoneinfoElement.NrFilePages = vp.PInt64()
- case "nr_slab_reclaimable":
- zoneinfoElement.NrSlabReclaimable = vp.PInt64()
- case "nr_slab_unreclaimable":
- zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
- case "nr_mlock_stack":
- zoneinfoElement.NrMlockStack = vp.PInt64()
- case "nr_kernel_stack":
- zoneinfoElement.NrKernelStack = vp.PInt64()
- case "nr_mapped":
- zoneinfoElement.NrMapped = vp.PInt64()
- case "nr_dirty":
- zoneinfoElement.NrDirty = vp.PInt64()
- case "nr_writeback":
- zoneinfoElement.NrWriteback = vp.PInt64()
- case "nr_unevictable":
- zoneinfoElement.NrUnevictable = vp.PInt64()
- case "nr_shmem":
- zoneinfoElement.NrShmem = vp.PInt64()
- case "nr_dirtied":
- zoneinfoElement.NrDirtied = vp.PInt64()
- case "nr_written":
- zoneinfoElement.NrWritten = vp.PInt64()
- case "numa_hit":
- zoneinfoElement.NumaHit = vp.PInt64()
- case "numa_miss":
- zoneinfoElement.NumaMiss = vp.PInt64()
- case "numa_foreign":
- zoneinfoElement.NumaForeign = vp.PInt64()
- case "numa_interleave":
- zoneinfoElement.NumaInterleave = vp.PInt64()
- case "numa_local":
- zoneinfoElement.NumaLocal = vp.PInt64()
- case "numa_other":
- zoneinfoElement.NumaOther = vp.PInt64()
- case "protection:":
- protectionParts := strings.Split(line, ":")
- protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
- protectionValues = strings.Replace(protectionValues, ")", "", 1)
- protectionValues = strings.TrimSpace(protectionValues)
- protectionStringMap := strings.Split(protectionValues, ", ")
- val, err := util.ParsePInt64s(protectionStringMap)
- if err == nil {
- zoneinfoElement.Protection = val
- }
- }
-
- }
-
- zoneinfo = append(zoneinfo, zoneinfoElement)
- }
- return zoneinfo, nil
-}
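The removed parser splits /proc/zoneinfo into blocks on "\nNode" and pulls the node number and zone name out of each block header with nodeZoneRE. A tiny sketch of just that header match (the sample line is illustrative):

package main

import (
	"fmt"
	"regexp"
)

// nodeZoneRE matches headers such as "Node 0, zone   Normal".
var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)

func main() {
	header := "Node 0, zone   Normal"
	if m := nodeZoneRE.FindStringSubmatch(header); m != nil {
		fmt.Printf("node=%s zone=%s\n", m[1], m[2]) // node=0 zone=Normal
	}
}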
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go
index 49252a06d..f6e5e56ed 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go
@@ -111,7 +111,7 @@ func annotateEPERM(origErr error, spec port.Spec) error {
// origErr is unrelated to ip_unprivileged_port_start
return origErr
}
- text := fmt.Sprintf("cannot expose privileged port %d, you might need to add \"net.ipv4.ip_unprivileged_port_start=0\" (currently %d) to /etc/sysctl.conf", spec.ParentPort, start)
+ text := fmt.Sprintf("cannot expose privileged port %d, you can add 'net.ipv4.ip_unprivileged_port_start=%d' to /etc/sysctl.conf (currently %d)", spec.ParentPort, spec.ParentPort, start)
if filepath.Base(os.Args[0]) == "rootlesskit" {
// NOTE: The following sentence is appended only if Args[0] == "rootlesskit", because it does not apply to Podman (as of Podman v1.9).
// Podman launches the parent driver in the child user namespace (but in the parent network namespace), which disables the file capability.
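The reworded EPERM hint above points users at the net.ipv4.ip_unprivileged_port_start sysctl; the `start` value it reports is presumably read from the corresponding procfs file. A small sketch of checking that value directly (the port and error handling are illustrative, not the rootlesskit implementation):

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

func main() {
	// The sysctl named in the error message is exposed at this procfs path.
	data, err := ioutil.ReadFile("/proc/sys/net/ipv4/ip_unprivileged_port_start")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	start, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	port := 80 // illustrative parent port
	if port < start {
		fmt.Printf("port %d is privileged: unprivileged ports start at %d\n", port, start)
	}
}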
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 846636d75..2ceebc552 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -5,70 +5,95 @@ import (
"fmt"
"io"
"os"
- "sort"
- "strings"
-
- "github.com/spf13/pflag"
)
// Annotations for Bash completion.
const (
- BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ // BashCompCustom should be avoided as it only works for bash.
+ // Function RegisterFlagCompletionFunc() should be used instead.
BashCompCustom = "cobra_annotation_bash_completion_custom"
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
-func writePreamble(buf *bytes.Buffer, name string) {
- buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
- buf.WriteString(fmt.Sprintf(`
-__%[1]s_debug()
-{
- if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
- echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
- fi
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ return c.genBashCompletion(w, false)
}
-# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
-# _init_completion. This is a very minimal version of that function.
-__%[1]s_init_completion()
-{
- COMPREPLY=()
- _get_comp_words_by_ref "$@" cur prev words cword
+// GenBashCompletionWithDesc generates bash completion file with descriptions and writes to the passed writer.
+func (c *Command) GenBashCompletionWithDesc(w io.Writer) error {
+ return c.genBashCompletion(w, true)
}
-__%[1]s_index_of_word()
-{
- local w word=$1
- shift
- index=0
- for w in "$@"; do
- [[ $w = "$word" ]] && return
- index=$((index+1))
- done
- index=-1
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ return c.genBashCompletionFile(filename, false)
+}
+
+// GenBashCompletionFileWithDesc generates bash completion file with descriptions.
+func (c *Command) GenBashCompletionFileWithDesc(filename string) error {
+ return c.genBashCompletionFile(filename, true)
+}
+
+func (c *Command) genBashCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genBashCompletion(outFile, includeDesc)
}
-__%[1]s_contains_word()
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ genBashComp(buf, c.Name(), includeDesc)
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genBashComp(buf *bytes.Buffer, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+
+ buf.WriteString(fmt.Sprintf(`# bash completion for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
{
- local w word=$1; shift
- for w in "$@"; do
- [[ $w = "$word" ]] && return
- done
- return 1
+ if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
}
-__%[1]s_handle_go_custom_completion()
+__%[1]s_perform_completion()
{
- __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+ __%[1]s_debug
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $cword location, so we need
+ # to truncate the command-line ($words) up to the $cword location.
+ words=("${words[@]:0:$cword+1}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
local shellCompDirectiveError=%[3]d
local shellCompDirectiveNoSpace=%[4]d
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveLegacyCustomComp=%[8]d
+ local shellCompDirectiveLegacyCustomArgsComp=%[9]d
- local out requestComp lastParam lastChar comp directive args
+ local out requestComp lastParam lastChar comp directive args flagPrefix
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows handling aliases

@@ -77,16 +102,24 @@ __%[1]s_handle_go_custom_completion()
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
- __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+ __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
- __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ __%[1]s_debug "Adding extra empty parameter"
requestComp="${requestComp} \"\""
fi
- __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # When completing a flag with an = (e.g., %[1]s -n=<TAB>)
+ # bash focuses on the part after the =, so we need to remove
+ # the flag part from $cur
+ if [[ "${cur}" == -*=* ]]; then
+ flagPrefix="${cur%%%%=*}="
+ cur="${cur#*=}"
+ fi
+
+ __%[1]s_debug "Calling ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval "${requestComp}" 2>/dev/null)
@@ -98,23 +131,23 @@ __%[1]s_handle_go_custom_completion()
# There is no directive specified
directive=0
fi
- __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
- __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+ __%[1]s_debug "The completion directive is: ${directive}"
+ __%[1]s_debug "The completions are: ${out[*]}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
- __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ __%[1]s_debug "Received error from custom completion go code"
return
else
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
- __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ __%[1]s_debug "Activating no space"
compopt -o nospace
fi
fi
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
- __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ __%[1]s_debug "Activating no file completion"
compopt +o default
fi
fi
@@ -123,6 +156,7 @@ __%[1]s_handle_go_custom_completion()
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
+
# Do not use quotes around the $out variable or else newline
# characters will be kept.
for filter in ${out[*]}; do
@@ -134,545 +168,173 @@ __%[1]s_handle_go_custom_completion()
$filteringCmd
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
- local subDir
+
# Use printf to strip any trailing newline
+ local subdir
subdir=$(printf "%%s" "${out[0]}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
- __%[1]s_handle_subdirs_in_dir_flag "$subdir"
+ pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
else
__%[1]s_debug "Listing directories in ."
_filedir -d
fi
+ elif [ $((directive & shellCompDirectiveLegacyCustomComp)) -ne 0 ]; then
+ local cmd
+ __%[1]s_debug "Legacy custom completion. Directive: $directive, cmds: ${out[*]}"
+
+ # The following variables should get their value through the commands
+ # we have received as completions and are parsing below.
+ local last_command
+ local nouns
+
+ # Execute every command received
+ while IFS='' read -r cmd; do
+ __%[1]s_debug "About to execute: $cmd"
+ eval "$cmd"
+ done < <(printf "%%s\n" "${out[@]}")
+
+ __%[1]s_debug "last_command: $last_command"
+ __%[1]s_debug "nouns[0]: ${nouns[0]}, nouns[1]: ${nouns[1]}"
+
+ if [ $((directive & shellCompDirectiveLegacyCustomArgsComp)) -ne 0 ]; then
+ # We should call the global legacy custom completion function, if it is defined
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # Use command name qualified legacy custom func
+ __%[1]s_debug "About to call: __%[1]s_custom_func"
+ __%[1]s_custom_func
+ elif declare -F __custom_func >/dev/null; then
+ # Otherwise fall back to unqualified legacy custom func for compatibility
+ __%[1]s_debug "About to call: __custom_func"
+ __custom_func
+ fi
+ fi
else
+ local tab
+ tab=$(printf '\t')
+ local longest=0
+ # Look for the longest completion so that we can format things nicely
while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${out[*]}" -- "$cur")
- fi
-}
-
-__%[1]s_handle_reply()
-{
- __%[1]s_debug "${FUNCNAME[0]}"
- local comp
- case $cur in
- -*)
- if [[ $(type -t compopt) = "builtin" ]]; then
- compopt -o nospace
- fi
- local allflags
- if [ ${#must_have_one_flag[@]} -ne 0 ]; then
- allflags=("${must_have_one_flag[@]}")
- else
- allflags=("${flags[*]} ${two_word_flags[*]}")
- fi
- while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${allflags[*]}" -- "$cur")
- if [[ $(type -t compopt) = "builtin" ]]; then
- [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ comp=${comp%%%%$tab*}
+ if ((${#comp}>longest)); then
+ longest=${#comp}
fi
+ done < <(printf "%%s\n" "${out[@]}")
- # complete after --flag=abc
- if [[ $cur == *=* ]]; then
- if [[ $(type -t compopt) = "builtin" ]]; then
- compopt +o nospace
- fi
-
- local index flag
- flag="${cur%%=*}"
- __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
- COMPREPLY=()
- if [[ ${index} -ge 0 ]]; then
- PREFIX=""
- cur="${cur#*=}"
- ${flags_completion[${index}]}
- if [ -n "${ZSH_VERSION}" ]; then
- # zsh completion needs --flag= prefix
- eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
- fi
- fi
+ local completions=()
+ while IFS='' read -r comp; do
+ if [ -z "$comp" ]; then
+ continue
fi
- return 0;
- ;;
- esac
-
- # check if we are handling a flag with special work handling
- local index
- __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
- if [[ ${index} -ge 0 ]]; then
- ${flags_completion[${index}]}
- return
- fi
- # we are parsing a flag and don't have a special handler, no completion
- if [[ ${cur} != "${words[cword]}" ]]; then
- return
- fi
+ __%[1]s_debug "Original comp: $comp"
+ comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")"
+ __%[1]s_debug "Final comp: $comp"
+ completions+=("$comp")
+ done < <(printf "%%s\n" "${out[@]}")
- local completions
- completions=("${commands[@]}")
- if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
- completions+=("${must_have_one_noun[@]}")
- elif [[ -n "${has_completion_function}" ]]; then
- # if a go completion function is provided, defer to that function
- __%[1]s_handle_go_custom_completion
- fi
- if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
- completions+=("${must_have_one_flag[@]}")
- fi
- while IFS='' read -r comp; do
- COMPREPLY+=("$comp")
- done < <(compgen -W "${completions[*]}" -- "$cur")
-
- if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
while IFS='' read -r comp; do
+ # Although this script should only be used for bash,
+ # there may be programs that still convert the bash
+ # script into a zsh one. To continue supporting those
+ # programs, we do this single adaptation for zsh
+ if [ -n "${ZSH_VERSION}" ]; then
+ # zsh completion needs --flag= prefix
+ COMPREPLY+=("$flagPrefix$comp")
+ else
+ COMPREPLY+=("$comp")
+ fi
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ # If there is a single completion left, remove the description text
+ if [ ${#COMPREPLY[*]} -eq 1 ]; then
+ __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
+ comp="${COMPREPLY[0]%%%% *}"
+ __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
+ COMPREPLY=()
COMPREPLY+=("$comp")
- done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
- fi
-
- if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
- if declare -F __%[1]s_custom_func >/dev/null; then
- # try command name qualified custom func
- __%[1]s_custom_func
- else
- # otherwise fall back to unqualified for compatibility
- declare -F __custom_func >/dev/null && __custom_func
- fi
- fi
-
- # available in bash-completion >= 2, not always present on macOS
- if declare -F __ltrim_colon_completions >/dev/null; then
- __ltrim_colon_completions "$cur"
- fi
-
- # If there is only 1 completion and it is a flag with an = it will be completed
- # but we don't want a space after the =
- if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
- compopt -o nospace
- fi
-}
-
-# The arguments should be in the form "ext1|ext2|extn"
-__%[1]s_handle_filename_extension_flag()
-{
- local ext="$1"
- _filedir "@(${ext})"
-}
-
-__%[1]s_handle_subdirs_in_dir_flag()
-{
- local dir="$1"
- pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
-}
-
-__%[1]s_handle_flag()
-{
- __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-
- # if a command required a flag, and we found it, unset must_have_one_flag()
- local flagname=${words[c]}
- local flagvalue
- # if the word contained an =
- if [[ ${words[c]} == *"="* ]]; then
- flagvalue=${flagname#*=} # take in as flagvalue after the =
- flagname=${flagname%%=*} # strip everything after the =
- flagname="${flagname}=" # but put the = back
- fi
- __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
- if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
- must_have_one_flag=()
- fi
-
- # if you set a flag which only applies to this command, don't show subcommands
- if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
- commands=()
- fi
-
- # keep flag value with flagname as flaghash
- # flaghash variable is an associative array which is only supported in bash > 3.
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
- if [ -n "${flagvalue}" ] ; then
- flaghash[${flagname}]=${flagvalue}
- elif [ -n "${words[ $((c+1)) ]}" ] ; then
- flaghash[${flagname}]=${words[ $((c+1)) ]}
- else
- flaghash[${flagname}]="true" # pad "true" for bool flag
- fi
- fi
-
- # skip the argument to a two word flag
- if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
- __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
- c=$((c+1))
- # if we are looking for a flags value, don't show commands
- if [[ $c -eq $cword ]]; then
- commands=()
fi
fi
- c=$((c+1))
-
+ __%[1]s_handle_special_char "$cur" :
+ __%[1]s_handle_special_char "$cur" =
}
-__%[1]s_handle_noun()
+__%[1]s_handle_special_char()
{
- __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-
- if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
- must_have_one_noun=()
- elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
- must_have_one_noun=()
+ local comp="$1"
+ local char=$2
+ if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
+ local word=${comp%%"${comp##*${char}}"}
+ local idx=${#COMPREPLY[*]}
+ while [[ $((--idx)) -ge 0 ]]; do
+ COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
+ done
fi
-
- nouns+=("${words[c]}")
- c=$((c+1))
}
-__%[1]s_handle_command()
+__%[1]s_format_comp_descriptions()
{
- __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
-
- local next_command
- if [[ -n ${last_command} ]]; then
- next_command="_${last_command}_${words[c]//:/__}"
- else
- if [[ $c -eq 0 ]]; then
- next_command="_%[1]s_root_command"
+ local tab
+ tab=$(printf '\t')
+ local comp="$1"
+ local longest=$2
+
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if [[ $maxdesclength -gt 8 ]]; then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
else
- next_command="_${words[c]//:/__}"
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
fi
- fi
- c=$((c+1))
- __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
- declare -F "$next_command" >/dev/null && $next_command
-}
-__%[1]s_handle_word()
-{
- if [[ $c -ge $cword ]]; then
- __%[1]s_handle_reply
- return
- fi
- __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
- if [[ "${words[c]}" == -* ]]; then
- __%[1]s_handle_flag
- elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
- __%[1]s_handle_command
- elif [[ $c -eq 0 ]]; then
- __%[1]s_handle_command
- elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
- # aliashash variable is an associative array which is only supported in bash > 3.
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
- words[c]=${aliashash[${words[c]}]}
- __%[1]s_handle_command
- else
- __%[1]s_handle_noun
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if [ $maxdesclength -gt 0 ]; then
+ if [ ${#desc} -gt $maxdesclength ]; then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
fi
- else
- __%[1]s_handle_noun
fi
- __%[1]s_handle_word
-}
-`, name, ShellCompNoDescRequestCmd,
- ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ # Must use printf to escape all special characters
+ printf "%%q" "${comp}"
}
-func writePostscript(buf *bytes.Buffer, name string) {
- name = strings.Replace(name, ":", "__", -1)
- buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
- buf.WriteString(fmt.Sprintf(`{
+__start_%[1]s()
+{
local cur prev words cword
- declare -A flaghash 2>/dev/null || :
- declare -A aliashash 2>/dev/null || :
- if declare -F _init_completion >/dev/null 2>&1; then
- _init_completion -s || return
- else
- __%[1]s_init_completion -n "=" || return
- fi
- local c=0
- local flags=()
- local two_word_flags=()
- local local_nonpersistent_flags=()
- local flags_with_completion=()
- local flags_completion=()
- local commands=("%[1]s")
- local must_have_one_flag=()
- local must_have_one_noun=()
- local has_completion_function
- local last_command
- local nouns=()
-
- __%[1]s_handle_word
+ COMPREPLY=()
+ _get_comp_words_by_ref -n "=:" cur prev words cword
+
+ __%[1]s_perform_completion
}
-`, name))
- buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
- complete -o default -F __start_%s %s
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%[1]s %[1]s
else
- complete -o default -o nospace -F __start_%s %s
+ complete -o default -o nospace -F __start_%[1]s %[1]s
fi
-`, name, name, name, name))
- buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
-}
-
-func writeCommands(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" commands=()\n")
- for _, c := range cmd.Commands() {
- if !c.IsAvailableCommand() && c != cmd.helpCommand {
- continue
- }
- buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
- writeCmdAliases(buf, c)
- }
- buf.WriteString("\n")
-}
-
-func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
- for key, value := range annotations {
- switch key {
- case BashCompFilenameExt:
- buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
-
- var ext string
- if len(value) > 0 {
- ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
- } else {
- ext = "_filedir"
- }
- buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
- case BashCompCustom:
- buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
- if len(value) > 0 {
- handlers := strings.Join(value, "; ")
- buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
- } else {
- buf.WriteString(" flags_completion+=(:)\n")
- }
- case BashCompSubdirsInDir:
- buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
-
- var ext string
- if len(value) == 1 {
- ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
- } else {
- ext = "_filedir -d"
- }
- buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
- }
- }
-}
-
-func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
- name := flag.Shorthand
- format := " "
- if len(flag.NoOptDefVal) == 0 {
- format += "two_word_"
- }
- format += "flags+=(\"-%s\")\n"
- buf.WriteString(fmt.Sprintf(format, name))
- writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
-}
-
-func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
- name := flag.Name
- format := " flags+=(\"--%s"
- if len(flag.NoOptDefVal) == 0 {
- format += "="
- }
- format += "\")\n"
- buf.WriteString(fmt.Sprintf(format, name))
- if len(flag.NoOptDefVal) == 0 {
- format = " two_word_flags+=(\"--%s\")\n"
- buf.WriteString(fmt.Sprintf(format, name))
- }
- writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
-}
-
-func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
- name := flag.Name
- format := " local_nonpersistent_flags+=(\"--%[1]s\")\n"
- if len(flag.NoOptDefVal) == 0 {
- format += " local_nonpersistent_flags+=(\"--%[1]s=\")\n"
- }
- buf.WriteString(fmt.Sprintf(format, name))
- if len(flag.Shorthand) > 0 {
- buf.WriteString(fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
- }
-}
-
-// Setup annotations for go completions for registered flags
-func prepareCustomAnnotationsForFlags(cmd *Command) {
- for flag := range flagCompletionFunctions {
- // Make sure the completion script calls the __*_go_custom_completion function for
- // every registered flag. We need to do this here (and not when the flag was registered
- // for completion) so that we can know the root command name for the prefix
- // of __<prefix>_go_custom_completion
- if flag.Annotations == nil {
- flag.Annotations = map[string][]string{}
- }
- flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
- }
-}
-
-func writeFlags(buf *bytes.Buffer, cmd *Command) {
- prepareCustomAnnotationsForFlags(cmd)
- buf.WriteString(` flags=()
- two_word_flags=()
- local_nonpersistent_flags=()
- flags_with_completion=()
- flags_completion=()
-
-`)
- localNonPersistentFlags := cmd.LocalNonPersistentFlags()
- cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
- if nonCompletableFlag(flag) {
- return
- }
- writeFlag(buf, flag, cmd)
- if len(flag.Shorthand) > 0 {
- writeShortFlag(buf, flag, cmd)
- }
- // localNonPersistentFlags are used to stop the completion of subcommands when one is set
- // if TraverseChildren is true we should allow to complete subcommands
- if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
- writeLocalNonPersistentFlag(buf, flag)
- }
- })
- cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
- if nonCompletableFlag(flag) {
- return
- }
- writeFlag(buf, flag, cmd)
- if len(flag.Shorthand) > 0 {
- writeShortFlag(buf, flag, cmd)
- }
- })
-
- buf.WriteString("\n")
-}
-
-func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" must_have_one_flag=()\n")
- flags := cmd.NonInheritedFlags()
- flags.VisitAll(func(flag *pflag.Flag) {
- if nonCompletableFlag(flag) {
- return
- }
- for key := range flag.Annotations {
- switch key {
- case BashCompOneRequiredFlag:
- format := " must_have_one_flag+=(\"--%s"
- if flag.Value.Type() != "bool" {
- format += "="
- }
- format += "\")\n"
- buf.WriteString(fmt.Sprintf(format, flag.Name))
-
- if len(flag.Shorthand) > 0 {
- buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
- }
- }
- }
- })
-}
-
-func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" must_have_one_noun=()\n")
- sort.Sort(sort.StringSlice(cmd.ValidArgs))
- for _, value := range cmd.ValidArgs {
- // Remove any description that may be included following a tab character.
- // Descriptions are not supported by bash completion.
- value = strings.Split(value, "\t")[0]
- buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
- }
- if cmd.ValidArgsFunction != nil {
- buf.WriteString(" has_completion_function=1\n")
- }
-}
-
-func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
- if len(cmd.Aliases) == 0 {
- return
- }
-
- sort.Sort(sort.StringSlice(cmd.Aliases))
-
- buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
- for _, value := range cmd.Aliases {
- buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
- buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
- }
- buf.WriteString(` fi`)
- buf.WriteString("\n")
-}
-func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" noun_aliases=()\n")
- sort.Sort(sort.StringSlice(cmd.ArgAliases))
- for _, value := range cmd.ArgAliases {
- buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
- }
-}
-
-func gen(buf *bytes.Buffer, cmd *Command) {
- for _, c := range cmd.Commands() {
- if !c.IsAvailableCommand() && c != cmd.helpCommand {
- continue
- }
- gen(buf, c)
- }
- commandName := cmd.CommandPath()
- commandName = strings.Replace(commandName, " ", "_", -1)
- commandName = strings.Replace(commandName, ":", "__", -1)
-
- if cmd.Root() == cmd {
- buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
- } else {
- buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
- }
-
- buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
- buf.WriteString("\n")
- buf.WriteString(" command_aliases=()\n")
- buf.WriteString("\n")
-
- writeCommands(buf, cmd)
- writeFlags(buf, cmd)
- writeRequiredFlag(buf, cmd)
- writeRequiredNouns(buf, cmd)
- writeArgAliases(buf, cmd)
- buf.WriteString("}\n\n")
-}
-
-// GenBashCompletion generates bash completion file and writes to the passed writer.
-func (c *Command) GenBashCompletion(w io.Writer) error {
- buf := new(bytes.Buffer)
- writePreamble(buf, c.Name())
- if len(c.BashCompletionFunction) > 0 {
- buf.WriteString(c.BashCompletionFunction + "\n")
- }
- gen(buf, c)
- writePostscript(buf, c.Name())
-
- _, err := buf.WriteTo(w)
- return err
-}
-
-func nonCompletableFlag(flag *pflag.Flag) bool {
- return flag.Hidden || len(flag.Deprecated) > 0
-}
-
-// GenBashCompletionFile generates bash completion file.
-func (c *Command) GenBashCompletionFile(filename string) error {
- outFile, err := os.Create(filename)
- if err != nil {
- return err
- }
- defer outFile.Close()
-
- return c.GenBashCompletion(outFile)
+# ex: ts=4 sw=4 et filetype=sh
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ shellCompDirectiveLegacyCustomComp, shellCompDirectiveLegacyCustomArgsComp))
}
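A short usage sketch of the writer-based generators added in this hunk, as this vendored copy exposes them (the program name is hypothetical):

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use:   "myapp",
		Short: "example program",
		Run:   func(cmd *cobra.Command, args []string) {},
	}

	// Emit the description-enabled bash completion script on stdout;
	// GenBashCompletionFileWithDesc("<path>") would write it to a file instead.
	if err := rootCmd.GenBashCompletionWithDesc(os.Stdout); err != nil {
		os.Exit(1)
	}
}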
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
index f9e88e081..e2c68ae1e 100644
--- a/vendor/github.com/spf13/cobra/custom_completions.go
+++ b/vendor/github.com/spf13/cobra/custom_completions.go
@@ -51,6 +51,11 @@ const (
// obtain the same behavior but only for flags.
ShellCompDirectiveFilterDirs
+ // For internal use only.
+ // Used to maintain backwards-compatibility with the legacy bash custom completions.
+ shellCompDirectiveLegacyCustomComp
+ shellCompDirectiveLegacyCustomArgsComp
+
// ===========================================================================
// All directives using iota should be above this one.
@@ -94,6 +99,12 @@ func (d ShellCompDirective) string() string {
if d&ShellCompDirectiveFilterDirs != 0 {
directives = append(directives, "ShellCompDirectiveFilterDirs")
}
+ if d&shellCompDirectiveLegacyCustomComp != 0 {
+ directives = append(directives, "shellCompDirectiveLegacyCustomComp")
+ }
+ if d&shellCompDirectiveLegacyCustomArgsComp != 0 {
+ directives = append(directives, "shellCompDirectiveLegacyCustomArgsComp")
+ }
if len(directives) == 0 {
directives = append(directives, "ShellCompDirectiveDefault")
}
@@ -149,10 +160,6 @@ func (c *Command) initCompleteCmd(args []string) {
fmt.Fprintln(finalCmd.OutOrStdout(), comp)
}
- if directive >= shellCompDirectiveMaxValue {
- directive = ShellCompDirectiveDefault
- }
-
// As the last printout, print the completion directive for the completion script to parse.
// The directive integer comes last, following a single colon (:).
// The completion script expects :<directive>
@@ -362,6 +369,10 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
var comps []string
comps, directive = completionFn(finalCmd, finalArgs, toComplete)
completions = append(completions, comps...)
+ } else {
+ // If there is no Go custom completion defined, check for legacy bash
+ // custom completion to preserve backwards-compatibility
+ completions, directive = checkLegacyCustomCompletion(finalCmd, finalArgs, flag, completions, directive)
}
return finalCmd, completions, directive, nil
@@ -442,7 +453,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
if len(lastArg) > 0 && lastArg[0] == '-' {
if index := strings.Index(lastArg, "="); index >= 0 {
// Flag with an =
- flagName = strings.TrimLeft(lastArg[:index], "-")
+ if strings.HasPrefix(lastArg[:index], "--") {
+ // Flag has full name
+ flagName = lastArg[2:index]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = lastArg[index-1 : index]
+ }
lastArg = lastArg[index+1:]
flagWithEqual = true
} else {
@@ -459,8 +479,16 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p
// If the flag contains an = it means it has already been fully processed,
// so we don't need to deal with it here.
if index := strings.Index(prevArg, "="); index < 0 {
- flagName = strings.TrimLeft(prevArg, "-")
-
+ if strings.HasPrefix(prevArg, "--") {
+ // Flag has full name
+ flagName = prevArg[2:]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = prevArg[len(prevArg)-1:]
+ }
// Remove the uncompleted flag or else there could be an error created
// for an invalid value for that flag
trimmedArgs = args[:len(args)-1]
@@ -513,6 +541,65 @@ func findFlag(cmd *Command, name string) *pflag.Flag {
return cmd.Flag(name)
}
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// This function checks if legacy bash custom completion should be performed and if so,
+// it provides the shell script with the necessary information.
+func checkLegacyCustomCompletion(cmd *Command, args []string, flag *pflag.Flag, completions []string, directive ShellCompDirective) ([]string, ShellCompDirective) {
+ // Check if any legacy custom completion is defined for the program
+ if len(cmd.Root().BashCompletionFunction) > 0 {
+ // Legacy custom completion is only triggered if no other completions were found.
+ if len(completions) == 0 {
+ if flag != nil {
+ // For legacy custom flag completion, we must let the script know the bash
+ // functions it should call based on the content of the annotation BashCompCustom.
+ if values, present := flag.Annotations[BashCompCustom]; present {
+ if len(values) > 0 {
+ handlers := strings.Join(values, "; ")
+ // We send the commands to set the shell variables that are needed
+ // for legacy custom completions followed by the functions to call
+ // to perform the actual flag completion
+ completions = append(prepareLegacyCustomCompletionVars(cmd, args), handlers)
+ directive = directive | shellCompDirectiveLegacyCustomComp
+ }
+ }
+ } else {
+ // Check if the legacy custom_func is defined.
+ // This check will work for both "__custom_func" and "__<program>_custom_func".
+ // This could happen if the program defined some functions for legacy flag completion
+ // but not the legacy custom_func.
+ if strings.Contains(cmd.Root().BashCompletionFunction, "_custom_func") {
+ // For legacy args completion, the script already knows what to call
+ // so we only need to tell it the commands to set the shell variables needed
+ completions = prepareLegacyCustomCompletionVars(cmd, args)
+ directive = directive | shellCompDirectiveLegacyCustomComp | shellCompDirectiveLegacyCustomArgsComp
+ }
+ }
+ }
+ }
+ return completions, directive
+}
+
+// The original bash completion script had some shell variables that are used by legacy bash
+// custom completions. Let's set those variables to allow those legacy custom completions
+// to continue working.
+func prepareLegacyCustomCompletionVars(cmd *Command, args []string) []string {
+ var compVarCmds []string
+
+ // "last_command" variable
+ commandName := cmd.CommandPath()
+ commandName = strings.Replace(commandName, " ", "_", -1)
+ commandName = strings.Replace(commandName, ":", "__", -1)
+ compVarCmds = append(compVarCmds, fmt.Sprintf("last_command=%s", commandName))
+
+ // "nouns" array variable
+ compVarCmds = append(compVarCmds, fmt.Sprintf("nouns=(%s)", strings.Join(args, " ")))
+
+ return compVarCmds
+}
+
// CompDebug prints the specified string to the same file as where the
// completion script prints its logs.
// Note that completion printouts should never be on stdout as they would
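The legacy path added above only kicks in when no Go-native completions were produced. For contrast, a hedged sketch of the Go-native route this code prefers: a ValidArgsFunction that returns candidates plus a ShellCompDirective (command and candidate names are hypothetical):

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "myapp"}

	statusCmd := &cobra.Command{
		Use: "status [name]",
		// Called when the shell script requests completions for this command's args.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// Real programs would compute these candidates dynamically.
			return []string{"web", "db", "cache"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	rootCmd.AddCommand(statusCmd)

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}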
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
index eaae9bca8..aeaef0087 100644
--- a/vendor/github.com/spf13/cobra/fish_completions.go
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -28,9 +28,9 @@ function __%[1]s_debug
end
function __%[1]s_perform_completion
- __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
+ __%[1]s_debug "Starting __%[1]s_perform_completion"
- set args (string split -- " " "$argv")
+ set args (string split -- " " (commandline -c))
set lastArg "$args[-1]"
__%[1]s_debug "args: $args"
@@ -71,31 +71,22 @@ function __%[1]s_perform_completion
printf "%%s\n" "$directiveLine"
end
-# This function does three things:
-# 1- Obtain the completions and store them in the global __%[1]s_comp_results
-# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
-# and unset it otherwise
-# 3- Return true if the completion results are not empty
+# This function does two things:
+# - Obtain the completions and store them in the global __%[1]s_comp_results
+# - Return false if file completion should be performed
function __%[1]s_prepare_completions
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+
# Start fresh
- set --erase __%[1]s_comp_do_file_comp
set --erase __%[1]s_comp_results
- # Check if the command-line is already provided. This is useful for testing.
- if not set --query __%[1]s_comp_commandLine
- # Use the -c flag to allow for completion in the middle of the line
- set __%[1]s_comp_commandLine (commandline -c)
- end
- __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
-
- set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
- set --erase __%[1]s_comp_commandLine
+ set results (__%[1]s_perform_completion)
__%[1]s_debug "Completion results: $results"
if test -z "$results"
__%[1]s_debug "No completion, probably due to a failure"
# Might as well do file completion, in case it helps
- set --global __%[1]s_comp_do_file_comp 1
return 1
end
@@ -110,6 +101,8 @@ function __%[1]s_prepare_completions
set shellCompDirectiveNoFileComp %[6]d
set shellCompDirectiveFilterFileExt %[7]d
set shellCompDirectiveFilterDirs %[8]d
+ set shellCompDirectiveLegacyCustomComp %[9]d
+ set shellCompDirectiveLegacyCustomArgsComp %[10]d
if test -z "$directive"
set directive 0
@@ -119,6 +112,14 @@ function __%[1]s_prepare_completions
if test $compErr -eq 1
__%[1]s_debug "Received error directive: aborting."
# Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set legacyCustom (math (math --scale 0 $directive / $shellCompDirectiveLegacyCustomComp) %% 2)
+ set legacyCustomArgs (math (math --scale 0 $directive / $shellCompDirectiveLegacyCustomArgsComp) %% 2)
+ if test $legacyCustom -eq 1; or test $legacyCustomArgs -eq 1
+ __%[1]s_debug "Legacy bash custom completion not applicable to fish"
+ # Do full file completion instead
set --global __%[1]s_comp_do_file_comp 1
return 1
end
@@ -128,7 +129,6 @@ function __%[1]s_prepare_completions
if test $filefilter -eq 1; or test $dirfilter -eq 1
__%[1]s_debug "File extension filtering or directory filtering not supported"
# Do full file completion instead
- set --global __%[1]s_comp_do_file_comp 1
return 1
end
@@ -137,27 +137,51 @@ function __%[1]s_prepare_completions
__%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
- # Important not to quote the variable for count to work
- set numComps (count $__%[1]s_comp_results)
- __%[1]s_debug "numComps: $numComps"
-
- if test $numComps -eq 1; and test $nospace -ne 0
- # To support the "nospace" directive we trick the shell
- # by outputting an extra, longer completion.
- __%[1]s_debug "Adding second completion to perform nospace directive"
- set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
+ # If we want to prevent a space, or if file completion is NOT disabled,
+ # we need to count the number of valid completions.
+ # To do so, we will filter on prefix as the completions we have received
+ # may not already be filtered so as to allow fish to match on different
+ # criteria than prefix.
+ if test $nospace -ne 0; or test $nofiles -eq 0
+ set prefix (commandline -t)
+ __%[1]s_debug "prefix: $prefix"
+
+ set completions
+ for comp in $__%[1]s_comp_results
+ if test (string match -e -r "^$prefix" "$comp")
+ set -a completions $comp
+ end
+ end
+ set --global __%[1]s_comp_results $completions
+ __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
+
+ # Important not to quote the variable for count to work
+ set numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # To support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ # We must first split on \t to get rid of the descriptions because
+ # the extra character we add to the fake second completion must be
+ # before the description. We don't need descriptions anyway since
+ # there is only a single real completion which the shell will expand
+ # immediately.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set split (string split --max 1 \t $__%[1]s_comp_results[1])
+ set --global __%[1]s_comp_results $split[1] $split[1].
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results"
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ # To be consistent with bash and zsh, we only trigger file
+ # completion when there are no other completions
+ __%[1]s_debug "Requesting file completion"
+ return 1
+ end
end
- if test $numComps -eq 0; and test $nofiles -eq 0
- __%[1]s_debug "Requesting file completion"
- set --global __%[1]s_comp_do_file_comp 1
- end
-
- # If we don't want file completion, we must return true even if there
- # are no completions found. This is because fish will perform the last
- # completion command, even if its condition is false, if no other
- # completion command was triggered
- return (not set --query __%[1]s_comp_do_file_comp)
+ return 0
end
# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
@@ -170,21 +194,14 @@ complete --do-complete "%[2]s " > /dev/null 2>&1
# Remove any pre-existing completions for the program since we will be handling all of them.
complete -c %[2]s -e
-# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
-# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
-#
-# This completion will be run second as complete commands are added FILO.
-# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
-complete -c %[2]s -n 'set --query __%[1]s_comp_do_file_comp'
-
-# This completion will be run first as complete commands are added FILO.
-# The call to __%[1]s_prepare_completions will setup both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
-# It provides the program's completion choices.
+# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
+# which provides the program's completion choices.
complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ shellCompDirectiveLegacyCustomComp, shellCompDirectiveLegacyCustomArgsComp))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index 92a70394a..ef290e2eb 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -94,8 +94,10 @@ _%[1]s()
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveLegacyCustomComp=%[8]d
+ local shellCompDirectiveLegacyCustomArgsComp=%[9]d
- local lastParam lastChar flagPrefix requestComp out directive compCount comp lastComp
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace
local -a completions
__%[1]s_debug "\n========= starting completion logic =========="
@@ -163,7 +165,6 @@ _%[1]s()
return
fi
- compCount=0
while IFS='\n' read -r comp; do
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
@@ -175,13 +176,17 @@ _%[1]s()
local tab=$(printf '\t')
comp=${comp//$tab/:}
- ((compCount++))
__%[1]s_debug "Adding completion: ${comp}"
completions+=${comp}
lastComp=$comp
fi
done < <(printf "%%s\n" "${out[@]}")
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ __%[1]s_debug "Activating nospace."
+ noSpace="-S ''"
+ fi
+
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local filteringCmd
@@ -208,25 +213,40 @@ _%[1]s()
__%[1]s_debug "Listing directories in ."
fi
+ local result
_arguments '*:dirname:_files -/'" ${flagPrefix}"
+ result=$?
if [ -n "$subdir" ]; then
popd >/dev/null 2>&1
fi
- elif [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ] && [ ${compCount} -eq 1 ]; then
- __%[1]s_debug "Activating nospace."
- # We can use compadd here as there is no description when
- # there is only one completion.
- compadd -S '' "${lastComp}"
- elif [ ${compCount} -eq 0 ]; then
- if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
- __%[1]s_debug "deactivating file completion"
+ return $result
+ else
+ __%[1]s_debug "Calling _describe"
+ if eval _describe "completions" completions $flagPrefix $noSpace; then
+ __%[1]s_debug "_describe found some completions"
+
+ # Return the success of having called _describe
+ return 0
else
- # Perform file completion
- __%[1]s_debug "activating file completion"
- _arguments '*:filename:_files'" ${flagPrefix}"
+ __%[1]s_debug "_describe did not find completions."
+ __%[1]s_debug "Checking if we should do file completion."
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+
+ # We must return an error code here to let zsh know that there were no
+ # completions found by _describe; this is what will trigger other
+ # matching algorithms to attempt to find completions.
+ # For example zsh can match letters in the middle of words.
+ return 1
+ else
+ # Perform file completion
+ __%[1]s_debug "Activating file completion"
+
+ # We must return the result of this command, so it must be the
+ # last command, or else we must store its result to return it.
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
fi
- else
- _describe "completions" completions $(echo $flagPrefix)
fi
}
@@ -236,5 +256,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
- ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+ shellCompDirectiveLegacyCustomComp, shellCompDirectiveLegacyCustomArgsComp))
}
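The zsh script maps the filtering directives onto zsh's own file matching (shellCompDirectiveFilterFileExt becomes an extension-filtered _files call, shellCompDirectiveFilterDirs becomes '_files -/'), and the return code of _describe now decides whether plain file completion runs at all. As a hedged sketch, assuming the vendored cobra keeps upstream's RegisterFlagCompletionFunc and GenZshCompletion (the flag names below are hypothetical), a program could exercise those directives like this.

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// Hypothetical flags whose completions exercise the filtering directives
	// handled by the zsh script above.
	root.Flags().String("config", "", "configuration file")
	root.Flags().String("workdir", "", "working directory")

	// --config completes only *.yaml / *.yml files
	// (ShellCompDirectiveFilterFileExt, handled via extension-filtered _files in zsh).
	_ = root.RegisterFlagCompletionFunc("config",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
		})

	// --workdir completes only directories
	// (ShellCompDirectiveFilterDirs, handled via '_files -/' in zsh).
	_ = root.RegisterFlagCompletionFunc("workdir",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return nil, cobra.ShellCompDirectiveFilterDirs
		})

	// Write the generated zsh completion script to stdout.
	_ = root.GenZshCompletion(os.Stdout)
}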